| text (stringlengths 12 to 1.05M) | repo_name (stringlengths 5 to 86) | path (stringlengths 4 to 191) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int32 12 to 1.05M) | keyword (listlengths 1 to 23) | text_hash (stringlengths 64 to 64) |
|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-17 20:40
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('visit', '0109_auto_20160711_1927'),
]
operations = [
migrations.CreateModel(
name='ProgramDirector',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('schools', models.ManyToManyField(blank=True, related_name='programdirectors', to='visit.School')),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('user',),
},
),
migrations.CreateModel(
name='SiteCoordinator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('school', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, related_name='sitecoordinators', to='visit.School')),
('user', models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('user',),
},
),
]
| koebbe/homeworks | visit/migrations/0110_programdirector_sitecoordinator.py | Python | mit | 1,603 | ["VisIt"] | c3250cc541edc49663ccc5a899e7204a9c9e9cdc5c695b9d8a06135d3a2ce8a7 |
"""
Vendored Project: AST Decompiler
Package: ast-decompiler
Version: 0.2
Author: Jelle Zijlstra
License: Apache v2
Project: https://github.com/JelleZijlstra/ast_decompiler
"""
import ast
from contextlib import contextmanager
import sys
try:
_basestring = basestring
_long = long
PY3 = False
except NameError:
# py3
_basestring = str
_long = int
PY3 = True
_OP_TO_STR = {
ast.Add: '+',
ast.Sub: '-',
ast.Mult: '*',
ast.Div: '/',
ast.Mod: '%',
ast.Pow: '**',
ast.LShift: '<<',
ast.RShift: '>>',
ast.BitOr: '|',
ast.BitXor: '^',
ast.BitAnd: '&',
ast.FloorDiv: '//',
ast.Invert: '~',
ast.Not: 'not ',
ast.UAdd: '+',
ast.USub: '-',
ast.Eq: '==',
ast.NotEq: '!=',
ast.Lt: '<',
ast.LtE: '<=',
ast.Gt: '>',
ast.GtE: '>=',
ast.Is: 'is',
ast.IsNot: 'is not',
ast.In: 'in',
ast.NotIn: 'not in',
ast.And: 'and',
ast.Or: 'or',
}
class _CallArgs(object):
"""Used as an entry in the precedence table.
Needed to convey the high precedence of the callee but low precedence of the arguments.
"""
_PRECEDENCE = {
_CallArgs: -1,
ast.Or: 0,
ast.And: 1,
ast.Not: 2,
ast.Compare: 3,
ast.BitOr: 4,
ast.BitXor: 5,
ast.BitAnd: 6,
ast.LShift: 7,
ast.RShift: 7,
ast.Add: 8,
ast.Sub: 8,
ast.Mult: 9,
ast.Div: 9,
ast.FloorDiv: 9,
ast.Mod: 9,
ast.UAdd: 10,
ast.USub: 10,
ast.Invert: 10,
ast.Pow: 11,
ast.Subscript: 12,
ast.Call: 12,
ast.Attribute: 12,
}
if hasattr(ast, 'MatMult'):
_OP_TO_STR[ast.MatMult] = '@'
_PRECEDENCE[ast.MatMult] = 9 # same as multiplication
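# Illustrative note (added for clarity, not part of the vendored module): these
# two tables drive parenthesization. For example, in `(1 + 2) * 3` the Add node
# (precedence 8) is a child of the Mult node (precedence 9), so visit_BinOp
# wraps it in parentheses when regenerating source. The synthetic _CallArgs
# entry (precedence -1) is pushed onto the node stack while call arguments are
# visited, so an argument such as the conditional in `f(x if y else z)` is not
# parenthesized merely because its ultimate parent is a high-precedence Call.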
def decompile(ast, indentation=4, line_length=100, starting_indentation=0):
"""Decompiles an AST into Python code.
Arguments:
- ast: code to decompile, using AST objects as generated by the standard library ast module
- indentation: indentation level of lines
- line_length: if lines become longer than this length, ast_decompiler will try to break them up
(but it will not necessarily succeed in all cases)
- starting_indentation: indentation level at which to start producing code
"""
decompiler = Decompiler(
indentation=indentation,
line_length=line_length,
starting_indentation=starting_indentation,
)
return decompiler.run(ast)
class Decompiler(ast.NodeVisitor):
def __init__(self, indentation, line_length, starting_indentation):
self.lines = []
self.current_line = []
self.current_indentation = starting_indentation
self.node_stack = []
self.indentation = indentation
self.max_line_length = line_length
self.has_unicode_literals = False
def run(self, ast):
self.visit(ast)
if self.current_line:
self.lines.append(''.join(self.current_line))
self.current_line = []
return ''.join(self.lines)
def visit(self, node):
self.node_stack.append(node)
try:
return super(Decompiler, self).visit(node)
finally:
if self.node_stack:
self.node_stack.pop()
def precedence_of_node(self, node):
if isinstance(node, (ast.BinOp, ast.UnaryOp, ast.BoolOp)):
return _PRECEDENCE[type(node.op)]
return _PRECEDENCE.get(type(node), -1)
def get_parent_node(self):
try:
return self.node_stack[-2]
except IndexError:
return None
def write(self, code):
assert isinstance(code, _basestring), 'invalid code %r' % code
self.current_line.append(code)
def write_indentation(self):
self.write(' ' * self.current_indentation)
def write_newline(self):
line = ''.join(self.current_line) + '\n'
self.lines.append(line)
self.current_line = []
def current_line_length(self):
return sum(map(len, self.current_line))
def write_expression_list(self, nodes, separator=', ', allow_newlines=True, need_parens=True,
final_separator_if_multiline=True):
"""Writes a list of nodes, separated by separator.
If allow_newlines, will write the expression over multiple lines if necessary to stay within
max_line_length. If need_parens, will surround the expression with parentheses in this case.
If final_separator_if_multiline, will write a separator at the end of the list if it is
divided over multiple lines.
"""
first = True
last_line = len(self.lines)
current_line = list(self.current_line)
for node in nodes:
if first:
first = False
else:
self.write(separator)
self.visit(node)
if allow_newlines and (self.current_line_length() > self.max_line_length or
last_line != len(self.lines)):
break
else:
return # stayed within the limit
# reset state
del self.lines[last_line:]
self.current_line = current_line
separator = separator.rstrip()
if need_parens:
self.write('(')
self.write_newline()
with self.add_indentation():
num_nodes = len(nodes)
for i, node in enumerate(nodes):
self.write_indentation()
self.visit(node)
if final_separator_if_multiline or i < num_nodes - 1:
self.write(separator)
self.write_newline()
self.write_indentation()
if need_parens:
self.write(')')
def write_suite(self, nodes):
with self.add_indentation():
for line in nodes:
self.visit(line)
@contextmanager
def add_indentation(self):
self.current_indentation += self.indentation
try:
yield
finally:
self.current_indentation -= self.indentation
@contextmanager
def parenthesize_if(self, condition):
if condition:
self.write('(')
yield
self.write(')')
else:
yield
def generic_visit(self, node):
raise NotImplementedError('missing visit method for %r' % node)
def visit_Module(self, node):
for line in node.body:
self.visit(line)
visit_Interactive = visit_Module
def visit_Expression(self, node):
self.visit(node.body)
# Multi-line statements
def visit_FunctionDef(self, node):
self.write_function_def(node)
def visit_AsyncFunctionDef(self, node):
self.write_function_def(node, is_async=True)
def write_function_def(self, node, is_async=False):
self.write_newline()
for decorator in node.decorator_list:
self.write_indentation()
self.write('@')
self.visit(decorator)
self.write_newline()
self.write_indentation()
if is_async:
self.write('async ')
self.write('def %s(' % node.name)
self.visit(node.args)
self.write(')')
if getattr(node, 'returns', None):
self.write(' -> ')
self.visit(node.returns)
self.write(':')
self.write_newline()
self.write_suite(node.body)
def visit_ClassDef(self, node):
self.write_newline()
self.write_newline()
for decorator in node.decorator_list:
self.write_indentation()
self.write('@')
self.visit(decorator)
self.write_newline()
self.write_indentation()
self.write('class %s(' % node.name)
exprs = node.bases + getattr(node, 'keywords', [])
self.write_expression_list(exprs, need_parens=False)
self.write('):')
self.write_newline()
self.write_suite(node.body)
def visit_For(self, node):
self.write_for(node)
def visit_AsyncFor(self, node):
self.write_for(node, is_async=True)
def write_for(self, node, is_async=False):
self.write_indentation()
if is_async:
self.write('async ')
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
self.write(':')
self.write_newline()
self.write_suite(node.body)
self.write_else(node.orelse)
def visit_While(self, node):
self.write_indentation()
self.write('while ')
self.visit(node.test)
self.write(':')
self.write_newline()
self.write_suite(node.body)
self.write_else(node.orelse)
def visit_If(self, node):
self.write_indentation()
self.write('if ')
self.visit(node.test)
self.write(':')
self.write_newline()
self.write_suite(node.body)
while node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], ast.If):
node = node.orelse[0]
self.write_indentation()
self.write('elif ')
self.visit(node.test)
self.write(':')
self.write_newline()
self.write_suite(node.body)
self.write_else(node.orelse)
def write_else(self, orelse):
if orelse:
self.write_indentation()
self.write('else:')
self.write_newline()
self.write_suite(orelse)
def visit_With(self, node):
if hasattr(node, 'items'):
self.write_py3_with(node)
return
self.write_indentation()
self.write('with ')
nodes = [node]
body = node.body
is_first = True
while len(body) == 1 and isinstance(body[0], ast.With):
nodes.append(body[0])
body = body[0].body
for context_node in nodes:
if is_first:
is_first = False
else:
self.write(', ')
self.visit(context_node.context_expr)
if context_node.optional_vars:
self.write(' as ')
self.visit(context_node.optional_vars)
self.write(':')
self.write_newline()
self.write_suite(body)
def visit_AsyncWith(self, node):
self.write_py3_with(node, is_async=True)
def write_py3_with(self, node, is_async=False):
self.write_indentation()
if is_async:
self.write('async ')
self.write('with ')
self.write_expression_list(node.items, allow_newlines=False)
self.write(':')
self.write_newline()
self.write_suite(node.body)
def visit_withitem(self, node):
self.visit(node.context_expr)
if node.optional_vars:
self.write(' as ')
self.visit(node.optional_vars)
def visit_Try(self, node):
# py3 only
self.visit_TryExcept(node)
if node.finalbody:
self.write_finalbody(node.finalbody)
def visit_TryExcept(self, node):
self.write_indentation()
self.write('try:')
self.write_newline()
self.write_suite(node.body)
for handler in node.handlers:
self.visit(handler)
self.write_else(node.orelse)
def visit_TryFinally(self, node):
if len(node.body) == 1 and isinstance(node.body[0], ast.TryExcept):
self.visit(node.body[0])
else:
self.write_indentation()
self.write('try:')
self.write_newline()
self.write_suite(node.body)
self.write_finalbody(node.finalbody)
def write_finalbody(self, body):
self.write_indentation()
self.write('finally:')
self.write_newline()
self.write_suite(body)
# One-line statements
def visit_Return(self, node):
self.write_indentation()
self.write('return')
if node.value:
self.write(' ')
self.visit(node.value)
self.write_newline()
def visit_Delete(self, node):
self.write_indentation()
self.write('del ')
self.write_expression_list(node.targets, allow_newlines=False)
self.write_newline()
def visit_Assign(self, node):
self.write_indentation()
self.write_expression_list(node.targets, separator=' = ', allow_newlines=False)
self.write(' = ')
self.visit(node.value)
self.write_newline()
def visit_AugAssign(self, node):
self.write_indentation()
self.visit(node.target)
self.write(' ')
self.visit(node.op)
self.write('= ')
self.visit(node.value)
self.write_newline()
def visit_AnnAssign(self, node):
self.write_indentation()
if not node.simple:
self.write('(')
self.visit(node.target)
if not node.simple:
self.write(')')
self.write(': ')
self.visit(node.annotation)
if node.value is not None:
self.write(' = ')
self.visit(node.value)
self.write_newline()
def visit_Print(self, node):
self.write_indentation()
self.write('print')
if node.dest:
self.write(' >>')
self.visit(node.dest)
if node.values:
self.write(',')
if node.values:
self.write(' ')
self.write_expression_list(node.values, allow_newlines=False)
if not node.nl:
self.write(',')
self.write_newline()
def visit_Raise(self, node):
self.write_indentation()
self.write('raise')
if hasattr(node, 'exc'):
# py3
if node.exc is not None:
self.write(' ')
self.visit(node.exc)
if node.cause is not None:
self.write(' from ')
self.visit(node.cause)
else:
expressions = [child for child in (node.type, node.inst, node.tback) if child]
if expressions:
self.write(' ')
self.write_expression_list(expressions, allow_newlines=False)
self.write_newline()
def visit_Assert(self, node):
self.write_indentation()
self.write('assert ')
self.visit(node.test)
if node.msg:
self.write(', ')
self.visit(node.msg)
self.write_newline()
def visit_Import(self, node):
self.write_indentation()
self.write('import ')
self.write_expression_list(node.names, allow_newlines=False)
self.write_newline()
def visit_ImportFrom(self, node):
if (
node.module == '__future__' and
any(alias.name == 'unicode_literals' for alias in node.names)
):
self.has_unicode_literals = True
self.write_indentation()
self.write('from %s' % ('.' * (node.level or 0)))
if node.module:
self.write(node.module)
self.write(' import ')
self.write_expression_list(node.names)
self.write_newline()
def visit_Exec(self, node):
self.write_indentation()
self.write('exec ')
self.visit(node.body)
if node.globals:
self.write(' in ')
self.visit(node.globals)
if node.locals:
self.write(', ')
self.visit(node.locals)
self.write_newline()
def visit_Global(self, node):
self.write_indentation()
self.write('global %s' % ', '.join(node.names))
self.write_newline()
def visit_Nonlocal(self, node):
self.write_indentation()
self.write('nonlocal %s' % ', '.join(node.names))
self.write_newline()
def visit_Expr(self, node):
self.write_indentation()
self.visit(node.value)
self.write_newline()
def visit_Pass(self, node):
self.write_indentation()
self.write('pass')
self.write_newline()
def visit_Break(self, node):
self.write_indentation()
self.write('break')
self.write_newline()
def visit_Continue(self, node):
self.write_indentation()
self.write('continue')
self.write_newline()
# Expressions
def visit_BoolOp(self, node):
my_prec = self.precedence_of_node(node)
parent_prec = self.precedence_of_node(self.get_parent_node())
with self.parenthesize_if(my_prec <= parent_prec):
op = 'and' if isinstance(node.op, ast.And) else 'or'
self.write_expression_list(
node.values,
separator=' %s ' % op,
final_separator_if_multiline=False,
)
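# Illustrative note (added for clarity, not part of the vendored module):
# visit_BinOp below also accounts for associativity when precedences are equal.
# `**` is right-associative, so `(2 ** 3) ** 4` needs parentheses around the
# left operand; the other binary operators are left-associative, so
# `a - (b - c)` needs parentheses around the right operand.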
def visit_BinOp(self, node):
parent_node = self.get_parent_node()
my_prec = self.precedence_of_node(node)
parent_prec = self.precedence_of_node(parent_node)
if my_prec < parent_prec:
should_parenthesize = True
elif my_prec == parent_prec:
if isinstance(node.op, ast.Pow):
should_parenthesize = node == parent_node.left
else:
should_parenthesize = node == parent_node.right
else:
should_parenthesize = False
with self.parenthesize_if(should_parenthesize):
self.visit(node.left)
self.write(' ')
self.visit(node.op)
self.write(' ')
self.visit(node.right)
def visit_UnaryOp(self, node):
my_prec = self.precedence_of_node(node)
parent_prec = self.precedence_of_node(self.get_parent_node())
with self.parenthesize_if(my_prec < parent_prec):
self.visit(node.op)
self.visit(node.operand)
def visit_Lambda(self, node):
should_parenthesize = isinstance(
self.get_parent_node(),
(ast.BinOp, ast.UnaryOp, ast.Compare, ast.IfExp, ast.Attribute, ast.Subscript, ast.Call, ast.BoolOp)
)
with self.parenthesize_if(should_parenthesize):
self.write('lambda')
if node.args.args or node.args.vararg or node.args.kwarg:
self.write(' ')
self.visit(node.args)
self.write(': ')
self.visit(node.body)
def visit_IfExp(self, node):
parent_node = self.get_parent_node()
if isinstance(parent_node,
(ast.BinOp, ast.UnaryOp, ast.Compare, ast.Attribute, ast.Subscript,
ast.Call, ast.BoolOp, ast.comprehension)):
should_parenthesize = True
elif isinstance(parent_node, ast.IfExp) and \
(node is parent_node.test or node is parent_node.body):
should_parenthesize = True
else:
should_parenthesize = False
with self.parenthesize_if(should_parenthesize):
self.visit(node.body)
self.write(' if ')
self.visit(node.test)
self.write(' else ')
self.visit(node.orelse)
def visit_Dict(self, node):
self.write('{')
items = [KeyValuePair(key, value) for key, value in zip(node.keys, node.values)]
self.write_expression_list(items, need_parens=False)
self.write('}')
def visit_KeyValuePair(self, node):
self.visit(node.key)
self.write(': ')
self.visit(node.value)
def visit_Set(self, node):
self.write('{')
self.write_expression_list(node.elts, need_parens=False)
self.write('}')
def visit_ListComp(self, node):
self.visit_comp(node, '[', ']')
def visit_SetComp(self, node):
self.visit_comp(node, '{', '}')
def visit_DictComp(self, node):
self.write('{')
elts = [KeyValuePair(node.key, node.value)] + node.generators
self.write_expression_list(elts, separator=' ', need_parens=False)
self.write('}')
def visit_GeneratorExp(self, node):
self.visit_comp(node, '(', ')')
def visit_comp(self, node, start, end):
self.write(start)
self.write_expression_list([node.elt] + node.generators, separator=' ', need_parens=False)
self.write(end)
def visit_Await(self, node):
with self.parenthesize_if(
not isinstance(self.get_parent_node(), (ast.Expr, ast.Assign, ast.AugAssign))):
self.write('await ')
self.visit(node.value)
def visit_Yield(self, node):
with self.parenthesize_if(
not isinstance(self.get_parent_node(), (ast.Expr, ast.Assign, ast.AugAssign))):
self.write('yield')
if node.value:
self.write(' ')
self.visit(node.value)
def visit_YieldFrom(self, node):
with self.parenthesize_if(
not isinstance(self.get_parent_node(), (ast.Expr, ast.Assign, ast.AugAssign))):
self.write('yield from ')
self.visit(node.value)
def visit_Compare(self, node):
my_prec = self.precedence_of_node(node)
parent_prec = self.precedence_of_node(self.get_parent_node())
with self.parenthesize_if(my_prec <= parent_prec):
self.visit(node.left)
for op, expr in zip(node.ops, node.comparators):
self.write(' ')
self.visit(op)
self.write(' ')
self.visit(expr)
def visit_Call(self, node):
self.visit(node.func)
self.write('(')
self.node_stack.append(_CallArgs())
try:
args = node.args + node.keywords
if hasattr(node, 'starargs') and node.starargs:
args.append(StarArg(node.starargs))
if hasattr(node, 'kwargs') and node.kwargs:
args.append(DoubleStarArg(node.kwargs))
if args:
self.write_expression_list(
args,
need_parens=False,
final_separator_if_multiline=False # it's illegal after *args and **kwargs
)
self.write(')')
finally:
self.node_stack.pop()
def visit_StarArg(self, node):
self.write('*')
self.visit(node.arg)
def visit_DoubleStarArg(self, node):
self.write('**')
self.visit(node.arg)
def visit_KeywordArg(self, node):
self.visit(node.arg)
self.write('=')
self.visit(node.value)
def visit_Repr(self, node):
self.write('`')
self.visit(node.value)
self.write('`')
def visit_Num(self, node):
should_parenthesize = isinstance(node.n, int) and node.n >= 0 and \
isinstance(self.get_parent_node(), ast.Attribute)
should_parenthesize = should_parenthesize or (isinstance(node.n, complex) and
node.n.real == 0.0 and (node.n.imag < 0 or node.n.imag == -0.0))
if not should_parenthesize:
parent_node = self.get_parent_node()
should_parenthesize = isinstance(parent_node, ast.UnaryOp) and \
isinstance(parent_node.op, ast.USub) and \
hasattr(parent_node, 'lineno')
with self.parenthesize_if(should_parenthesize):
if isinstance(node.n, float) and abs(node.n) > sys.float_info.max:
# otherwise we write inf, which won't be parsed back right
# I don't know of any way to write nan with a literal
self.write('1e1000' if node.n > 0 else '-1e1000')
elif isinstance(node.n, (int, _long, float)) and node.n < 0:
# needed for precedence to work correctly
me = self.node_stack.pop()
if isinstance(node.n, int):
val = str(-node.n)
else:
val = repr(type(node.n)(-node.n)) # - of long may be int
self.visit(ast.UnaryOp(op=ast.USub(), operand=ast.Name(id=val)))
self.node_stack.append(me)
else:
self.write(repr(node.n))
def visit_Str(self, node):
if self.has_unicode_literals and isinstance(node.s, str):
self.write('b')
self.write(repr(node.s))
def visit_FormattedValue(self, node):
has_parent = isinstance(self.get_parent_node(), ast.JoinedStr)
if not has_parent:
self.write('f"')
self.write('{')
self.visit(node.value)
if node.conversion != -1:
self.write('!%s' % chr(node.conversion))
if node.format_spec is not None:
self.write(':')
if not isinstance(node.format_spec, ast.Str):
raise TypeError('format spec must be a string')
self.write(node.format_spec.s)
self.write('}')
if not has_parent:
self.write('"')
def visit_JoinedStr(self, node):
self.write("f'")
for value in node.values:
if isinstance(value, ast.Str):
self.write(value.s)
else:
self.visit(value)
self.write("'")
def visit_Bytes(self, node):
self.write(repr(node.s))
def visit_NameConstant(self, node):
self.write(repr(node.value))
def visit_Constant(self, node):
# TODO what is this
raise NotImplementedError(ast.dump(node))
def visit_Attribute(self, node):
self.visit(node.value)
self.write('.%s' % node.attr)
def visit_Subscript(self, node):
self.visit(node.value)
self.write('[')
self.visit(node.slice)
self.write(']')
def visit_Starred(self, node):
# TODO precedence
self.write('*')
self.visit(node.value)
def visit_Name(self, node):
if isinstance(node.id, _basestring):
self.write(node.id)
else:
self.visit(node.id)
def visit_List(self, node):
self.write('[')
self.write_expression_list(node.elts, need_parens=False)
self.write(']')
def visit_Tuple(self, node):
if not node.elts:
self.write('()')
else:
should_parenthesize = not isinstance(
self.get_parent_node(),
(ast.Expr, ast.Assign, ast.AugAssign, ast.Return, ast.Yield)
)
with self.parenthesize_if(should_parenthesize):
if len(node.elts) == 1:
self.visit(node.elts[0])
self.write(',')
else:
self.write_expression_list(node.elts, need_parens=not should_parenthesize)
# slice
def visit_Ellipsis(self, node):
if PY3:
self.write('...')
elif isinstance(self.get_parent_node(), (ast.Subscript, ast.ExtSlice)):
# self[...] gets parsed into Ellipsis without an intervening Index node
self.write('...')
else:
self.write('Ellipsis')
def visit_Slice(self, node):
if node.lower:
self.visit(node.lower)
self.write(':')
if node.upper:
self.visit(node.upper)
if node.step:
self.write(':')
self.visit(node.step)
def visit_ExtSlice(self, node):
if len(node.dims) == 1:
self.visit(node.dims[0])
self.write(',')
else:
self.write_expression_list(node.dims, need_parens=False)
def visit_Index(self, node):
self.visit(node.value)
# operators
for op, string in _OP_TO_STR.items():
exec('def visit_%s(self, node): self.write(%r)' % (op.__name__, string))
# Other types
visit_Load = visit_Store = visit_Del = visit_AugLoad = visit_AugStore = visit_Param = \
lambda self, node: None
def visit_comprehension(self, node):
self.write('for ')
self.visit(node.target)
self.write(' in ')
self.visit(node.iter)
for expr in node.ifs:
self.write(' if ')
self.visit(expr)
def visit_ExceptHandler(self, node):
self.write_indentation()
self.write('except')
if node.type:
self.write(' ')
self.visit(node.type)
if node.name:
self.write(' as ')
# node.name is a string in py3 but an expr in py2
if isinstance(node.name, _basestring):
self.write(node.name)
else:
self.visit(node.name)
self.write(':')
self.write_newline()
self.write_suite(node.body)
def visit_arguments(self, node):
num_defaults = len(node.defaults)
if num_defaults:
args = node.args[:-num_defaults]
default_args = zip(node.args[-num_defaults:], node.defaults)
else:
args = list(node.args)
default_args = []
for name, value in default_args:
args.append(KeywordArg(name, value))
if node.vararg:
args.append(StarArg(ast.Name(id=node.vararg)))
# TODO write a * if there are kwonly args but no vararg
if hasattr(node, 'kw_defaults'):
if node.kwonlyargs and not node.vararg:
args.append(StarArg(ast.Name(id='')))
num_kwarg_defaults = len(node.kw_defaults)
if num_kwarg_defaults:
args += node.kwonlyargs[:-num_kwarg_defaults]
default_kwargs = zip(node.kwonlyargs[-num_kwarg_defaults:], node.kw_defaults)
else:
args += node.kwonlyargs
default_kwargs = []
for name, value in default_kwargs:
args.append(KeywordArg(name, value))
if node.kwarg:
args.append(DoubleStarArg(ast.Name(id=node.kwarg)))
if args:
# lambdas can't have a multiline arglist
allow_newlines = not isinstance(self.get_parent_node(), ast.Lambda)
self.write_expression_list(
args,
allow_newlines=allow_newlines,
need_parens=False,
final_separator_if_multiline=False # illegal after **kwargs
)
def visit_arg(self, node):
self.write(node.arg)
if node.annotation:
self.write(': ')
# TODO precedence
self.visit(node.annotation)
def visit_keyword(self, node):
if node.arg is None:
# in py3, **kwargs is a keyword whose arg is None
self.write('**')
else:
self.write(node.arg + '=')
self.visit(node.value)
def visit_alias(self, node):
self.write(node.name)
if node.asname is not None:
self.write(' as %s' % node.asname)
# helper ast nodes to make decompilation easier
class KeyValuePair(object):
"""A key-value pair as used in a dictionary display."""
_fields = ['key', 'value']
def __init__(self, key, value):
self.key = key
self.value = value
class StarArg(object):
"""A * argument."""
_fields = ['arg']
def __init__(self, arg):
self.arg = arg
class DoubleStarArg(object):
"""A ** argument."""
_fields = ['arg']
def __init__(self, arg):
self.arg = arg
class KeywordArg(object):
"""A x=3 keyword argument in a function definition."""
_fields = ['arg', 'value']
def __init__(self, arg, value):
self.arg = arg
self.value = value
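# Minimal usage sketch (added for illustration, not part of the vendored
# module): round-trip a small snippet through the standard-library parser and
# this decompiler. The snippet deliberately avoids literal constants, because
# this vendored copy predates ast.Constant (visit_Constant above raises
# NotImplementedError), so literals parsed by Python 3.8+ are not handled.
if __name__ == '__main__':
    _demo_source = 'result = foo(a, b) and not bar.baz\n'
    _demo_tree = ast.parse(_demo_source)
    # Expected to print code equivalent to _demo_source (whitespace may differ).
    print(decompile(_demo_tree, indentation=4, line_length=79))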
| jmvrbanac/Specter | specter/vendor/ast_decompiler.py | Python | mit | 31,554 | ["VisIt"] | 24261536ac8354ba939fde37b7b6802d74078e13afb27e213f09e264a67dd39f |
# Copyright (C) 2016
# Max Planck Institute for Polymer Research
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
******************************
espressopp.integrator.Rattle
******************************
RATTLE algorithm for satisfying bond constraints and making the corresponding velocity corrections.
Refs:
Andersen, H. C. Rattle: A velocity version of the Shake algorithm for molecular dynamics calculations, J. Comp. Physics, 52, 24-34 (1983)
Allen & Tildesley, Computer Simulation of Liquids, OUP, 1987
RATTLE is implemented as an integrator extension, and takes as input a list of lists detailing, for each bond to be constrained: the indices of the two particles involved, the constraint distance, and the particle masses.
This implementation is intended for use with bonds between a hydrogen and a heavy atom, which form isolated groups of constrained bonds, e.g. NH2 or CH3 groups. The particle which participates in only one constrained bond (i.e. the hydrogen) should be listed first. The particle listed second (the heavy atom) may participate in more than one constrained bond. This implementation will not work if both particles participate in more than one constrained bond.
Note: At the moment, the RATTLE implementation only works if all atoms in an isolated group of rigid bonds are on the same CPU. This can be achieved by grouping all the particles using DomainDecompositionAdress and FixedTupleListAdress. The groups of rigid bonds can be identified using the dictionary constrainedBondsDict (see example below).
Note: The constraints are not taken into account in other parts of the code, such as temperature or pressure calculation.
Python example script for one methanol molecule where atoms are indexed in the order C H1 H2 H3 OH HO:
>>> # list for each constrained bond which lists: heavy atom index, light atom index, bond length, heavy atom mass, light atom mass
>>> constrainedBondsList = [[1, 2, 0.109, 12.011, 1.008], [1, 3, 0.109, 12.011, 1.008], [1, 4, 0.109, 12.011, 1.008], [5, 6, 0.096, 15.9994, 1.008]]
>>> rattle = espressopp.integrator.Rattle(system, maxit = 1000, tol = 1e-6, rptol = 1e-6)
>>> rattle.addConstrainedBonds(constrainedBondsList)
>>> integrator.addExtension(rattle)
This list of lists of constrained bonds can be conveniently built using the espressopp tool `findConstrainedBonds`.
>>> # Automatically identify hydrogen-containing bonds among the particles whose indices are in the list pidlist
>>> # pidlist - list of indices of particles in which to search for hydrogens (list of int)
>>> # masses - list of masses of all particles (list of real)
>>> # massCutoff - atoms with mass < massCutoff are identified as hydrogens (real)
>>> # bondtypes - dictionary (e.g. obtained using espressopp.gromacs.read()), key: bondtype (int), value: list of tuples of the indices of the two particles in each bond of that bondtype (list of 2-tuples of integers)
>>> # bondtypeparams - dictionary (e.g. obtained using espressopp.gromacs.read()), key: bondtype (int), value: espressopp interaction potential instance
>>> hydrogenIDs, constrainedBondsDict, constrainedBondsList = espressopp.tools.findConstrainedBonds(pidlist, bondtypes, bondtypeparams, masses, massCutoff = 1.1)
>>> # hydrogenIDs - list of indices of hydrogen atoms
>>> # constrainedBondsDict - dictionary mapping from a heavy atom to all the light atoms it is bonded to, key: heavy atom index (int), value: list of light atom indices (list of int)
>>> # constrainedBondsList - list of lists, constrained bonds for use with Rattle.addConstrainedBonds()
>>> print "# found", len(hydrogenIDs)," hydrogens in the solute"
>>> print "# found", len(constrainedBondsDict)," heavy atoms involved in bonds to hydrogen"
>>> print "# will constrain", len(constrainedBondsList)," bonds using RATTLE"
.. function:: espressopp.integrator.Rattle(system, maxit = 1000, tol = 1e-6, rptol = 1e-6)
:param espressopp.System system: espressopp system
:param int maxit: maximum number of iterations
:param real tol: tolerance for deciding if constraint distance and current distance are similar enough
:param real rptol: tolerance for deciding if the angle between the bond vector at end of previous timestep and current vector has become too large
.. function:: espressopp.integrator.Rattle.addConstrainedBonds(bondDetailsLists)
:param bondDetailsLists: list of lists, each list contains pid of heavy atom, pid of light atom, constraint distance, mass of heavy atom, mass of light atom
:type bondDetailsLists: list of [int, int, real, real, real]
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_Rattle
class RattleLocal(ExtensionLocal, integrator_Rattle):
def __init__(self, system, maxit = 1000, tol = 1e-6, rptol = 1e-6):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, integrator_Rattle, system, maxit, tol, rptol)
def addConstrainedBonds(self, bondDetailsLists):
"""
Each processor takes the broadcasted list.
"""
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
for blist in bondDetailsLists: #each list contains int pid1, int pid2, real constraintDist, real mass1, real mass2
self.cxxclass.addBond(self, blist[0], blist[1], blist[2], blist[3], blist[4])
if pmi.isController:
class Rattle(Extension, metaclass=pmi.Proxy):
pmiproxydefs = dict(
cls = 'espressopp.integrator.RattleLocal',
pmicall = [ "addConstrainedBonds" ]
)
| espressopp/espressopp | src/integrator/Rattle.py | Python | gpl-3.0 | 6,436 | ["ESPResSo", "Gromacs"] | 12080ae519739ea30fe0a630d2d2f39cff36acda5e302fa689327755d2a3a831 |
#!/usr/local/bin/yade-trunk -x
# -*- encoding=utf-8 -*-
# CWBoon 2015
# Some of the parameters are defunct.
# I use this for jointed rock mass, but you can use it to generate granular particles too.
from yade import pack
import math
import random  # needed for the random block orientations assigned below
O.engines=[
ForceResetter(),
InsertionSortCollider([PotentialBlock2AABB()],verletDist=0),
InteractionLoop(
[Ig2_PB_PB_ScGeom()],
[Ip2_FrictMat_FrictMat_KnKsPBPhys(Knormal = 1e8, Kshear = 1e8,useFaceProperties=False,calJointLength=False,twoDimension=True,unitWidth2D=1.0,viscousDamping=0.7)],
[Law2_SCG_KnKsPBPhys_KnKsPBLaw(label='law',neverErase=False)]
#[Ip2_FrictMat_FrictMat_FrictPhys()],
#[Law2_ScGeom_FrictPhys_CundallStrack()]
),
#GravityEngine(gravity=[0,-10,0]),
#GlobalStiffnessTimeStepper(),
NewtonIntegrator(damping=0.0,exactAsphericalRot=False,gravity=[0,-10,0]),
#PotentialBlockVTKRecorder(fileName='/home/chiab/yadeNew/mosek/8Nov/BranchA/scripts2/boon/ComputersGeotechnics/vtk/1000PP',iterPeriod=100,sampleX=50,sampleY=50,sampleZ=50)
]
powderDensity = 10000
distanceToCentre= 0.5
meanSize = 1.0
wallThickness = 0.5*meanSize
O.materials.append(FrictMat(young=150e6,poisson=.4,frictionAngle=radians(0.0),density=powderDensity,label='frictionless'))
lengthOfBase = 9.0*meanSize
heightOfBase = 14.0*meanSize
sp=pack.SpherePack()
mn,mx=Vector3(-0.5*(lengthOfBase-wallThickness),0.5*meanSize,-0.5*(lengthOfBase-wallThickness)),Vector3(0.5*(lengthOfBase-wallThickness),7.0*heightOfBase,0.5*(lengthOfBase-wallThickness))
sphereRad = sqrt(3.0)*0.5*meanSize
sp.makeCloud(mn,mx,sphereRad,0,100,False)
count= 0
rPP=0.05*meanSize
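# Explanatory note (added for clarity, based on the plane-based PotentialBlock
# formulation; treat as a hedged summary rather than the original author's
# comment): each PotentialBlock below is bounded by six half-spaces
# a[i]*x + b[i]*y + c[i]*z <= d[i]. The a, b, c lists give the +/-x, +/-y and
# +/-z face normals, and every d entry is distanceToCentre - rPP, so the bodies
# are (rounded) cubes of half-width about distanceToCentre, centred on the
# sphere-packing positions returned by makeCloud above.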
for s in sp:
b=Body()
radius=2.2
dynamic=True
wire=False
color=[0,0,255.0]
highlight=False
b.shape=PotentialBlock(k=0.2, r=0.05*meanSize, R=1.02*sphereRad, a=[1.0,-1.0,0.0,0.0,0.0,0.0], b=[0.0,0.0,1.0,-1.0,0.0,0.0], c=[0.0,0.0,0.0,0.0,1.0,-1.0], d=[distanceToCentre-rPP,distanceToCentre-rPP,distanceToCentre-rPP,distanceToCentre-rPP,distanceToCentre-rPP,distanceToCentre-rPP],isBoundary=False,color=color,wire=wire,highlight=highlight,minAabb=Vector3(sphereRad,sphereRad,sphereRad),maxAabb=Vector3(sphereRad,sphereRad,sphereRad),maxAabbRotated=Vector3(sphereRad,sphereRad,sphereRad),minAabbRotated=Vector3(sphereRad,sphereRad,sphereRad),AabbMinMax=False)
length=meanSize
V= 1.0
geomInert=(2./5.)*powderDensity*V*sphereRad**2
utils._commonBodySetup(b,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=s[0], noBound=False, resetState=True, fixed=False)
b.state.pos = s[0] #s[0] stores center
b.state.ori = Quaternion((random.random(),random.random(),random.random()),random.random()) #s[2]
O.bodies.append(b)
b.dynamic = True
count =count+1
#v1 = (0, 0, 0.2) (0, 0, 1) (0,0,2.498)
#v2 = (-0.0943, 0.1633, -0.0667) (-0.4714, 0.8165, -0.3334)
#v3 = (0.1886 0 -0.0667) (0.9428, 0, -0.3333)
#edge = 0.3266 1.6333 4.08
#volume = 0.0041 0.5132 8
r=0.1*wallThickness
bbb=Body()
wire=False
color=[0,255,0]
highlight=False
bbb.shape=PotentialBlock(k=0.1, r=0.1*wallThickness, R=0.2*lengthOfBase,a=[1,-1,0,0,0,0], b=[0,0,1,-1,0,0], c=[0,0,0,0,1,-1], d=[lengthOfBase/6.0-r,lengthOfBase/6.0-r,0.5*wallThickness-r,0.5*wallThickness-r,lengthOfBase/6.0-r,lengthOfBase/6.0-r], id=count,isBoundary=True,isBoundaryPlane=[True,True,True,True,True,True],color=color ,wire=wire,highlight=highlight,AabbMinMax=True, minAabb=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0),maxAabb=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0),maxAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0),minAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0))
length=lengthOfBase
V=lengthOfBase*lengthOfBase*wallThickness
geomInert=(1./6.)*V*length*wallThickness
utils._commonBodySetup(bbb,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=[0.0,0,0], noBound=False, resetState=True, fixed=True)
bbb.dynamic=False
bbb.state.pos = [0.0,0,0]
lidID = O.bodies.append(bbb)
b1=Body()
wire=False
color=[0,255,0]
highlight=False
b1.shape=PotentialBlock(k=0.1, r=0.1*wallThickness, R=0.2*lengthOfBase,a=[1,-1,0,0,0,0], b=[0,0,1,-1,0,0], c=[0,0,0,0,1,-1], d=[lengthOfBase/6.0-r,lengthOfBase/6.0-r,0.5*wallThickness-r,0.5*wallThickness-r,lengthOfBase/6.0-r,lengthOfBase/6.0-r], id=count,isBoundary=True,isBoundaryPlane=[True,True,True,True,True,True],color=color ,wire=wire,highlight=highlight,AabbMinMax=True, minAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0),minAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0))
length=lengthOfBase
V=lengthOfBase*lengthOfBase*wallThickness
geomInert=(1./6.)*V*length*wallThickness
utils._commonBodySetup(b1,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=[0.0,0,0], noBound=False, resetState=True,fixed=True)
b1.dynamic=False
b1.state.pos = [lengthOfBase/3.0,0,lengthOfBase/3.0]
O.bodies.append(b1)
b2=Body()
wire=False
color=[0,255,0]
highlight=False
b2.shape=PotentialBlock(k=0.1, r=0.1*wallThickness, R=0.2*lengthOfBase,a=[1,-1,0,0,0,0], b=[0,0,1,-1,0,0], c=[0,0,0,0,1,-1], d=[lengthOfBase/6.0-r,lengthOfBase/6.0-r,0.5*wallThickness-r,0.5*wallThickness-r,lengthOfBase/6.0-r,lengthOfBase/6.0-r], id=count,isBoundary=True,isBoundaryPlane=[True,True,True,True,True,True],color=color ,wire=wire,highlight=highlight,AabbMinMax=True, minAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0),minAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0))
length=lengthOfBase
V=lengthOfBase*lengthOfBase*wallThickness
geomInert=(1./6.)*V*length*wallThickness
utils._commonBodySetup(b2,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=[0.0,0,0], noBound=False, resetState=True,fixed=True)
b2.dynamic=False
b2.state.pos = [-lengthOfBase/3.0,0,lengthOfBase/3.0]
O.bodies.append(b2)
b3=Body()
wire=False
color=[0,255,0]
highlight=False
b3.shape=PotentialBlock(k=0.1, r=0.1*wallThickness, R=0.2*lengthOfBase,a=[1,-1,0,0,0,0], b=[0,0,1,-1,0,0], c=[0,0,0,0,1,-1], d=[lengthOfBase/6.0-r,lengthOfBase/6.0-r,0.5*wallThickness-r,0.5*wallThickness-r,lengthOfBase/6.0-r,lengthOfBase/6.0-r], id=count,isBoundary=True,isBoundaryPlane=[True,True,True,True,True,True],color=color ,wire=wire,highlight=highlight,AabbMinMax=True, minAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0),minAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0))
length=lengthOfBase
V=lengthOfBase*lengthOfBase*wallThickness
geomInert=(1./6.)*V*length*wallThickness
utils._commonBodySetup(b3,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=[0.0,0,0], noBound=False, resetState=True,fixed=True)
b3.dynamic=False
b3.state.pos = [0.0,0,lengthOfBase/3.0]
O.bodies.append(b3)
b4=Body()
wire=False
color=[0,255,0]
highlight=False
b4.shape=PotentialBlock(k=0.1, r=0.1*wallThickness, R=0.2*lengthOfBase,a=[1,-1,0,0,0,0], b=[0,0,1,-1,0,0], c=[0,0,0,0,1,-1], d=[lengthOfBase/6.0-r,lengthOfBase/6.0-r,0.5*wallThickness-r,0.5*wallThickness-r,lengthOfBase/6.0-r,lengthOfBase/6.0-r], id=count,isBoundary=True,isBoundaryPlane=[True,True,True,True,True,True],color=color ,wire=wire,highlight=highlight,AabbMinMax=True, minAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0),minAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0))
length=lengthOfBase
V=lengthOfBase*lengthOfBase*wallThickness
geomInert=(1./6.)*V*length*wallThickness
utils._commonBodySetup(b4,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=[0.0,0,0], noBound=False, resetState=True,fixed=True)
b4.dynamic=False
b4.state.pos = [lengthOfBase/3.0,0,-lengthOfBase/3.0]
O.bodies.append(b4)
b5=Body()
wire=False
color=[0,255,0]
highlight=False
b5.shape=PotentialBlock(k=0.1, r=0.1*wallThickness, R=0.2*lengthOfBase,a=[1,-1,0,0,0,0], b=[0,0,1,-1,0,0], c=[0,0,0,0,1,-1], d=[lengthOfBase/6.0-r,lengthOfBase/6.0-r,0.5*wallThickness-r,0.5*wallThickness-r,lengthOfBase/6.0-r,lengthOfBase/6.0-r], id=count,isBoundary=True,isBoundaryPlane=[True,True,True,True,True,True],color=color ,wire=wire,highlight=highlight,AabbMinMax=True, minAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0),minAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0))
length=lengthOfBase
V=lengthOfBase*lengthOfBase*wallThickness
geomInert=(1./6.)*V*length*wallThickness
utils._commonBodySetup(b5,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=[0.0,0,0], noBound=False, resetState=True,fixed=True)
b5.dynamic=False
b5.state.pos = [0.0,0,-lengthOfBase/3.0]
O.bodies.append(b5)
b6=Body()
wire=False
color=[0,255,0]
highlight=False
b6.shape=PotentialBlock(k=0.1, r=0.1*wallThickness, R=0.2*lengthOfBase,a=[1,-1,0,0,0,0], b=[0,0,1,-1,0,0], c=[0,0,0,0,1,-1], d=[lengthOfBase/6.0-r,lengthOfBase/6.0-r,0.5*wallThickness-r,0.5*wallThickness-r,lengthOfBase/6.0-r,lengthOfBase/6.0-r], id=count,isBoundary=True,isBoundaryPlane=[True,True,True,True,True,True],color=color ,wire=wire,highlight=highlight,AabbMinMax=True, minAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0),minAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0))
length=lengthOfBase
V=lengthOfBase*lengthOfBase*wallThickness
geomInert=(1./6.)*V*length*wallThickness
utils._commonBodySetup(b6,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=[0.0,0,0], noBound=False, resetState=True,fixed=True)
b6.dynamic=False
b6.state.pos = [-lengthOfBase/3.0,0.0,-lengthOfBase/3.0]
O.bodies.append(b6)
b7=Body()
wire=False
color=[0,255,0]
highlight=False
b7.shape=PotentialBlock(k=0.1, r=0.1*wallThickness, R=0.2*lengthOfBase,a=[1,-1,0,0,0,0], b=[0,0,1,-1,0,0], c=[0,0,0,0,1,-1], d=[lengthOfBase/6.0-r,lengthOfBase/6.0-r,0.5*wallThickness-r,0.5*wallThickness-r,lengthOfBase/6.0-r,lengthOfBase/6.0-r], id=count,isBoundary=True,isBoundaryPlane=[True,True,True,True,True,True],color=color ,wire=wire,highlight=highlight,AabbMinMax=True, minAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0),minAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0))
length=lengthOfBase
V=lengthOfBase*lengthOfBase*wallThickness
geomInert=(1./6.)*V*length*wallThickness
utils._commonBodySetup(b7,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=[0.0,0,0], noBound=False, resetState=True,fixed=True)
b7.dynamic=False
b7.state.pos = [-lengthOfBase/3.0,0.0,0.0]
O.bodies.append(b7)
b8=Body()
wire=False
color=[0,255,0]
highlight=False
b8.shape=PotentialBlock(k=0.1, r=0.1*wallThickness, R=0.2*lengthOfBase,a=[1,-1,0,0,0,0], b=[0,0,1,-1,0,0], c=[0,0,0,0,1,-1], d=[lengthOfBase/6.0-r,lengthOfBase/6.0-r,0.5*wallThickness-r,0.5*wallThickness-r,lengthOfBase/6.0-r,lengthOfBase/6.0-r], id=count,isBoundary=True,isBoundaryPlane=[True,True,True,True,True,True],color=color ,wire=wire,highlight=highlight,AabbMinMax=True, minAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabb=1.02*Vector3(lengthOfBase/6.0,0.4*wallThickness,lengthOfBase/6.0),maxAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0),minAabbRotated=1.02*Vector3(lengthOfBase/6.0,0.5*wallThickness,lengthOfBase/6.0))
length=lengthOfBase
V=lengthOfBase*lengthOfBase*wallThickness
geomInert=(1./6.)*V*length*wallThickness
utils._commonBodySetup(b8,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=[0.0,0,0], noBound=False, resetState=True,fixed=True)
b8.dynamic=False
b8.state.pos = [lengthOfBase/3.0,0.0,0.0]
O.bodies.append(b8)
bA=Body()
wire=False
color=[0,255,0]
highlight=False
bA.shape=PotentialBlock(k=0.1, r=0.1*wallThickness, R=0.5*heightOfBase,a=[1,-1,0,0,0,0], b=[0,0,1,-1,0,0], c=[0,0,0,0,1,-1], d=[0.5*wallThickness-r,0.5*wallThickness-r,0.5*heightOfBase-r,0.5*heightOfBase-r,0.5*lengthOfBase-r,0.5*lengthOfBase-r], id=count+1,isBoundary=True,isBoundaryPlane=[True,True,True,True,True,True],color=color ,wire=wire,highlight=highlight,AabbMinMax=True, minAabb=1.02*Vector3(0.3*wallThickness,0.5*heightOfBase,0.5*lengthOfBase),maxAabb=1.02*Vector3(0.3*wallThickness,0.5*heightOfBase,0.5*lengthOfBase),maxAabbRotated=1.02*Vector3(0.5*wallThickness,0.5*heightOfBase,0.5*lengthOfBase),minAabbRotated=1.02*Vector3(0.5*wallThickness,0.5*heightOfBase,0.5*lengthOfBase))
length=lengthOfBase
V=lengthOfBase*lengthOfBase*wallThickness
geomInert=(1./6.)*V*length*wallThickness
utils._commonBodySetup(bA,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=[0.0,0,0], noBound=False, resetState=True,fixed=True)
bA.dynamic=False
bA.state.pos = [0.5*lengthOfBase,0.5*heightOfBase,0]
O.bodies.append(bA)
bB=Body()
wire=False
color=[0,255,0]
highlight=False
bB.shape=PotentialBlock(k=0.1, r=0.1*wallThickness, R=0.5*heightOfBase,a=[1,-1,0,0,0,0], b=[0,0,1,-1,0,0], c=[0,0,0,0,1,-1], d=[0.5*wallThickness-r,0.5*wallThickness-r,0.5*heightOfBase-r,0.5*heightOfBase-r,0.5*lengthOfBase-r,0.5*lengthOfBase-r], id=count+2,isBoundary=True,isBoundaryPlane=[True,True,True,True,True,True],color=color ,wire=wire,highlight=highlight,AabbMinMax=True, minAabb=1.02*Vector3(0.3*wallThickness,0.5*heightOfBase,0.5*lengthOfBase),maxAabb=1.02*Vector3(0.3*wallThickness,0.5*heightOfBase,0.5*lengthOfBase),maxAabbRotated=1.02*Vector3(0.5*wallThickness,0.5*heightOfBase,0.5*lengthOfBase),minAabbRotated=1.02*Vector3(0.5*wallThickness,0.5*heightOfBase,0.5*lengthOfBase))
length=lengthOfBase
V=lengthOfBase*lengthOfBase*wallThickness
geomInert=(1./6.)*V*length*wallThickness
utils._commonBodySetup(bB,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=[0.0,0,0], noBound=False, resetState=True,fixed=True)
bB.dynamic=False
bB.state.pos = [-0.5*lengthOfBase,0.5*heightOfBase,0]
O.bodies.append(bB)
bC=Body()
wire=False
color=[0,255,0]
highlight=False
bC.shape=PotentialBlock(k=0.1, r=0.1*wallThickness, R=0.5*heightOfBase,a=[1,-1,0,0,0,0], b=[0,0,1,-1,0,0], c=[0,0,0,0,1,-1], d=[0.5*lengthOfBase-r,0.5*lengthOfBase-r,0.5*heightOfBase-r,0.5*heightOfBase-r,0.5*wallThickness-r,0.5*wallThickness-r], id=count+3,isBoundary=True,isBoundaryPlane=[True,True,True,True,True,True],color=color ,wire=wire,highlight=highlight,AabbMinMax=True, minAabb=1.02*Vector3(0.5*lengthOfBase,0.5*heightOfBase,0.3*wallThickness),maxAabb=1.02*Vector3(0.5*lengthOfBase,0.5*heightOfBase,0.3*wallThickness),maxAabbRotated=1.02*Vector3(0.5*lengthOfBase,0.5*heightOfBase,0.5*wallThickness),minAabbRotated=1.02*Vector3(0.5*lengthOfBase,0.5*heightOfBase,0.5*wallThickness))
length=lengthOfBase
V=lengthOfBase*lengthOfBase*wallThickness
geomInert=(1./6.)*V*length*wallThickness
utils._commonBodySetup(bC,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=[0.0,0,0], noBound=False, resetState=True,fixed=True)
bC.dynamic=False
bC.state.pos = [0,0.5*heightOfBase,0.5*lengthOfBase]
O.bodies.append(bC)
bD=Body()
wire=False
color=[0,255,0]
highlight=False
bD.shape=PotentialBlock(k=0.1, r=0.1*wallThickness, R=0.5*heightOfBase,a=[1,-1,0,0,0,0], b=[0,0,1,-1,0,0], c=[0,0,0,0,1,-1], d=[0.5*lengthOfBase-r,0.5*lengthOfBase-r,0.5*heightOfBase-r,0.5*heightOfBase-r,0.5*wallThickness-r,0.5*wallThickness-r], id=count+4,isBoundary=True,isBoundaryPlane=[True,True,True,True,True,True],color=color ,wire=wire,highlight=highlight,AabbMinMax=True, minAabb=1.02*Vector3(0.5*lengthOfBase,0.5*heightOfBase,0.3*wallThickness),maxAabb=1.02*Vector3(0.5*lengthOfBase,0.5*heightOfBase,0.3*wallThickness),maxAabbRotated=1.02*Vector3(0.5*lengthOfBase,0.5*heightOfBase,0.5*wallThickness),minAabbRotated=1.02*Vector3(0.5*lengthOfBase,0.5*heightOfBase,0.5*wallThickness))
length=lengthOfBase
V=lengthOfBase*lengthOfBase*wallThickness
geomInert=(1./6.)*V*length*wallThickness
utils._commonBodySetup(bD,V,Vector3(geomInert,geomInert,geomInert), material='frictionless',pos=[0.0,0,0], noBound=False, resetState=True,fixed=True)
bD.dynamic=False
bD.state.pos = [0.0,0.5*heightOfBase,-0.5*lengthOfBase]
O.bodies.append(bD)
escapeNo=0
def myAddPlotData():
global escapeNo
global wallThickness
global meanSize
uf=utils.unbalancedForce()
if isnan(uf):
uf = 1.0
KE = utils.kineticEnergy()
for b in O.bodies:
if b.state.pos[1] < -5.0*meanSize and b.dynamic==True:
escapeNo = escapeNo+1
O.bodies.erase(b.id)
if O.iter>12000:
removeLid()
plot.addData(timeStep1=O.iter,timeStep2=O.iter,timeStep3=O.iter,timeStep4=O.iter,time=O.time,unbalancedForce=uf,kineticEn=KE,outsideNo=escapeNo)
from yade import plot
plot.plots={'timeStep1':('unbalancedForce'),'timeStep2':('kineticEn'),'time':('outsideNo')}
O.engines=O.engines+[PyRunner(iterPeriod=10,command='myAddPlotData()')]
def removeLid():
global lidID
if (O.bodies[lidID]):
O.bodies.erase(lidID)
O.engines=O.engines+[PotentialBlockVTKRecorder(fileName='/home/boon/yadeRev/trunk/examples/PotentialParticles/vtk/cubeScaled',iterPeriod=1000,sampleX=50,sampleY=50,sampleZ=50)]
#for b in O.bodies:
# b.state.blockedDOFs=['rx','ry','rz','x','z']
#O.bodies[0].state.pos = [0,meanSize*10.0,0]
#O.bodies[0].state.vel =[0,0.0,0]
O.dt = 0.2*sqrt(O.bodies[0].state.mass*0.33333333/1.0e8)
#from yade import qt
#qt.Controller()
#qt.View()
#Gl1_PotentialBlock.sizeX = 30
#Gl1_PotentialBlock.sizeY = 30
#Gl1_PotentialBlock.sizeZ = 30
#from yade import qt
#qt.View()
import yade.timing
O.timingEnabled = True
yade.timing.reset()
#O.engines[2].geomDispatcher.functors[0].timingDeltas.data
#yade.timing.stats()
| anna-effeindzourou/trunk | examples/PotentialBlocks/cubePBscaled.py | Python | gpl-2.0 | 18,439 | ["VTK"] | 80d14675cc24b223bb4bd4c2ac6f4096159ad06d3b01c885d7830a10329e00e9 |
#=======================================================================
# Model.py
#=======================================================================
"""Base modeling components for building hardware description models.
This module contains a collection of classes that can be used to construct
PyMTL (pronounced py-metal) models. Once constructed, a PyMTL model can be
leveraged by a number of PyMTL tools for various purposes (simulation,
translation into HDLs, etc).
"""
from metaclasses import MetaCollectArgs
from ConnectionEdge import ConnectionEdge, PyMTLConnectError
from signals import Signal, InPort, OutPort, Wire, Constant
from signal_lists import PortList, WireList
from PortBundle import PortBundle
from ..datatypes import Bits
#from physical import PhysicalDimensions
import collections
import inspect
import warnings
import math
#=======================================================================
# Model
#=======================================================================
class Model( object ):
"""Base class for PyMTL hardware models.
Provides utility classes for elaborating connectivity between
components, giving instantiated subcomponents proper names, and
building datastructures that can be leveraged by various tools.
Any user implemented model that wishes to make use of the various
PyMTL tools should subclass this.
"""
__metaclass__ = MetaCollectArgs
_debug = False
#=====================================================================
# Modeling API
#=====================================================================
#---------------------------------------------------------------------
# __new__
#---------------------------------------------------------------------
def __new__( cls, *args, **kwargs ):
"""Pre-constructor adds default 'clk' and 'reset' Signals to PyMTL.
We use __new__ instead of __init__ for this purpose so that users
do not need to explicitly call the super constructor in their Model
definitions: ``super( Model, self ).__init__()`` is too ugly!
"""
# Instantiate a new Python object
inst = object.__new__( cls, *args, **kwargs )
# Add implicit clk and reset ports
inst.clk = InPort( 1 )
inst.reset = InPort( 1 )
# Initialize internal datastructures
inst._tick_blocks = []
inst._posedge_clk_blocks = []
inst._combinational_blocks = []
inst._connections = set()
return inst
#---------------------------------------------------------------------
# connect
#---------------------------------------------------------------------
def connect( self, left_port, right_port ):
"""Structurally connect a Signal to another Signal or a constant, or
two PortBundles with the same interface.
Ports structurally connected with s.connect are guaranteed to have
the same value during simulation:
>>> s.connect( s.port_a, s.port_b )
This works for slices as well:
>>> s.connect( s.port_c[0:4], s.port_d[4:8] )
A signal connected to a constant will be tied to that value:
>>> s.connect( s.port_e, 8 )
Several Signals can be structurally connected with a single
statement by encapsulating them in PortBundles:
>>> s.connect( s.my_bundle_a, s.my_bundle_b )
"""
# Throw an error if connect() is used on two wires.
if isinstance( left_port, Wire ) and isinstance( right_port, Wire ):
e = ('Connecting two Wire signals is not supported!\n'
'If you are certain this is something you really need to do, '
'use connect_wire instead:\n\n '
' # translation will fail if directionality is wrong!!!\n'
' connect_wire( dest = <s.out>, src = <s.in_> )'
)
frame, filename, lineno, func_name, lines, idx = inspect.stack()[1]
msg = '{}\n\nLine: {} in File: {}\n>'.format( e, lineno, filename )
msg += '>'.join( lines )
raise PyMTLConnectError( msg )
# Try to connect the two signals/portbundles
try:
if isinstance( left_port, PortBundle ):
self._connect_bundle( left_port, right_port )
else:
self._connect_signal( left_port, right_port )
# Throw a helpful error on failure
except PyMTLConnectError as e:
frame, filename, lineno, func_name, lines, idx = inspect.stack()[1]
msg = '{}\n\nLine: {} in File: {}\n>'.format( e, lineno, filename )
msg += '>'.join( lines )
raise PyMTLConnectError( msg )
#-----------------------------------------------------------------------
# connect_pairs
#-----------------------------------------------------------------------
def connect_pairs( self, *connections ):
"""Structurally connect pairs of signals.
This provides a more concise syntax for interconnecting large
numbers of Signals by allowing the user to supply a series of signals
which are to be connected pairwise.
>>> s.connect_pairs(
>>> s.mod.a, s.a,
>>> s.mod.b, 0b11,
>>> s.mod.c, s.x[4:5],
>>> )
Alternatively, a list of signals can be created and passed in:
>>> s.connect_pairs( *signal_list )
"""
# Throw an error if user tries to connect an odd number of signals
if len( connections ) & 1:
e = 'An odd number of signals were provided!'
frame, filename, lineno, func_name, lines, idx = inspect.stack()[1]
msg = '{}\n\nLine: {} in File: {}\n>'.format( e, lineno, filename )
msg += '>'.join( lines )
raise PyMTLConnectError( msg )
# Iterate through signals pairwise
citer = iter( connections )
for left_port, right_port in zip( citer, citer ):
try:
self.connect( left_port, right_port )
# Throw a helpful error if any pairwise connection fails
except PyMTLConnectError as e:
frame, filename, lineno, func_name, lines, idx = inspect.stack()[1]
msg = '{}\n\nLine: {} in File: {}\n>'.format( e, lineno, filename )
msg += '>'.join( lines )
raise PyMTLConnectError( msg )
#-----------------------------------------------------------------------
# connect_dict
#-----------------------------------------------------------------------
def connect_dict( self, connections ):
"""Structurally connect Signals given a dictionary mapping.
This provides a more concise syntax for interconnecting large
numbers of Signals by using a dictionary to specify Signal-to-Signal
connection mapping.
>>> s.connect_dict({
>>> s.mod.a : s.a,
>>> s.mod.b : 0b11,
>>> s.mod.c : s.x[4:5],
>>> })
"""
for left_port, right_port in connections.iteritems():
try:
self.connect( left_port, right_port )
# Throw a helpful error if any pairwise connection fails
except PyMTLConnectError as e:
frame, filename, lineno, func_name, lines, idx = inspect.stack()[1]
msg = '{}\n\nLine: {} in File: {}\n>'.format( e, lineno, filename )
msg += '>'.join( lines )
raise PyMTLConnectError( msg )
#---------------------------------------------------------------------
# connect_wire
#---------------------------------------------------------------------
def connect_wire( self, dest=None, src=None ):
"""Structurally connect two Wires where direction must be specified.
Directly connecting two Wires is not encouraged in PyMTL modeling.
Typically, intermediate Wire objects should not be needed for
structural connectivity since Ports can be directly connected, and
Wires are primarily intended for communicating values between
behavioral blocks within a Model.
If you are certain you want to structurally connect two Wires, you
must use connect_wire() to explicitly specify the directionality.
>>> s.connect_wire( dest = s.port_a, src = s.port_b )
"""
self._connect_signal( src, dest ) # expects the src first
#-----------------------------------------------------------------------
# connect_auto
#-----------------------------------------------------------------------
def connect_auto( self, m1, m2 = None ):
"""EXPERIMENTAL. Try to connect all InPort and OutPorts with the
same name between two models.
Note that this is not heavily tested or stable; you probably don't
want to use this...
"""
if not hasattr( self, '_auto_connects' ):
self._auto_connects = []
self._auto_connects.append((m1,m2))
#---------------------------------------------------------------------
# tick_fl
#---------------------------------------------------------------------
def tick_fl( self, func ):
"""Decorator to mark a functional-level sequential function.
Logic blocks marked with @s.tick_fl fire every clock cycle and may
utilize components using the greenlet concurrency library. They are
not Verilog translatable.
>>> @s.tick_fl
>>> def my_logic():
>>> s.out.next = s.in_
"""
return self.tick( func )
#---------------------------------------------------------------------
# tick_cl
#---------------------------------------------------------------------
def tick_cl( self, func ):
"""Decorator to mark a cycle-level sequential logic.
Logic blocks marked with @s.tick_cl fire every clock cycle. They
cannot take advantage of greenlets-enabled library components, nor
are they Verilog translatable.
>>> @s.tick_cl
>>> def my_logic():
>>> s.out.next = s.in_
"""
return self.tick( func )
#---------------------------------------------------------------------
# tick_rtl
#---------------------------------------------------------------------
def tick_rtl( self, func ):
"""Decorator to mark register-transfer level sequential logic.
Logic blocks marked with @s.tick_rtl fire every clock cycle, and are
meant to be Verilog translatable. Note that translation only works
if a limited, translatable subset of Python is used.
>>> @s.tick_rtl
>>> def my_logic():
>>> s.out.next = s.in_
"""
return self.posedge_clk( func )
#---------------------------------------------------------------------
# combinational
#---------------------------------------------------------------------
def combinational( self, func ):
"""Decorator to annotate an RTL combinational function.
Logic blocks marked with @s.combinational only fire when the value
of Signals in their sensitivity list change. This sensitivity list is
detected automatically by the simulation tool.
>>> @s.combinational
>>> def my_logic():
>>> s.out.value = s.in_
"""
# DEBUG, make permanent to aid debugging?
#func.func_name = self.name + '.' + func.func_name
self._combinational_blocks.append( func )
func._model = self
return func
#---------------------------------------------------------------------
# posedge_clk
#---------------------------------------------------------------------
def posedge_clk( self, func ):
"""Decorator to mark register-transfer level sequential logic.
(This is an alias for @tick_rtl).
>>> @s.posedge_clk
>>> def my_logic():
>>> s.out.next = s.in_
"""
self._posedge_clk_blocks.append( func )
func._model = self
return func
#---------------------------------------------------------------------
# tick
#---------------------------------------------------------------------
def tick( self, func ):
"""Decorator to mark a cycle-level sequential logic.
(This is an alias for @tick_cl).
"""
self._tick_blocks.append( func )
func._model = self
return func
#-----------------------------------------------------------------------
# line_trace
#-----------------------------------------------------------------------
def line_trace( self ):
"""Returns a one-line string concisely describing Model state for
the current simulation cycle.
Model subclasses should implement this method if they would like
useful debug output when line-tracing is enabled in the simulator.
"""
return ""
#---------------------------------------------------------------------
# elaborate_logic
#---------------------------------------------------------------------
def elaborate_logic( self ):
"""DEPRECATED. An abstract method Model subclasses previously
defined to implement elaboration logic.
"""
pass
#=====================================================================
# Tool API
#=====================================================================
#---------------------------------------------------------------------
# elaborate
#---------------------------------------------------------------------
def elaborate( self ):
"""Elaborate a PyMTL model (construct hierarchy, name modules, etc).
The elaborate() function **must** be called on any instantiated
top-level model before it is passed to any PyMTL tools.
"""
# Initialize data structure to hold all model classes in the design
self._model_classes = set()
# Recursively elaborate each model in the design, starting with top
self._recurse_elaborate( self, 'top' )
# Visit all connections in the design, set directionality
self._recurse_connections()
#-----------------------------------------------------------------------
# is_elaborated
#-----------------------------------------------------------------------
def is_elaborated( self):
"""Returns 'True' is elaborate() has been called on this Model."""
return hasattr( self, 'class_name' )
#---------------------------------------------------------------------
# Getters
#---------------------------------------------------------------------
def get_inports( self ):
"""Get a list of all InPorts defined by this model."""
return self._inports
def get_outports( self ):
"""Get a list of all OutPorts defined by this model."""
return self._outports
def get_ports( self, preserve_hierarchy=False ):
"""Get a list of all InPorts and OutPorts defined by this model.
By default this list is a flattened list of all InPort and OutPort
objects, including those within PortBundles and PortLists. To return
a list which preserves this hierarchy, use preserve_hierarchy=True.
"""
if not preserve_hierarchy:
return self._inports + self._outports
else:
return self._hports
def get_wires( self ):
"""Get a list of all Wires defined in this model."""
return self._wires
def get_submodules( self ):
"""Get a list of all child Models instaniated in this model."""
return self._submodules
def get_connections( self ):
"""Get a list of all structural connections made in this model."""
return self._connections
def get_tick_blocks( self ):
"""Get a list of all sequential (@tick_*) blocks defined in this
model."""
return self._tick_blocks
def get_posedge_clk_blocks( self ):
"""Get a list of all sequential (@tick_rtl) blocks defined in this
model."""
return self._posedge_clk_blocks
def get_combinational_blocks( self ):
"""Get a list of all combinational (@combinational) blocks defined
in this model."""
return self._combinational_blocks
#=====================================================================
# Internal Methods
#=====================================================================
#---------------------------------------------------------------------
# _recurse_elaborate
#---------------------------------------------------------------------
def _recurse_elaborate( self, current_model, instance_name ):
"""Use reflection to set model attributes and elaborate
submodels."""
if current_model.is_elaborated():
raise Exception("Model {} has already been elaborated!"
.format( current_model.class_name ) )
# Add the target model to the set of all models
self._model_classes.add( current_model )
# Set Public Attributes
current_model.class_name = self._gen_class_name( current_model )
current_model.parent = None
current_model.name = instance_name
# DEPRECATED: Call user implemented elaborate_logic() function
current_model.elaborate_logic()
# Initialize lists for signals, submodules and connections
current_model._wires = []
current_model._inports = []
current_model._outports = []
current_model._hports = []
current_model._submodules = []
# Disable line tracing by default
current_model._line_trace_en = False
if not hasattr( current_model, '_newsenses' ):
current_model._newsenses = collections.defaultdict( list )
# Inspect all user defined model attributes (signals, signal lists,
# submodels, etc). Set their names, parents, and add them to the
# appropriate private attribute lists.
# TODO: do all ports first?
for name, obj in current_model.__dict__.items():
if not name.startswith( '_' ):
self._check_type( current_model, name, obj )
if hasattr( current_model, '_auto_connects' ):
current_model._auto_connect()
#---------------------------------------------------------------------
# _check_type
#---------------------------------------------------------------------
def _check_type( self, current_model, name, obj, nested=False ):
"""Specialize elaboration actions based on object type."""
if isinstance( obj, Wire ):
obj.name = name
obj.parent = current_model
current_model._wires += [ obj ]
elif isinstance( obj, InPort ):
obj.name = name
obj.parent = current_model
current_model._inports += [ obj ]
if not nested:
current_model._hports += [ obj ]
elif isinstance( obj, OutPort ):
obj.name = name
obj.parent = current_model
current_model._outports += [ obj ]
if not nested:
current_model._hports += [ obj ]
# TODO: clean this up...
elif isinstance( obj, PortBundle ):
obj.name = name
for port in obj.get_ports():
self._check_type(current_model, name+'.'+port.name, port, nested=True)
if not nested:
current_model._hports += [ obj ]
# Submodules
elif isinstance( obj, Model ):
# TODO: remove, throw an exception in _recurse_elaborate
if obj.is_elaborated():
warnings.warn( "Model '{}::{}' has two parents!!!"
.format( obj.__class__.__name__, name ) )
return
# Recursively call elaborate() on the submodule
self._recurse_elaborate( obj, name )
# Set attributes
obj.parent = current_model
current_model._submodules += [ obj ]
# Structurally connect the clk and reset signals
obj.parent.connect( obj.clk, obj.parent.clk )
obj.parent.connect( obj.reset, obj.parent.reset )
# Lists of Signals
elif isinstance( obj, list ):
if obj and isinstance( obj[0], Wire):
obj = WireList( obj )
obj.name = name
assert '.' not in name
setattr( current_model, name, obj )
if obj and isinstance( obj[0], (InPort,OutPort, PortBundle)):
temp = PortList( obj )
temp._ports = obj
obj = temp
obj.name = name
assert '.' not in name
setattr( current_model, name, obj )
if not nested:
current_model._hports += [ obj ]
# Iterate through each item in the list and recursively call the
# _check_type() utility function
for i, item in enumerate(obj):
item_name = "%s[%d]" % (name, i)
self._check_type( current_model, item_name, item, nested=True )
#---------------------------------------------------------------------
# _gen_class_name
#---------------------------------------------------------------------
def _gen_class_name( self, model ):
"""Generate a unique class name for model instances."""
# Base name is always just the class name
name = model.__class__.__name__
# TODO: huge hack, fix this!
for key, value in model._args.items():
if isinstance( value, Bits.Bits ):
#print('\nWARNING: assuming Bits parameter wants .nbits, not .value')
model._args[ key ] = value.nbits
# Generate a unique name for the Model instance based on its params
# http://stackoverflow.com/a/5884123
try:
hashables = frozenset({ x for x in model._args.items()
if isinstance( x[1], collections.Hashable ) })
suffix = abs( hash( hashables ) )
return name + '_' + hex( suffix )
# No _args attribute, so no need to create a specialized name
except AttributeError:
return name
#---------------------------------------------------------------------
# _recurse_connections
#---------------------------------------------------------------------
# Set the directionality on all connections in the design of a Model.
def _recurse_connections(self):
"""Set the directionality on all connections in the model."""
# Set direction of all connections
for c in self._connections:
c.set_edge_direction()
# Recursively enter submodules
for submodule in self._submodules:
submodule._recurse_connections()
#---------------------------------------------------------------------
# _connect_signal
#---------------------------------------------------------------------
def _connect_signal( self, left_port, right_port ):
"""Connect a single pair of Signal objects."""
# Can't connect a port to itself!
assert left_port != right_port
# Create the connection
connection_edge = ConnectionEdge( left_port, right_port )
# Add the connection to the Model's connection list
if not connection_edge:
raise Exception( "Invalid Connection!")
self._connections.add( connection_edge )
#-----------------------------------------------------------------------
# _connect_bundle
#-----------------------------------------------------------------------
def _connect_bundle( self, left_bundle, right_bundle ):
"""Connect all Signal object pairs in a PortBundle."""
# Can't connect a port to itself!
assert left_bundle != right_bundle
ports = zip( left_bundle.get_ports(), right_bundle.get_ports() )
for left, right in ports:
self._connect_signal( left, right )
#-----------------------------------------------------------------------
# _auto_connect
#-----------------------------------------------------------------------
def _auto_connect(self):
"""Connect all InPorts and OutPorts in the parent and one or more
child modules based on the port name."""
def port_dict( lst ):
dictionary = {}
for port in lst:
if not port.name == 'clk' and not port.name == 'reset':
dictionary[port.name] = port
return dictionary
for m1,m2 in self._auto_connects:
dict1 = port_dict( self.get_inports()+self.get_outports() )
dict2 = port_dict( m1.get_ports() )
dict3 = port_dict( m2.get_ports() if m2 is not None else [] )
for key in dict1:
if key in dict2:
self.connect(dict1[key],dict2[key])
del dict2[key]
if key in dict3:
self.connect(dict1[key],dict3[key])
del dict3[key]
for key in dict3:
if key in dict2:
self.connect(dict2[key], dict3[key])
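The connection and decorator APIs documented above come together in a small
user-defined model. The sketch below is illustrative only: it is not part of
Model.py, and it assumes the classic `from pymtl import *` namespace that
exposes Model, InPort and OutPort.

from pymtl import *   # assumption: classic PyMTL namespace (Model, InPort, OutPort)

class AdderSketch( Model ):
  """Hypothetical two-input adder used only to illustrate the Model API."""
  def __init__( s, nbits=8 ):
    s.in0 = InPort ( nbits )
    s.in1 = InPort ( nbits )
    s.out = OutPort( nbits )

    # Combinational block: fires whenever a signal it reads changes.
    @s.combinational
    def add_logic():
      s.out.value = s.in0 + s.in1

# elaborate() must be called on the top-level model before any PyMTL tool sees it.
model = AdderSketch( 8 )
model.elaborate()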
| Abhinav117/pymtl | pymtl/model/Model.py | Python | bsd-3-clause | 23,622 | ["VisIt"] | 655a4587036815a476ba4dff20857bf647e1fc4d046c612e37369a7d0d3c5f63 |
"""
pygments.lexers.boa
~~~~~~~~~~~~~~~~~~~
Lexers for the Boa language.
:copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, words
from pygments.token import String, Comment, Keyword, Name, Number, Text, \
Operator, Punctuation
__all__ = ['BoaLexer']
line_re = re.compile('.*?\n')
class BoaLexer(RegexLexer):
"""
Lexer for the `Boa <http://boa.cs.iastate.edu/docs/>`_ language.
.. versionadded:: 2.4
"""
name = 'Boa'
aliases = ['boa']
filenames = ['*.boa']
reserved = words(
('input', 'output', 'of', 'weight', 'before', 'after', 'stop',
'ifall', 'foreach', 'exists', 'function', 'break', 'switch', 'case',
'visitor', 'default', 'return', 'visit', 'while', 'if', 'else'),
suffix=r'\b', prefix=r'\b')
keywords = words(
('bottom', 'collection', 'maximum', 'mean', 'minimum', 'set', 'sum',
'top', 'string', 'int', 'bool', 'float', 'time', 'false', 'true',
'array', 'map', 'stack', 'enum', 'type'), suffix=r'\b', prefix=r'\b')
classes = words(
('Project', 'ForgeKind', 'CodeRepository', 'Revision', 'RepositoryKind',
'ChangedFile', 'FileKind', 'ASTRoot', 'Namespace', 'Declaration', 'Type',
'Method', 'Variable', 'Statement', 'Expression', 'Modifier',
'StatementKind', 'ExpressionKind', 'ModifierKind', 'Visibility',
'TypeKind', 'Person', 'ChangeKind'),
suffix=r'\b', prefix=r'\b')
operators = ('->', ':=', ':', '=', '<<', '!', '++', '||',
'&&', '+', '-', '*', ">", "<")
string_sep = ('`', '\"')
built_in_functions = words(
(
# Array functions
'new', 'sort',
# Date & Time functions
'yearof', 'dayofyear', 'hourof', 'minuteof', 'secondof', 'now',
'addday', 'addmonth', 'addweek', 'addyear', 'dayofmonth', 'dayofweek',
'dayofyear', 'formattime', 'trunctoday', 'trunctohour', 'trunctominute',
'trunctomonth', 'trunctosecond', 'trunctoyear',
# Map functions
'clear', 'haskey', 'keys', 'lookup', 'remove', 'values',
# Math functions
'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh',
'ceil', 'cos', 'cosh', 'exp', 'floor', 'highbit', 'isfinite', 'isinf',
'isnan', 'isnormal', 'log', 'log10', 'max', 'min', 'nrand', 'pow',
'rand', 'round', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc',
# Other functions
'def', 'hash', 'len',
# Set functions
'add', 'contains', 'remove',
# String functions
'format', 'lowercase', 'match', 'matchposns', 'matchstrs', 'regex',
'split', 'splitall', 'splitn', 'strfind', 'strreplace', 'strrfind',
'substring', 'trim', 'uppercase',
# Type Conversion functions
'bool', 'float', 'int', 'string', 'time',
# Domain-Specific functions
'getast', 'getsnapshot', 'hasfiletype', 'isfixingrevision', 'iskind',
'isliteral',
),
prefix=r'\b',
suffix=r'\(')
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(reserved, Keyword.Reserved),
(built_in_functions, Name.Function),
(keywords, Keyword.Type),
(classes, Name.Classes),
(words(operators), Operator),
(r'[][(),;{}\\.]', Punctuation),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
(r"`(\\\\|\\[^\\]|[^`\\])*`", String.Backtick),
(words(string_sep), String.Delimiter),
(r'[a-zA-Z_]+', Name.Variable),
(r'[0-9]+', Number.Integer),
(r'\s+?', Text), # Whitespace
]
}
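As a quick sanity check, the lexer above can be driven through the standard
Pygments entry points. This is a minimal sketch; the Boa snippet itself is
made up purely for illustration.

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import get_lexer_by_name

# A made-up Boa snippet, just enough to exercise keywords, types and output.
code = 'p: Project = input;\ncounts: output sum[string] of int;\n'

lexer = get_lexer_by_name('boa')   # resolves to BoaLexer via its 'boa' alias
print(highlight(code, lexer, TerminalFormatter()))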
| sonntagsgesicht/regtest | .aux/venv/lib/python3.9/site-packages/pygments/lexers/boa.py | Python | apache-2.0 | 3,946 | ["VisIt"] | a78f9d0bd0961c3d1caf93157ed63b1f5d53ad7b6af7c3b80db113749809985d |
"""
Validator project
admin.py
Copyright (c) 2009 Brian Shumate
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from surfbot.validator.models import Website
from django.contrib import admin
class WebsiteAdmin(admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['org']}),
(None, {'fields': ['rooturl']}),
# ('Validation', {'fields': ['checkok', 'htmlval', 'cssval', 'accessval', 'linksval'], 'classes': ['collapse']}),
('Validation', {'fields': ['checkok', 'htmlval', 'cssval', 'accessval', 'linksval']}),
('Metrics', {'fields': ['lastcheck', 'nextcheck','checktotal']}),
]
admin.site.register(Website, WebsiteAdmin)
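The fieldsets above imply the rough shape of the Website model being
registered. The sketch below is a guess for orientation only: the field names
come from WebsiteAdmin, but every field type is an assumption and may differ
from the real surfbot.validator.models module.

from django.db import models

class Website(models.Model):
    # Field names taken from WebsiteAdmin above; all types are assumptions.
    org = models.CharField(max_length=255)          # owning organization (assumed)
    rooturl = models.URLField()                     # site root URL (assumed)
    checkok = models.BooleanField(default=False)    # did the last overall check pass?
    htmlval = models.BooleanField(default=False)    # HTML validation result
    cssval = models.BooleanField(default=False)     # CSS validation result
    accessval = models.BooleanField(default=False)  # accessibility check result
    linksval = models.BooleanField(default=False)   # link check result
    lastcheck = models.DateTimeField(null=True, blank=True)
    nextcheck = models.DateTimeField(null=True, blank=True)
    checktotal = models.IntegerField(default=0)     # number of checks performed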
| brianshumate/uniweb | surfbot/validator/admin.py | Python | bsd-2-clause | 1,649 | ["Brian"] | 09bbfe7d25c21030990a2f09a6f8951a639d5d983d796b279c97ab1142bafa9f |
# sql/elements.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Core SQL expression elements, including :class:`.ClauseElement`,
:class:`.ColumnElement`, and derived classes.
"""
from __future__ import unicode_literals
from .. import util, exc, inspection
from . import type_api
from . import operators
from .visitors import Visitable, cloned_traverse, traverse
from .annotation import Annotated
import itertools
from .base import Executable, PARSE_AUTOCOMMIT, Immutable, NO_ARG
from .base import _generative, Generative
import re
import operator
def _clone(element, **kw):
return element._clone()
def collate(expression, collation):
"""Return the clause ``expression COLLATE collation``.
e.g.::
collate(mycolumn, 'utf8_bin')
produces::
mycolumn COLLATE utf8_bin
"""
expr = _literal_as_binds(expression)
return BinaryExpression(
expr,
_literal_as_text(collation),
operators.collate, type_=expr.type)
def between(expr, lower_bound, upper_bound):
"""Produce a ``BETWEEN`` predicate clause.
E.g.::
from sqlalchemy import between
stmt = select([users_table]).where(between(users_table.c.id, 5, 7))
Would produce SQL resembling::
SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2
The :func:`.between` function is a standalone version of the
:meth:`.ColumnElement.between` method available on all
SQL expressions, as in::
stmt = select([users_table]).where(users_table.c.id.between(5, 7))
All arguments passed to :func:`.between`, including the left side
column expression, are coerced from Python scalar values if
the value is not a :class:`.ColumnElement` subclass. For example,
three fixed values can be compared as in::
print(between(5, 3, 7))
Which would produce::
:param_1 BETWEEN :param_2 AND :param_3
:param expr: a column expression, typically a :class:`.ColumnElement`
instance or alternatively a Python scalar expression to be coerced
into a column expression, serving as the left side of the ``BETWEEN``
expression.
:param lower_bound: a column or Python scalar expression serving as the lower
bound of the right side of the ``BETWEEN`` expression.
:param upper_bound: a column or Python scalar expression serving as the
upper bound of the right side of the ``BETWEEN`` expression.
.. seealso::
:meth:`.ColumnElement.between`
"""
expr = _literal_as_binds(expr)
return expr.between(lower_bound, upper_bound)
def literal(value, type_=None):
"""Return a literal clause, bound to a bind parameter.
Literal clauses are created automatically when non- :class:`.ClauseElement`
objects (such as strings, ints, dates, etc.) are used in a comparison
operation with a :class:`.ColumnElement`
subclass, such as a :class:`~sqlalchemy.schema.Column` object.
Use this function to force the
generation of a literal clause, which will be created as a
:class:`BindParameter` with a bound value.
:param value: the value to be bound. Can be any Python object supported by
the underlying DB-API, or is translatable via the given type argument.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which
will provide bind-parameter translation for this literal.
"""
return BindParameter(None, value, type_=type_, unique=True)
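# Hedged illustration (not part of this module): forcing the left-hand side of
# a comparison to be a bound literal rather than a column, e.g.
#
#   >>> from sqlalchemy.sql import column, literal
#   >>> print(literal('wendy') == column('name'))
#   :param_1 = name
#
# The ':param_1' placeholder carries the value 'wendy' at execution time.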
def type_coerce(expression, type_):
"""Associate a SQL expression with a particular type, without rendering
``CAST``.
E.g.::
from sqlalchemy import type_coerce
stmt = select([type_coerce(log_table.date_string, StringDateTime())])
The above construct will produce SQL that is usually otherwise unaffected
by the :func:`.type_coerce` call::
SELECT date_string FROM log
However, when result rows are fetched, the ``StringDateTime`` type
will be applied to result rows on behalf of the ``date_string`` column.
A type that features bound-value handling will also have that behavior
take effect when literal values or :func:`.bindparam` constructs are
passed to :func:`.type_coerce` as targets.
For example, if a type implements the :meth:`.TypeEngine.bind_expression`
method or :meth:`.TypeEngine.bind_processor` method or equivalent,
these functions will take effect at statement compilation/execution time
when a literal value is passed, as in::
# bound-value handling of MyStringType will be applied to the
# literal value "some string"
stmt = select([type_coerce("some string", MyStringType)])
:func:`.type_coerce` is similar to the :func:`.cast` function,
except that it does not render the ``CAST`` expression in the resulting
statement.
:param expression: A SQL expression, such as a :class:`.ColumnElement` expression
or a Python string which will be coerced into a bound literal value.
:param type_: A :class:`.TypeEngine` class or instance indicating
the type to which the expression is coerced.
.. seealso::
:func:`.cast`
"""
type_ = type_api.to_instance(type_)
if hasattr(expression, '__clause_element__'):
return type_coerce(expression.__clause_element__(), type_)
elif isinstance(expression, BindParameter):
bp = expression._clone()
bp.type = type_
return bp
elif not isinstance(expression, Visitable):
if expression is None:
return Null()
else:
return literal(expression, type_=type_)
else:
return Label(None, expression, type_=type_)
def outparam(key, type_=None):
"""Create an 'OUT' parameter for usage in functions (stored procedures),
for databases which support them.
The ``outparam`` can be used like a regular function parameter.
The "output" value will be available from the
:class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters``
attribute, which returns a dictionary containing the values.
"""
return BindParameter(
key, None, type_=type_, unique=False, isoutparam=True)
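# Hedged usage sketch (the procedure and parameter names are made up, and it
# assumes ``from sqlalchemy import text, Integer``): OUT parameters are
# typically combined with a textual statement on a backend such as Oracle:
#
#   stmt = text("BEGIN my_proc(:x_out); END;").bindparams(
#       outparam("x_out", type_=Integer))
#   result = connection.execute(stmt)
#   value = result.out_parameters["x_out"]
#
# where ``out_parameters`` is the ResultProxy attribute mentioned above.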
def not_(clause):
"""Return a negation of the given clause, i.e. ``NOT(clause)``.
The ``~`` operator is also overloaded on all
:class:`.ColumnElement` subclasses to produce the
same result.
"""
return operators.inv(_literal_as_binds(clause))
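# Hedged illustration (assuming a 'users' table with a 'name' column): not_()
# and the unary '~' operator produce the same negated clause, e.g.
#
#   >>> print(not_(users.c.name == 'wendy'))
#   users.name != :name_1
#   >>> print(~(users.c.name == 'wendy'))
#   users.name != :name_1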
@inspection._self_inspects
class ClauseElement(Visitable):
"""Base class for elements of a programmatically constructed SQL
expression.
"""
__visit_name__ = 'clause'
_annotations = {}
supports_execution = False
_from_objects = []
bind = None
_is_clone_of = None
is_selectable = False
is_clause_element = True
_order_by_label_element = None
def _clone(self):
"""Create a shallow copy of this ClauseElement.
This method may be used by a generative API. It is also used as
part of the "deep" copy afforded by a traversal that combines
the _copy_internals() method.
"""
c = self.__class__.__new__(self.__class__)
c.__dict__ = self.__dict__.copy()
ClauseElement._cloned_set._reset(c)
ColumnElement.comparator._reset(c)
# this is a marker that helps to "equate" clauses to each other
# when a Select returns its list of FROM clauses. the cloning
# process leaves around a lot of remnants of the previous clause
# typically in the form of column expressions still attached to the
# old table.
c._is_clone_of = self
return c
@property
def _constructor(self):
"""return the 'constructor' for this ClauseElement.
This is for the purposes for creating a new object of
this type. Usually, it's just the element's __class__.
However, the "Annotated" version of the object overrides
to return the class of its proxied element.
"""
return self.__class__
@util.memoized_property
def _cloned_set(self):
"""Return the set consisting all cloned ancestors of this
ClauseElement.
Includes this ClauseElement. This accessor tends to be used for
FromClause objects to identify 'equivalent' FROM clauses, regardless
of transformative operations.
"""
s = util.column_set()
f = self
while f is not None:
s.add(f)
f = f._is_clone_of
return s
def __getstate__(self):
d = self.__dict__.copy()
d.pop('_is_clone_of', None)
return d
def _annotate(self, values):
"""return a copy of this ClauseElement with annotations
updated by the given dictionary.
"""
return Annotated(self, values)
def _with_annotations(self, values):
"""return a copy of this ClauseElement with annotations
replaced by the given dictionary.
"""
return Annotated(self, values)
def _deannotate(self, values=None, clone=False):
"""return a copy of this :class:`.ClauseElement` with annotations
removed.
:param values: optional tuple of individual values
to remove.
"""
if clone:
# clone is used when we are also copying
# the expression for a deep deannotation
return self._clone()
else:
# if no clone, since we have no annotations we return
# self
return self
def _execute_on_connection(self, connection, multiparams, params):
return connection._execute_clauseelement(self, multiparams, params)
def unique_params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Same functionality as ``params()``, except adds `unique=True`
to affected bind parameters so that multiple statements can be
used.
"""
return self._params(True, optionaldict, kwargs)
def params(self, *optionaldict, **kwargs):
"""Return a copy with :func:`bindparam()` elements replaced.
Returns a copy of this ClauseElement with :func:`bindparam()`
elements replaced with values taken from the given dictionary::
>>> clause = column('x') + bindparam('foo')
>>> print clause.compile().params
{'foo':None}
>>> print clause.params({'foo':7}).compile().params
{'foo':7}
"""
return self._params(False, optionaldict, kwargs)
def _params(self, unique, optionaldict, kwargs):
if len(optionaldict) == 1:
kwargs.update(optionaldict[0])
elif len(optionaldict) > 1:
raise exc.ArgumentError(
"params() takes zero or one positional dictionary argument")
def visit_bindparam(bind):
if bind.key in kwargs:
bind.value = kwargs[bind.key]
bind.required = False
if unique:
bind._convert_to_unique()
return cloned_traverse(self, {}, {'bindparam': visit_bindparam})
def compare(self, other, **kw):
"""Compare this ClauseElement to the given ClauseElement.
Subclasses should override the default behavior, which is a
straight identity comparison.
\**kw are arguments consumed by subclass compare() methods and
may be used to modify the criteria for comparison.
(see :class:`.ColumnElement`)
"""
return self is other
def _copy_internals(self, clone=_clone, **kw):
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
The given clone function should be used, which may be applying
additional transformations to the element (i.e. replacement
traversal, cloned traversal, annotations).
"""
pass
def get_children(self, **kwargs):
"""Return immediate child elements of this :class:`.ClauseElement`.
This is used for visit traversal.
\**kwargs may contain flags that change the collection that is
returned, for example to return a subset of items in order to
cut down on larger traversals, or to return child items from a
different context (such as schema-level collections instead of
clause-level).
"""
return []
def self_group(self, against=None):
"""Apply a 'grouping' to this :class:`.ClauseElement`.
This method is overridden by subclasses to return a
"grouping" construct, i.e. parenthesis. In particular
it's used by "binary" expressions to provide a grouping
around themselves when placed into a larger expression,
as well as by :func:`.select` constructs when placed into
the FROM clause of another :func:`.select`. (Note that
subqueries should be normally created using the
:meth:`.Select.alias` method, as many platforms require
nested SELECT statements to be named).
As expressions are composed together, the application of
:meth:`self_group` is automatic - end-user code should never
need to use this method directly. Note that SQLAlchemy's
clause constructs take operator precedence into account -
so parenthesis might not be needed, for example, in
an expression like ``x OR (y AND z)`` - AND takes precedence
over OR.
The base :meth:`self_group` method of :class:`.ClauseElement`
just returns self.
"""
return self
@util.dependencies("sqlalchemy.engine.default")
def compile(self, default, bind=None, dialect=None, **kw):
"""Compile this SQL expression.
The return value is a :class:`~.Compiled` object.
Calling ``str()`` or ``unicode()`` on the returned value will yield a
string representation of the result. The
:class:`~.Compiled` object also can return a
dictionary of bind parameter names and values
using the ``params`` accessor.
:param bind: An ``Engine`` or ``Connection`` from which a
``Compiled`` will be acquired. This argument takes precedence over
this :class:`.ClauseElement`'s bound engine, if any.
:param column_keys: Used for INSERT and UPDATE statements, a list of
column names which should be present in the VALUES clause of the
compiled statement. If ``None``, all columns from the target table
object are rendered.
:param dialect: A ``Dialect`` instance from which a ``Compiled``
will be acquired. This argument takes precedence over the `bind`
argument as well as this :class:`.ClauseElement`'s bound engine, if
any.
:param inline: Used for INSERT statements, for a dialect which does
not support inline retrieval of newly generated primary key
columns, will force the expression used to create the new primary
key value to be rendered inline within the INSERT statement's
VALUES clause. This typically refers to Sequence execution but may
also refer to any server-side default generation function
associated with a primary key `Column`.
"""
if not dialect:
if bind:
dialect = bind.dialect
elif self.bind:
dialect = self.bind.dialect
bind = self.bind
else:
dialect = default.DefaultDialect()
return self._compiler(dialect, bind=bind, **kw)
def _compiler(self, dialect, **kw):
"""Return a compiler appropriate for this ClauseElement, given a
Dialect."""
return dialect.statement_compiler(dialect, self, **kw)
def __str__(self):
if util.py3k:
return str(self.compile())
else:
return unicode(self.compile()).encode('ascii', 'backslashreplace')
def __and__(self, other):
return and_(self, other)
def __or__(self, other):
return or_(self, other)
def __invert__(self):
if hasattr(self, 'negation_clause'):
return self.negation_clause
else:
return self._negate()
def __bool__(self):
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
def _negate(self):
return UnaryExpression(
self.self_group(against=operators.inv),
operator=operators.inv,
negate=None)
def __repr__(self):
friendly = getattr(self, 'description', None)
if friendly is None:
return object.__repr__(self)
else:
return '<%s.%s at 0x%x; %s>' % (
self.__module__, self.__class__.__name__, id(self), friendly)
class ColumnElement(ClauseElement, operators.ColumnOperators):
"""Represent a column-oriented SQL expression suitable for usage in the
"columns" clause, WHERE clause etc. of a statement.
While the most familiar kind of :class:`.ColumnElement` is the
:class:`.Column` object, :class:`.ColumnElement` serves as the basis
for any unit that may be present in a SQL expression, including
the expressions themselves, SQL functions, bound parameters,
literal expressions, keywords such as ``NULL``, etc.
:class:`.ColumnElement` is the ultimate base class for all such elements.
A wide variety of SQLAlchemy Core functions work at the SQL expression level,
and are intended to accept instances of :class:`.ColumnElement` as arguments.
These functions will typically document that they accept a "SQL expression"
as an argument. What this means in terms of SQLAlchemy usually refers
to an input which is either already in the form of a :class:`.ColumnElement`
object, or a value which can be **coerced** into one. The coercion
rules followed by most, but not all, SQLAlchemy Core functions with regards
to SQL expressions are as follows:
* a literal Python value, such as a string, integer or floating
point value, boolean, datetime, ``Decimal`` object, or virtually
any other Python object, will be coerced into a "literal bound value".
This generally means that a :func:`.bindparam` will be produced
featuring the given value embedded into the construct; the resulting
:class:`.BindParameter` object is an instance of :class:`.ColumnElement`.
The Python value will ultimately be sent to the DBAPI at execution time as a
parameterized argument to the ``execute()`` or ``executemany()`` methods,
after SQLAlchemy type-specific converters (e.g. those provided by
any associated :class:`.TypeEngine` objects) are applied to the value.
* any special object value, typically ORM-level constructs, which feature
a method called ``__clause_element__()``. The Core expression system
looks for this method when an object of otherwise unknown type is passed
to a function that is looking to coerce the argument into a :class:`.ColumnElement`
expression. The ``__clause_element__()`` method, if present, should
return a :class:`.ColumnElement` instance. The primary use of
``__clause_element__()`` within SQLAlchemy is that of class-bound attributes
on ORM-mapped classes; a ``User`` class which contains a mapped attribute
named ``.name`` will have a method ``User.name.__clause_element__()``
which when invoked returns the :class:`.Column` called ``name`` associated
with the mapped table.
* The Python ``None`` value is typically interpreted as ``NULL``, which
in SQLAlchemy Core produces an instance of :func:`.null`.
A :class:`.ColumnElement` provides the ability to generate new
:class:`.ColumnElement`
objects using Python expressions. This means that Python operators
such as ``==``, ``!=`` and ``<`` are overloaded to mimic SQL operations,
and allow the instantiation of further :class:`.ColumnElement` instances
which are composed from other, more fundamental :class:`.ColumnElement`
objects. For example, two :class:`.ColumnClause` objects can be added
together with the addition operator ``+`` to produce
a :class:`.BinaryExpression`.
Both :class:`.ColumnClause` and :class:`.BinaryExpression` are subclasses
of :class:`.ColumnElement`::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print column('a') + column('b')
a + b
.. seealso::
:class:`.Column`
:func:`.expression.column`
"""
__visit_name__ = 'column'
primary_key = False
foreign_keys = []
_label = None
_key_label = None
_alt_names = ()
def self_group(self, against=None):
if against in (operators.and_, operators.or_, operators._asbool) and \
self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return AsBoolean(self, operators.istrue, operators.isfalse)
else:
return self
def _negate(self):
if self.type._type_affinity is type_api.BOOLEANTYPE._type_affinity:
return AsBoolean(self, operators.isfalse, operators.istrue)
else:
return super(ColumnElement, self)._negate()
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@util.memoized_property
def comparator(self):
return self.type.comparator_factory(self)
def __getattr__(self, key):
try:
return getattr(self.comparator, key)
except AttributeError:
raise AttributeError(
'Neither %r object nor %r object has an attribute %r' % (
type(self).__name__,
type(self.comparator).__name__,
key)
)
def operate(self, op, *other, **kwargs):
return op(self.comparator, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(other, self.comparator, **kwargs)
def _bind_param(self, operator, obj):
return BindParameter(None, obj,
_compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
@property
def expression(self):
"""Return a column expression.
Part of the inspection interface; returns self.
"""
return self
@property
def _select_iterable(self):
return (self, )
@util.memoized_property
def base_columns(self):
return util.column_set(c for c in self.proxy_set
if not hasattr(c, '_proxies'))
@util.memoized_property
def proxy_set(self):
s = util.column_set([self])
if hasattr(self, '_proxies'):
for c in self._proxies:
s.update(c.proxy_set)
return s
def shares_lineage(self, othercolumn):
"""Return True if the given :class:`.ColumnElement`
has a common ancestor to this :class:`.ColumnElement`."""
return bool(self.proxy_set.intersection(othercolumn.proxy_set))
def _compare_name_for_result(self, other):
"""Return True if the given column element compares to this one
when targeting within a result row."""
return hasattr(other, 'name') and hasattr(self, 'name') and \
other.name == self.name
def _make_proxy(self, selectable, name=None, name_is_truncatable=False, **kw):
"""Create a new :class:`.ColumnElement` representing this
:class:`.ColumnElement` as it appears in the select list of a
descending selectable.
"""
if name is None:
name = self.anon_label
try:
key = str(self)
except exc.UnsupportedCompilationError:
key = self.anon_label
else:
key = name
co = ColumnClause(
_as_truncated(name) if name_is_truncatable else name,
type_=getattr(self, 'type', None),
_selectable=selectable
)
co._proxies = [self]
if selectable._is_clone_of is not None:
co._is_clone_of = \
selectable._is_clone_of.columns.get(key)
selectable._columns[key] = co
return co
def compare(self, other, use_proxies=False, equivalents=None, **kw):
"""Compare this ColumnElement to another.
Special arguments understood:
:param use_proxies: when True, consider two columns that
share a common base column as equivalent (i.e. shares_lineage())
:param equivalents: a dictionary of columns as keys mapped to sets
of columns. If the given "other" column is present in this
dictionary, if any of the columns in the corresponding set() pass the
comparison test, the result is True. This is used to expand the
comparison to other columns that may be known to be equivalent to
this one via foreign key or other criterion.
"""
to_compare = (other, )
if equivalents and other in equivalents:
to_compare = equivalents[other].union(to_compare)
for oth in to_compare:
if use_proxies and self.shares_lineage(oth):
return True
elif hash(oth) == hash(self):
return True
else:
return False
def label(self, name):
"""Produce a column label, i.e. ``<columnname> AS <name>``.
This is a shortcut to the :func:`~.expression.label` function.
if 'name' is None, an anonymous label name will be generated.
"""
return Label(name, self, self.type)
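# Hedged illustration (not part of this module): a label only renders once the
# expression is placed in a selectable context, e.g.
#
#   >>> from sqlalchemy.sql import column, select
#   >>> print(select([column('x').label('y')]))
#   SELECT x AS y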
@util.memoized_property
def anon_label(self):
"""provides a constant 'anonymous label' for this ColumnElement.
This is a label() expression which will be named at compile time.
The same label() is returned each time anon_label is called so
that expressions can reference anon_label multiple times, producing
the same label name at compile time.
the compiler uses this function automatically at compile time
for expressions that are known to be 'unnamed' like binary
expressions and function calls.
"""
return _anonymous_label('%%(%d %s)s' % (id(self), getattr(self,
'name', 'anon')))
class BindParameter(ColumnElement):
"""Represent a "bound expression".
:class:`.BindParameter` is invoked explicitly using the
:func:`.bindparam` function, as in::
from sqlalchemy import bindparam
stmt = select([users_table]).\\
where(users_table.c.name == bindparam('username'))
Detailed discussion of how :class:`.BindParameter` is used is
at :func:`.bindparam`.
.. seealso::
:func:`.bindparam`
"""
__visit_name__ = 'bindparam'
_is_crud = False
def __init__(self, key, value=NO_ARG, type_=None,
unique=False, required=NO_ARG,
quote=None, callable_=None,
isoutparam=False,
_compared_to_operator=None,
_compared_to_type=None):
"""Produce a "bound expression".
The return value is an instance of :class:`.BindParameter`; this
is a :class:`.ColumnElement` subclass which represents a so-called
"placeholder" value in a SQL expression, the value of which is supplied
at the point at which the statement is executed against a database
connection.
In SQLAlchemy, the :func:`.bindparam` construct has
the ability to carry along the actual value that will be ultimately
used at expression time. In this way, it serves not just as
a "placeholder" for eventual population, but also as a means of
representing so-called "unsafe" values which should not be rendered
directly in a SQL statement, but rather should be passed along
to the :term:`DBAPI` as values which need to be correctly escaped
and potentially handled for type-safety.
When using :func:`.bindparam` explicitly, the use case is typically
one of traditional deferment of parameters; the :func:`.bindparam`
construct accepts a name which can then be referred to at execution
time::
from sqlalchemy import bindparam
stmt = select([users_table]).\\
where(users_table.c.name == bindparam('username'))
The above statement, when rendered, will produce SQL similar to::
SELECT id, name FROM user WHERE name = :username
In order to populate the value of ``:username`` above, the value
would typically be applied at execution time to a method
like :meth:`.Connection.execute`::
result = connection.execute(stmt, username='wendy')
Explicit use of :func:`.bindparam` is also common when producing
UPDATE or DELETE statements that are to be invoked multiple times,
where the WHERE criterion of the statement is to change on each
invocation, such as::
stmt = users_table.update().\\
where(user_table.c.name == bindparam('username')).\\
values(fullname=bindparam('fullname'))
connection.execute(stmt, [
{"username": "wendy", "fullname": "Wendy Smith"},
{"username": "jack", "fullname": "Jack Jones"},
])
SQLAlchemy's Core expression system makes wide use of :func:`.bindparam`
in an implicit sense. It is typical that Python literal values passed to
virtually all SQL expression functions are coerced into fixed
:func:`.bindparam` constructs. For example, given a comparison operation
such as::
expr = users_table.c.name == 'Wendy'
The above expression will produce a :class:`.BinaryExpression`
construct, where the left side is the :class:`.Column` object
representing the ``name`` column, and the right side is a :class:`.BindParameter`
representing the literal value::
print(repr(expr.right))
BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
The expression above will render SQL such as::
user.name = :name_1
Where the ``:name_1`` parameter name is an anonymous name. The
actual string ``Wendy`` is not in the rendered string, but is carried
along where it is later used within statement execution. If we
invoke a statement like the following::
stmt = select([users_table]).where(users_table.c.name == 'Wendy')
result = connection.execute(stmt)
We would see SQL logging output as::
SELECT "user".id, "user".name
FROM "user"
WHERE "user".name = %(name_1)s
{'name_1': 'Wendy'}
Above, we see that ``Wendy`` is passed as a parameter to the database,
while the placeholder ``:name_1`` is rendered in the appropriate form
for the target database, in this case the Postgresql database.
Similarly, :func:`.bindparam` is invoked automatically
when working with :term:`CRUD` statements as far as the "VALUES"
portion is concerned. The :func:`.insert` construct produces an
``INSERT`` expression which will, at statement execution time, generate
bound placeholders based on the arguments passed, as in::
stmt = users_table.insert()
result = connection.execute(stmt, name='Wendy')
The above will produce SQL output as::
INSERT INTO "user" (name) VALUES (%(name)s)
{'name': 'Wendy'}
The :class:`.Insert` construct, at compilation/execution time,
rendered a single :func:`.bindparam` mirroring the column
name ``name`` as a result of the single ``name`` parameter
we passed to the :meth:`.Connection.execute` method.
:param key:
the key (e.g. the name) for this bind param.
Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`BindParameter` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. Will be used at statement
execution time as the value for this parameter passed to the
DBAPI, if no other value is indicated to the statement execution
method for this particular parameter name. Defaults to ``None``.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
:param type\_:
A :class:`.TypeEngine` class or instance representing an optional
datatype for this :func:`.bindparam`. If not passed, a type
may be determined automatically for the bind, based on the given
value; for example, trivial Python types such as ``str``,
``int``, ``bool``
may result in the :class:`.String`, :class:`.Integer` or
:class:`.Boolean` types being automatically selected.
The type of a :func:`.bindparam` is significant especially in that
the type will apply pre-processing to the value before it is
passed to the database. For example, a :func:`.bindparam` which
refers to a datetime value, and is specified as holding the
:class:`.DateTime` type, may apply conversion needed to the
value (such as stringification on SQLite) before passing the value
to the database.
:param unique:
if True, the key name of this :class:`.BindParameter` will be
modified if another :class:`.BindParameter` of the same name
already has been located within the containing
expression. This flag is used generally by the internals
when producing so-called "anonymous" bound expressions; it
isn't generally applicable to explicitly-named :func:`.bindparam`
constructs.
:param required:
If ``True``, a value is required at execution time. If not passed,
it defaults to ``True`` if neither :paramref:`.bindparam.value`
or :paramref:`.bindparam.callable` were passed. If either of these
parameters are present, then :paramref:`.bindparam.required` defaults
to ``False``.
.. versionchanged:: 0.8 If the ``required`` flag is not specified,
it will be set automatically to ``True`` or ``False`` depending
on whether or not the ``value`` or ``callable`` parameters
were specified.
:param quote:
True if this parameter name requires quoting and is not
currently known as a SQLAlchemy reserved word; this currently
only applies to the Oracle backend, where bound names must
sometimes be quoted.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter. This applies to backends such as Oracle which
support OUT parameters.
.. seealso::
:ref:`coretutorial_bind_param`
:ref:`coretutorial_insert_expressions`
:func:`.outparam`
"""
if isinstance(key, ColumnClause):
type_ = key.type
key = key.name
if required is NO_ARG:
required = (value is NO_ARG and callable_ is None)
if value is NO_ARG:
value = None
if quote is not None:
key = quoted_name(key, quote)
if unique:
self.key = _anonymous_label('%%(%d %s)s' % (id(self), key
or 'param'))
else:
self.key = key or _anonymous_label('%%(%d param)s'
% id(self))
# identifying key that won't change across
# clones, used to identify the bind's logical
# identity
self._identifying_key = self.key
# key that was passed in the first place, used to
# generate new keys
self._orig_key = key or 'param'
self.unique = unique
self.value = value
self.callable = callable_
self.isoutparam = isoutparam
self.required = required
if type_ is None:
if _compared_to_type is not None:
self.type = \
_compared_to_type.coerce_compared_value(
_compared_to_operator, value)
else:
self.type = type_api._type_map.get(type(value),
type_api.NULLTYPE)
elif isinstance(type_, type):
self.type = type_()
else:
self.type = type_
def _with_value(self, value):
"""Return a copy of this :class:`.BindParameter` with the given value set."""
cloned = self._clone()
cloned.value = value
cloned.callable = None
cloned.required = False
if cloned.type is type_api.NULLTYPE:
cloned.type = type_api._type_map.get(type(value),
type_api.NULLTYPE)
return cloned
@property
def effective_value(self):
"""Return the value of this bound parameter,
taking into account if the ``callable`` parameter
was set.
The ``callable`` value will be evaluated
and returned if present, else ``value``.
"""
if self.callable:
return self.callable()
else:
return self.value
def _clone(self):
c = ClauseElement._clone(self)
if self.unique:
c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key
or 'param'))
return c
def _convert_to_unique(self):
if not self.unique:
self.unique = True
self.key = _anonymous_label('%%(%d %s)s' % (id(self),
self._orig_key or 'param'))
def compare(self, other, **kw):
"""Compare this :class:`BindParameter` to the given
clause."""
return isinstance(other, BindParameter) \
and self.type._compare_type_affinity(other.type) \
and self.value == other.value
def __getstate__(self):
"""execute a deferred value for serialization purposes."""
d = self.__dict__.copy()
v = self.value
if self.callable:
v = self.callable()
d['callable'] = None
d['value'] = v
return d
def __repr__(self):
return 'BindParameter(%r, %r, type_=%r)' % (self.key,
self.value, self.type)
class TypeClause(ClauseElement):
"""Handle a type keyword in a SQL statement.
Used by the ``Case`` statement.
"""
__visit_name__ = 'typeclause'
def __init__(self, type):
self.type = type
class TextClause(Executable, ClauseElement):
"""Represent a literal SQL text fragment.
E.g.::
from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The :class:`.Text` construct is produced using the :func:`.text`
function; see that function for full documentation.
.. seealso::
:func:`.text`
"""
__visit_name__ = 'textclause'
_bind_params_regex = re.compile(r'(?<![:\w\x5c]):(\w+)(?!:)', re.UNICODE)
_execution_options = \
Executable._execution_options.union(
{'autocommit': PARSE_AUTOCOMMIT})
@property
def _select_iterable(self):
return (self,)
@property
def selectable(self):
return self
_hide_froms = []
def __init__(
self,
text,
bind=None):
self._bind = bind
self._bindparams = {}
def repl(m):
self._bindparams[m.group(1)] = BindParameter(m.group(1))
return ':%s' % m.group(1)
# scan the string and search for bind parameter names, add them
# to the list of bindparams
self.text = self._bind_params_regex.sub(repl, text)
@classmethod
def _create_text(self, text, bind=None, bindparams=None,
typemap=None, autocommit=None):
"""Construct a new :class:`.TextClause` clause, representing
a textual SQL string directly.
E.g.::
from sqlalchemy import text
t = text("SELECT * FROM users")
result = connection.execute(t)
The advantages :func:`.text` provides over a plain string are
backend-neutral support for bind parameters, per-statement
execution options, as well as
bind parameter and result-column typing behavior, allowing
SQLAlchemy type constructs to play a role when executing
a statement that is specified literally. The construct can also
be provided with a ``.c`` collection of column elements, allowing
it to be embedded in other SQL expression constructs as a subquery.
Bind parameters are specified by name, using the format ``:name``.
E.g.::
t = text("SELECT * FROM users WHERE id=:user_id")
result = connection.execute(t, user_id=12)
For SQL statements where a colon is required verbatim, as within
an inline string, use a backslash to escape::
t = text("SELECT * FROM users WHERE name='\\:username'")
The :class:`.TextClause` construct includes methods which can
provide information about the bound parameters as well as the column
values which would be returned from the textual statement, assuming
it's an executable SELECT type of statement. The :meth:`.TextClause.bindparams`
method is used to provide bound parameter detail, and
:meth:`.TextClause.columns` method allows specification of
return columns including names and types::
t = text("SELECT * FROM users WHERE id=:user_id").\\
bindparams(user_id=7).\\
columns(id=Integer, name=String)
for id, name in connection.execute(t):
print(id, name)
The :func:`.text` construct is used internally in cases when
a literal string is specified for part of a larger query, such as
when a string is specified to the :meth:`.Select.where` method of
:class:`.Select`. In those cases, the same
bind parameter syntax is applied::
s = select([users.c.id, users.c.name]).where("id=:user_id")
result = connection.execute(s, user_id=12)
Using :func:`.text` explicitly usually implies the construction
of a full, standalone statement. As such, SQLAlchemy refers
to it as an :class:`.Executable` object, and it supports
the :meth:`Executable.execution_options` method. For example,
a :func:`.text` construct that should be subject to "autocommit"
can be set explicitly so using the :paramref:`.Connection.execution_options.autocommit`
option::
t = text("EXEC my_procedural_thing()").\\
execution_options(autocommit=True)
Note that SQLAlchemy's usual "autocommit" behavior applies to
:func:`.text` constructs implicitly - that is, statements which begin
with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``,
or a variety of other phrases specific to certain backends, will
be eligible for autocommit if no transaction is in progress.
:param text:
the text of the SQL statement to be created. use ``:<param>``
to specify bind parameters; they will be compiled to their
engine-specific format.
:param autocommit:
Deprecated. Use .execution_options(autocommit=<True|False>)
to set the autocommit option.
:param bind:
an optional connection or engine to be used for this text query.
:param bindparams:
Deprecated. A list of :func:`.bindparam` instances used to
provide information about parameters embedded in the statement.
This argument now invokes the :meth:`.TextClause.bindparams`
method on the construct before returning it. E.g.::
stmt = text("SELECT * FROM table WHERE id=:id",
bindparams=[bindparam('id', value=5, type_=Integer)])
Is equivalent to::
stmt = text("SELECT * FROM table WHERE id=:id").\\
bindparams(bindparam('id', value=5, type_=Integer))
.. deprecated:: 0.9.0 the :meth:`.TextClause.bindparams` method
supersedes the ``bindparams`` argument to :func:`.text`.
:param typemap:
Deprecated. A dictionary mapping the names of columns
represented in the columns clause of a ``SELECT`` statement
to type objects,
which will be used to perform post-processing on columns within
the result set. This parameter now invokes the :meth:`.TextClause.columns`
method, which returns a :class:`.TextAsFrom` construct that gains
a ``.c`` collection and can be embedded in other expressions. E.g.::
stmt = text("SELECT * FROM table",
typemap={'id': Integer, 'name': String},
)
Is equivalent to::
stmt = text("SELECT * FROM table").columns(id=Integer, name=String)
Or alternatively::
from sqlalchemy.sql import column
stmt = text("SELECT * FROM table").columns(
column('id', Integer),
column('name', String)
)
.. deprecated:: 0.9.0 the :meth:`.TextClause.columns` method
supersedes the ``typemap`` argument to :func:`.text`.
"""
stmt = TextClause(text, bind=bind)
if bindparams:
stmt = stmt.bindparams(*bindparams)
if typemap:
stmt = stmt.columns(**typemap)
if autocommit is not None:
util.warn_deprecated('autocommit on text() is deprecated. '
'Use .execution_options(autocommit=True)')
stmt = stmt.execution_options(autocommit=autocommit)
return stmt
@_generative
def bindparams(self, *binds, **names_to_values):
"""Establish the values and/or types of bound parameters within
this :class:`.TextClause` construct.
Given a text construct such as::
from sqlalchemy import text
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
the :meth:`.TextClause.bindparams` method can be used to establish
the initial value of ``:name`` and ``:timestamp``,
using simple keyword arguments::
stmt = stmt.bindparams(name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
Where above, new :class:`.BindParameter` objects
will be generated with the names ``name`` and ``timestamp``, and
values of ``jack`` and ``datetime.datetime(2012, 10, 8, 15, 12, 5)``,
respectively. The types will be
inferred from the values given, in this case :class:`.String` and
:class:`.DateTime`.
When specific typing behavior is needed, the positional ``*binds``
argument can be used in which to specify :func:`.bindparam` constructs
directly. These constructs must include at least the ``key`` argument,
then an optional value and type::
from sqlalchemy import bindparam
stmt = stmt.bindparams(
bindparam('name', value='jack', type_=String),
bindparam('timestamp', type_=DateTime)
)
Above, we specified the type of :class:`.DateTime` for the ``timestamp``
bind, and the type of :class:`.String` for the ``name`` bind. In
the case of ``name`` we also set the default value of ``"jack"``.
Additional bound parameters can be supplied at statement execution
time, e.g.::
result = connection.execute(stmt,
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
The :meth:`.TextClause.bindparams` method can be called repeatedly, where
it will re-use existing :class:`.BindParameter` objects to add new information.
For example, we can call :meth:`.TextClause.bindparams` first with
typing information, and a second time with value information, and it
will be combined::
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
stmt = stmt.bindparams(
bindparam('name', type_=String),
bindparam('timestamp', type_=DateTime)
)
stmt = stmt.bindparams(
name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
)
.. versionadded:: 0.9.0 The :meth:`.TextClause.bindparams` method supersedes
the argument ``bindparams`` passed to :func:`~.expression.text`.
"""
self._bindparams = new_params = self._bindparams.copy()
for bind in binds:
try:
existing = new_params[bind.key]
except KeyError:
raise exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % bind.key)
else:
new_params[existing.key] = bind
for key, value in names_to_values.items():
try:
existing = new_params[key]
except KeyError:
raise exc.ArgumentError(
"This text() construct doesn't define a "
"bound parameter named %r" % key)
else:
new_params[key] = existing._with_value(value)
@util.dependencies('sqlalchemy.sql.selectable')
def columns(self, selectable, *cols, **types):
"""Turn this :class:`.TextClause` object into a :class:`.TextAsFrom`
object that can be embedded into another statement.
This function essentially bridges the gap between an entirely
textual SELECT statement and the SQL expression language concept
of a "selectable"::
from sqlalchemy.sql import column, text
stmt = text("SELECT id, name FROM some_table")
stmt = stmt.columns(column('id'), column('name')).alias('st')
stmt = select([mytable]).\\
select_from(
mytable.join(stmt, mytable.c.name == stmt.c.name)
).where(stmt.c.id > 5)
Above, we used untyped :func:`.column` elements. These can also have
types specified, which will impact how the column behaves in expressions
as well as determining result set behavior::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
column('id', Integer),
column('name', Unicode),
column('timestamp', DateTime)
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
Keyword arguments allow just the names and types of columns to be specified,
where the :func:`.column` elements will be generated automatically::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
id=Integer,
name=Unicode,
timestamp=DateTime
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
The :meth:`.TextClause.columns` method provides a direct
route to calling :meth:`.FromClause.alias` as well as :meth:`.SelectBase.cte`
against a textual SELECT statement::
stmt = stmt.columns(id=Integer, name=String).cte('st')
stmt = select([sometable]).where(sometable.c.id == stmt.c.id)
.. versionadded:: 0.9.0 :func:`.text` can now be converted into a fully
featured "selectable" construct using the :meth:`.TextClause.columns`
method. This method supersedes the ``typemap`` argument to
:func:`.text`.
"""
col_by_name = dict(
(col.key, col) for col in cols
)
for key, type_ in types.items():
col_by_name[key] = ColumnClause(key, type_)
return selectable.TextAsFrom(self, list(col_by_name.values()))
@property
def type(self):
return type_api.NULLTYPE
@property
def comparator(self):
return self.type.comparator_factory(self)
def self_group(self, against=None):
if against is operators.in_op:
return Grouping(self)
else:
return self
def _copy_internals(self, clone=_clone, **kw):
self._bindparams = dict((b.key, clone(b, **kw))
for b in self._bindparams.values())
def get_children(self, **kwargs):
return list(self._bindparams.values())
class Null(ColumnElement):
"""Represent the NULL keyword in a SQL statement.
:class:`.Null` is accessed as a constant via the
:func:`.null` function.
"""
__visit_name__ = 'null'
@util.memoized_property
def type(self):
return type_api.NULLTYPE
@classmethod
def _singleton(cls):
"""Return a constant :class:`.Null` construct."""
return NULL
def compare(self, other):
return isinstance(other, Null)
class False_(ColumnElement):
"""Represent the ``false`` keyword, or equivalent, in a SQL statement.
:class:`.False_` is accessed as a constant via the
:func:`.false` function.
"""
__visit_name__ = 'false'
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return TRUE
@classmethod
def _singleton(cls):
"""Return a constant :class:`.False_` construct.
E.g.::
>>> from sqlalchemy import false
>>> print select([t.c.x]).where(false())
SELECT x FROM t WHERE false
A backend which does not support true/false constants will render as
an expression against 1 or 0::
>>> print select([t.c.x]).where(false())
SELECT x FROM t WHERE 0 = 1
The :func:`.true` and :func:`.false` constants also feature
"short circuit" operation within an :func:`.and_` or :func:`.or_`
conjunction::
>>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
SELECT x FROM t WHERE true
>>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
SELECT x FROM t WHERE false
.. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
better integrated behavior within conjunctions and on dialects
that don't support true/false constants.
.. seealso::
:func:`.true`
"""
return FALSE
def compare(self, other):
return isinstance(other, False_)
class True_(ColumnElement):
"""Represent the ``true`` keyword, or equivalent, in a SQL statement.
:class:`.True_` is accessed as a constant via the
:func:`.true` function.
"""
__visit_name__ = 'true'
@util.memoized_property
def type(self):
return type_api.BOOLEANTYPE
def _negate(self):
return FALSE
@classmethod
def _ifnone(cls, other):
if other is None:
return cls._singleton()
else:
return other
@classmethod
def _singleton(cls):
"""Return a constant :class:`.True_` construct.
E.g.::
>>> from sqlalchemy import true
>>> print select([t.c.x]).where(true())
SELECT x FROM t WHERE true
A backend which does not support true/false constants will render as
an expression against 1 or 0::
>>> print select([t.c.x]).where(true())
SELECT x FROM t WHERE 1 = 1
The :func:`.true` and :func:`.false` constants also feature
"short circuit" operation within an :func:`.and_` or :func:`.or_`
conjunction::
>>> print select([t.c.x]).where(or_(t.c.x > 5, true()))
SELECT x FROM t WHERE true
>>> print select([t.c.x]).where(and_(t.c.x > 5, false()))
SELECT x FROM t WHERE false
.. versionchanged:: 0.9 :func:`.true` and :func:`.false` feature
better integrated behavior within conjunctions and on dialects
that don't support true/false constants.
.. seealso::
:func:`.false`
"""
return TRUE
def compare(self, other):
return isinstance(other, True_)
NULL = Null()
FALSE = False_()
TRUE = True_()
class ClauseList(ClauseElement):
"""Describe a list of clauses, separated by an operator.
    By default, it is comma-separated, such as a column listing.
"""
__visit_name__ = 'clauselist'
def __init__(self, *clauses, **kwargs):
self.operator = kwargs.pop('operator', operators.comma_op)
self.group = kwargs.pop('group', True)
self.group_contents = kwargs.pop('group_contents', True)
if self.group_contents:
self.clauses = [
_literal_as_text(clause).self_group(against=self.operator)
for clause in clauses]
else:
self.clauses = [
_literal_as_text(clause)
for clause in clauses]
def __iter__(self):
return iter(self.clauses)
def __len__(self):
return len(self.clauses)
@property
def _select_iterable(self):
return iter(self)
def append(self, clause):
if self.group_contents:
self.clauses.append(_literal_as_text(clause).\
self_group(against=self.operator))
else:
self.clauses.append(_literal_as_text(clause))
def _copy_internals(self, clone=_clone, **kw):
self.clauses = [clone(clause, **kw) for clause in self.clauses]
def get_children(self, **kwargs):
return self.clauses
@property
def _from_objects(self):
return list(itertools.chain(*[c._from_objects for c in self.clauses]))
def self_group(self, against=None):
if self.group and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def compare(self, other, **kw):
"""Compare this :class:`.ClauseList` to the given :class:`.ClauseList`,
including a comparison of all the clause items.
"""
if not isinstance(other, ClauseList) and len(self.clauses) == 1:
return self.clauses[0].compare(other, **kw)
elif isinstance(other, ClauseList) and \
len(self.clauses) == len(other.clauses):
for i in range(0, len(self.clauses)):
if not self.clauses[i].compare(other.clauses[i], **kw):
return False
else:
return self.operator == other.operator
else:
return False
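# Editor's note: ClauseList is mostly an internal construct, but a minimal,
# hedged usage sketch may help illustrate the comma-separated default
# described in the class docstring above (column names are hypothetical):
#
#     from sqlalchemy.sql import column
#     from sqlalchemy.sql.elements import ClauseList
#
#     cl = ClauseList(column('a'), column('b'), column('c'))
#     # str(cl) compiles roughly as "a, b, c" -- the default operator is the
#     # comma; passing operator=operators.and_ would join the clauses with AND.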
class BooleanClauseList(ClauseList, ColumnElement):
__visit_name__ = 'clauselist'
def __init__(self, *arg, **kw):
raise NotImplementedError(
"BooleanClauseList has a private constructor")
@classmethod
def _construct(cls, operator, continue_on, skip_on, *clauses, **kw):
convert_clauses = []
clauses = util.coerce_generator_arg(clauses)
for clause in clauses:
clause = _literal_as_text(clause)
if isinstance(clause, continue_on):
continue
elif isinstance(clause, skip_on):
return clause.self_group(against=operators._asbool)
convert_clauses.append(clause)
if len(convert_clauses) == 1:
return convert_clauses[0].self_group(against=operators._asbool)
elif not convert_clauses and clauses:
return clauses[0].self_group(against=operators._asbool)
convert_clauses = [c.self_group(against=operator)
for c in convert_clauses]
self = cls.__new__(cls)
self.clauses = convert_clauses
self.group = True
self.operator = operator
self.group_contents = True
self.type = type_api.BOOLEANTYPE
return self
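    # Editor's note (hedged): the continue_on/skip_on handling above is what
    # implements the "short circuit" behavior mentioned in the True_/False_
    # docstrings. Given and_ = _construct(and_, True_, False_, ...) and
    # or_ = _construct(or_, False_, True_, ...), roughly:
    #
    #     and_(x > 5, true())   ->  "x > 5"   (true() is dropped)
    #     and_(x > 5, false())  ->  "false"   (false() short-circuits)
    #     or_(x > 5, false())   ->  "x > 5"   (false() is dropped)
    #     or_(x > 5, true())    ->  "true"    (true() short-circuits)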
@classmethod
def and_(cls, *clauses):
"""Produce a conjunction of expressions joined by ``AND``.
E.g.::
from sqlalchemy import and_
stmt = select([users_table]).where(
and_(
users_table.c.name == 'wendy',
users_table.c.enrolled == True
)
)
The :func:`.and_` conjunction is also available using the
Python ``&`` operator (though note that compound expressions
need to be parenthesized in order to function with Python
operator precedence behavior)::
stmt = select([users_table]).where(
(users_table.c.name == 'wendy') &
(users_table.c.enrolled == True)
)
The :func:`.and_` operation is also implicit in some cases;
the :meth:`.Select.where` method for example can be invoked multiple
times against a statement, which will have the effect of each
clause being combined using :func:`.and_`::
stmt = select([users_table]).\\
where(users_table.c.name == 'wendy').\\
where(users_table.c.enrolled == True)
.. seealso::
:func:`.or_`
"""
return cls._construct(operators.and_, True_, False_, *clauses)
@classmethod
def or_(cls, *clauses):
"""Produce a conjunction of expressions joined by ``OR``.
E.g.::
from sqlalchemy import or_
stmt = select([users_table]).where(
or_(
users_table.c.name == 'wendy',
users_table.c.name == 'jack'
)
)
The :func:`.or_` conjunction is also available using the
Python ``|`` operator (though note that compound expressions
need to be parenthesized in order to function with Python
operator precedence behavior)::
stmt = select([users_table]).where(
(users_table.c.name == 'wendy') |
(users_table.c.name == 'jack')
)
.. seealso::
:func:`.and_`
"""
return cls._construct(operators.or_, False_, True_, *clauses)
@property
def _select_iterable(self):
return (self, )
def self_group(self, against=None):
if not self.clauses:
return self
else:
return super(BooleanClauseList, self).self_group(against=against)
def _negate(self):
return ClauseList._negate(self)
and_ = BooleanClauseList.and_
or_ = BooleanClauseList.or_
class Tuple(ClauseList, ColumnElement):
"""Represent a SQL tuple."""
def __init__(self, *clauses, **kw):
"""Return a :class:`.Tuple`.
Main usage is to produce a composite IN construct::
from sqlalchemy import tuple_
tuple_(table.c.col1, table.c.col2).in_(
[(1, 2), (5, 12), (10, 19)]
)
.. warning::
The composite IN construct is not supported by all backends,
and is currently known to work on Postgresql and MySQL,
but not SQLite. Unsupported backends will raise
a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such
an expression is invoked.
"""
clauses = [_literal_as_binds(c) for c in clauses]
self.type = kw.pop('type_', None)
if self.type is None:
self.type = _type_from_args(clauses)
super(Tuple, self).__init__(*clauses, **kw)
@property
def _select_iterable(self):
return (self, )
def _bind_param(self, operator, obj):
return Tuple(*[
BindParameter(None, o, _compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
for o in obj
]).self_group()
class Case(ColumnElement):
"""Represent a ``CASE`` expression.
:class:`.Case` is produced using the :func:`.case` factory function,
as in::
from sqlalchemy import case
stmt = select([users_table]).\\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
    Details on :class:`.Case` usage are at :func:`.case`.
.. seealso::
:func:`.case`
"""
__visit_name__ = 'case'
def __init__(self, whens, value=None, else_=None):
"""Produce a ``CASE`` expression.
The ``CASE`` construct in SQL is a conditional object that
acts somewhat analogously to an "if/then" construct in other
languages. It returns an instance of :class:`.Case`.
:func:`.case` in its usual form is passed a list of "when"
        constructs, that is, a list of conditions and results as tuples::
from sqlalchemy import case
stmt = select([users_table]).\\
where(
case(
[
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
],
else_='E'
)
)
The above statement will produce SQL resembling::
SELECT id, name FROM user
WHERE CASE
WHEN (name = :name_1) THEN :param_1
WHEN (name = :name_2) THEN :param_2
ELSE :param_3
END
When simple equality expressions of several values against a single
parent column are needed, :func:`.case` also has a "shorthand" format
used via the
:paramref:`.case.value` parameter, which is passed a column
expression to be compared. In this form, the :paramref:`.case.whens`
parameter is passed as a dictionary containing expressions to be compared
against keyed to result expressions. The statement below is equivalent
to the preceding statement::
stmt = select([users_table]).\\
where(
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name,
else_='E'
)
)
The values which are accepted as result values in
:paramref:`.case.whens` as well as with :paramref:`.case.else_` are
coerced from Python literals into :func:`.bindparam` constructs.
SQL expressions, e.g. :class:`.ColumnElement` constructs, are accepted
as well. To coerce a literal string expression into a constant
expression rendered inline, use the :func:`.literal_column` construct,
as in::
from sqlalchemy import case, literal_column
case(
[
(
orderline.c.qty > 100,
literal_column("'greaterthan100'")
),
(
orderline.c.qty > 10,
literal_column("'greaterthan10'")
)
],
else_=literal_column("'lessthan10'")
)
The above will render the given constants without using bound
parameters for the result values (but still for the comparison
values), as in::
CASE
WHEN (orderline.qty > :qty_1) THEN 'greaterthan100'
WHEN (orderline.qty > :qty_2) THEN 'greaterthan10'
ELSE 'lessthan10'
END
:param whens: The criteria to be compared against, :paramref:`.case.whens`
accepts two different forms, based on whether or not :paramref:`.case.value`
is used.
In the first form, it accepts a list of 2-tuples; each 2-tuple consists
of ``(<sql expression>, <value>)``, where the SQL expression is a
boolean expression and "value" is a resulting value, e.g.::
case([
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
])
In the second form, it accepts a Python dictionary of comparison values
mapped to a resulting value; this form requires :paramref:`.case.value`
to be present, and values will be compared using the ``==`` operator,
e.g.::
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name
)
:param value: An optional SQL expression which will be used as a
fixed "comparison point" for candidate values within a dictionary
passed to :paramref:`.case.whens`.
:param else\_: An optional SQL expression which will be the evaluated
result of the ``CASE`` construct if all expressions within
:paramref:`.case.whens` evaluate to false. When omitted, most
databases will produce a result of NULL if none of the "when"
          expressions evaluate to true.
"""
try:
whens = util.dictlike_iteritems(whens)
except TypeError:
pass
if value is not None:
whenlist = [
(_literal_as_binds(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
else:
whenlist = [
(_no_literals(c).self_group(),
_literal_as_binds(r)) for (c, r) in whens
]
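        # editor's note: the CASE expression's type is inferred from the
        # result value of the last WHEN pair, if any.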
if whenlist:
type_ = list(whenlist[-1])[-1].type
else:
type_ = None
if value is None:
self.value = None
else:
self.value = _literal_as_binds(value)
self.type = type_
self.whens = whenlist
if else_ is not None:
self.else_ = _literal_as_binds(else_)
else:
self.else_ = None
def _copy_internals(self, clone=_clone, **kw):
if self.value is not None:
self.value = clone(self.value, **kw)
self.whens = [(clone(x, **kw), clone(y, **kw))
for x, y in self.whens]
if self.else_ is not None:
self.else_ = clone(self.else_, **kw)
def get_children(self, **kwargs):
if self.value is not None:
yield self.value
for x, y in self.whens:
yield x
yield y
if self.else_ is not None:
yield self.else_
@property
def _from_objects(self):
return list(itertools.chain(*[x._from_objects for x in
self.get_children()]))
def literal_column(text, type_=None):
"""Return a textual column expression, as would be in the columns
clause of a ``SELECT`` statement.
The object returned supports further expressions in the same way as any
other column object, including comparison, math and string operations.
The type\_ parameter is important to determine proper expression behavior
    (such as whether '+' means string concatenation or numerical addition, based on
the type).
:param text: the text of the expression; can be any SQL expression.
Quoting rules will not be applied. To specify a column-name expression
which should be subject to quoting rules, use the :func:`column`
function.
:param type\_: an optional :class:`~sqlalchemy.types.TypeEngine`
object which will
provide result-set translation and additional expression semantics for
this column. If left as None the type will be NullType.
"""
return ColumnClause(text, type_=type_, is_literal=True)
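# Editor's note: a short, hedged example of literal_column() in use (table and
# column names are hypothetical, not from the original source):
#
#     from sqlalchemy import select, literal_column
#     from sqlalchemy.sql import table, column
#
#     t = table("orders", column("qty"))
#     stmt = select([t.c.qty, literal_column("'fixed tag'").label("tag")])
#     # renders roughly as: SELECT orders.qty, 'fixed tag' AS tag FROM orders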
class Cast(ColumnElement):
"""Represent a ``CAST`` expression.
:class:`.Cast` is produced using the :func:`.cast` factory function,
as in::
from sqlalchemy import cast, Numeric
stmt = select([
cast(product_table.c.unit_price, Numeric(10, 4))
])
    Details on :class:`.Cast` usage are at :func:`.cast`.
.. seealso::
:func:`.cast`
"""
__visit_name__ = 'cast'
def __init__(self, expression, type_):
"""Produce a ``CAST`` expression.
:func:`.cast` returns an instance of :class:`.Cast`.
E.g.::
from sqlalchemy import cast, Numeric
stmt = select([
cast(product_table.c.unit_price, Numeric(10, 4))
])
The above statement will produce SQL resembling::
SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product
The :func:`.cast` function performs two distinct functions when
used. The first is that it renders the ``CAST`` expression within
the resulting SQL string. The second is that it associates the given
type (e.g. :class:`.TypeEngine` class or instance) with the column
expression on the Python side, which means the expression will take
on the expression operator behavior associated with that type,
as well as the bound-value handling and result-row-handling behavior
of the type.
.. versionchanged:: 0.9.0 :func:`.cast` now applies the given type
to the expression such that it takes effect on the bound-value,
e.g. the Python-to-database direction, in addition to the
result handling, e.g. database-to-Python, direction.
An alternative to :func:`.cast` is the :func:`.type_coerce` function.
This function performs the second task of associating an expression
with a specific type, but does not render the ``CAST`` expression
in SQL.
:param expression: A SQL expression, such as a :class:`.ColumnElement`
expression or a Python string which will be coerced into a bound
literal value.
:param type_: A :class:`.TypeEngine` class or instance indicating
the type to which the ``CAST`` should apply.
.. seealso::
:func:`.type_coerce` - Python-side type coercion without emitting
CAST.
"""
self.type = type_api.to_instance(type_)
self.clause = _literal_as_binds(expression, type_=self.type)
self.typeclause = TypeClause(self.type)
def _copy_internals(self, clone=_clone, **kw):
self.clause = clone(self.clause, **kw)
self.typeclause = clone(self.typeclause, **kw)
def get_children(self, **kwargs):
return self.clause, self.typeclause
@property
def _from_objects(self):
return self.clause._from_objects
class Extract(ColumnElement):
"""Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""
__visit_name__ = 'extract'
def __init__(self, field, expr, **kwargs):
"""Return a :class:`.Extract` construct.
This is typically available as :func:`.extract`
as well as ``func.extract`` from the
:data:`.func` namespace.
"""
self.type = type_api.INTEGERTYPE
self.field = field
self.expr = _literal_as_binds(expr, None)
def _copy_internals(self, clone=_clone, **kw):
self.expr = clone(self.expr, **kw)
def get_children(self, **kwargs):
return self.expr,
@property
def _from_objects(self):
return self.expr._from_objects
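# Editor's note: a brief, hedged usage sketch for Extract (column and table
# names are hypothetical):
#
#     from sqlalchemy import extract
#
#     expr = extract('year', some_table.c.created_at)
#     # renders roughly as: EXTRACT(year FROM some_table.created_at)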
class UnaryExpression(ColumnElement):
"""Define a 'unary' expression.
A unary expression has a single column expression
and an operator. The operator can be placed on the left
(where it is called the 'operator') or right (where it is called the
'modifier') of the column expression.
:class:`.UnaryExpression` is the basis for several unary operators
including those used by :func:`.desc`, :func:`.asc`, :func:`.distinct`,
:func:`.nullsfirst` and :func:`.nullslast`.
"""
__visit_name__ = 'unary'
def __init__(self, element, operator=None, modifier=None,
type_=None, negate=None):
self.operator = operator
self.modifier = modifier
self.element = element.self_group(against=self.operator or self.modifier)
self.type = type_api.to_instance(type_)
self.negate = negate
@classmethod
def _create_nullsfirst(cls, column):
"""Produce the ``NULLS FIRST`` modifier for an ``ORDER BY`` expression.
:func:`.nullsfirst` is intended to modify the expression produced
by :func:`.asc` or :func:`.desc`, and indicates how NULL values
should be handled when they are encountered during ordering::
from sqlalchemy import desc, nullsfirst
stmt = select([users_table]).\\
order_by(nullsfirst(desc(users_table.c.name)))
The SQL expression from the above would resemble::
SELECT id, name FROM user ORDER BY name DESC NULLS FIRST
Like :func:`.asc` and :func:`.desc`, :func:`.nullsfirst` is typically
invoked from the column expression itself using :meth:`.ColumnElement.nullsfirst`,
rather than as its standalone function version, as in::
stmt = select([users_table]).\\
order_by(users_table.c.name.desc().nullsfirst())
.. seealso::
:func:`.asc`
:func:`.desc`
:func:`.nullslast`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_text(column), modifier=operators.nullsfirst_op)
@classmethod
def _create_nullslast(cls, column):
"""Produce the ``NULLS LAST`` modifier for an ``ORDER BY`` expression.
:func:`.nullslast` is intended to modify the expression produced
by :func:`.asc` or :func:`.desc`, and indicates how NULL values
should be handled when they are encountered during ordering::
from sqlalchemy import desc, nullslast
stmt = select([users_table]).\\
order_by(nullslast(desc(users_table.c.name)))
The SQL expression from the above would resemble::
SELECT id, name FROM user ORDER BY name DESC NULLS LAST
Like :func:`.asc` and :func:`.desc`, :func:`.nullslast` is typically
invoked from the column expression itself using :meth:`.ColumnElement.nullslast`,
rather than as its standalone function version, as in::
stmt = select([users_table]).\\
order_by(users_table.c.name.desc().nullslast())
.. seealso::
:func:`.asc`
:func:`.desc`
:func:`.nullsfirst`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_text(column), modifier=operators.nullslast_op)
@classmethod
def _create_desc(cls, column):
"""Produce a descending ``ORDER BY`` clause element.
e.g.::
from sqlalchemy import desc
stmt = select([users_table]).order_by(desc(users_table.c.name))
will produce SQL as::
SELECT id, name FROM user ORDER BY name DESC
The :func:`.desc` function is a standalone version of the
:meth:`.ColumnElement.desc` method available on all SQL expressions,
e.g.::
stmt = select([users_table]).order_by(users_table.c.name.desc())
:param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
with which to apply the :func:`.desc` operation.
.. seealso::
:func:`.asc`
:func:`.nullsfirst`
:func:`.nullslast`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_text(column), modifier=operators.desc_op)
@classmethod
def _create_asc(cls, column):
"""Produce an ascending ``ORDER BY`` clause element.
e.g.::
from sqlalchemy import asc
stmt = select([users_table]).order_by(asc(users_table.c.name))
will produce SQL as::
SELECT id, name FROM user ORDER BY name ASC
The :func:`.asc` function is a standalone version of the
:meth:`.ColumnElement.asc` method available on all SQL expressions,
e.g.::
stmt = select([users_table]).order_by(users_table.c.name.asc())
:param column: A :class:`.ColumnElement` (e.g. scalar SQL expression)
with which to apply the :func:`.asc` operation.
.. seealso::
:func:`.desc`
:func:`.nullsfirst`
:func:`.nullslast`
:meth:`.Select.order_by`
"""
return UnaryExpression(
_literal_as_text(column), modifier=operators.asc_op)
@classmethod
def _create_distinct(cls, expr):
"""Produce an column-expression-level unary ``DISTINCT`` clause.
This applies the ``DISTINCT`` keyword to an individual column
expression, and is typically contained within an aggregate function,
as in::
from sqlalchemy import distinct, func
stmt = select([func.count(distinct(users_table.c.name))])
The above would produce an expression resembling::
SELECT COUNT(DISTINCT name) FROM user
The :func:`.distinct` function is also available as a column-level
method, e.g. :meth:`.ColumnElement.distinct`, as in::
stmt = select([func.count(users_table.c.name.distinct())])
The :func:`.distinct` operator is different from the
:meth:`.Select.distinct` method of :class:`.Select`,
which produces a ``SELECT`` statement
with ``DISTINCT`` applied to the result set as a whole,
e.g. a ``SELECT DISTINCT`` expression. See that method for further
information.
.. seealso::
:meth:`.ColumnElement.distinct`
:meth:`.Select.distinct`
:data:`.func`
"""
expr = _literal_as_binds(expr)
return UnaryExpression(expr,
operator=operators.distinct_op, type_=expr.type)
@util.memoized_property
def _order_by_label_element(self):
if self.modifier in (operators.desc_op, operators.asc_op):
return self.element._order_by_label_element
else:
return None
@property
def _from_objects(self):
return self.element._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return self.element,
def compare(self, other, **kw):
"""Compare this :class:`UnaryExpression` against the given
:class:`.ClauseElement`."""
return (
isinstance(other, UnaryExpression) and
self.operator == other.operator and
self.modifier == other.modifier and
self.element.compare(other.element, **kw)
)
def _negate(self):
if self.negate is not None:
return UnaryExpression(
self.element,
operator=self.negate,
negate=self.operator,
modifier=self.modifier,
type_=self.type)
else:
return ClauseElement._negate(self)
def self_group(self, against=None):
if self.operator and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
class AsBoolean(UnaryExpression):
def __init__(self, element, operator, negate):
self.element = element
self.type = type_api.BOOLEANTYPE
self.operator = operator
self.negate = negate
self.modifier = None
def self_group(self, against=None):
return self
def _negate(self):
return self.element._negate()
class BinaryExpression(ColumnElement):
"""Represent an expression that is ``LEFT <operator> RIGHT``.
A :class:`.BinaryExpression` is generated automatically
    whenever two column expressions are used in a Python binary expression::
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print column('a') + column('b')
a + b
"""
__visit_name__ = 'binary'
def __init__(self, left, right, operator, type_=None,
negate=None, modifiers=None):
# allow compatibility with libraries that
# refer to BinaryExpression directly and pass strings
if isinstance(operator, util.string_types):
operator = operators.custom_op(operator)
self._orig = (left, right)
self.left = left.self_group(against=operator)
self.right = right.self_group(against=operator)
self.operator = operator
self.type = type_api.to_instance(type_)
self.negate = negate
if modifiers is None:
self.modifiers = {}
else:
self.modifiers = modifiers
def __bool__(self):
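        # editor's note: truth-testing is only defined for expressions built
        # with == or !=; it compares the hashes of the original (ungrouped)
        # operands, giving an identity-style check, and raises TypeError for
        # any other operator.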
if self.operator in (operator.eq, operator.ne):
return self.operator(hash(self._orig[0]), hash(self._orig[1]))
else:
raise TypeError("Boolean value of this clause is not defined")
__nonzero__ = __bool__
@property
def is_comparison(self):
return operators.is_comparison(self.operator)
@property
def _from_objects(self):
return self.left._from_objects + self.right._from_objects
def _copy_internals(self, clone=_clone, **kw):
self.left = clone(self.left, **kw)
self.right = clone(self.right, **kw)
def get_children(self, **kwargs):
return self.left, self.right
def compare(self, other, **kw):
"""Compare this :class:`BinaryExpression` against the
given :class:`BinaryExpression`."""
return (
isinstance(other, BinaryExpression) and
self.operator == other.operator and
(
self.left.compare(other.left, **kw) and
self.right.compare(other.right, **kw) or
(
operators.is_commutative(self.operator) and
self.left.compare(other.right, **kw) and
self.right.compare(other.left, **kw)
)
)
)
def self_group(self, against=None):
if operators.is_precedent(self.operator, against):
return Grouping(self)
else:
return self
def _negate(self):
if self.negate is not None:
return BinaryExpression(
self.left,
self.right,
self.negate,
negate=self.operator,
type_=type_api.BOOLEANTYPE,
modifiers=self.modifiers)
else:
return super(BinaryExpression, self)._negate()
class Grouping(ColumnElement):
"""Represent a grouping within a column expression"""
__visit_name__ = 'grouping'
def __init__(self, element):
self.element = element
self.type = getattr(element, 'type', type_api.NULLTYPE)
def self_group(self, against=None):
return self
@property
def _label(self):
return getattr(self.element, '_label', None) or self.anon_label
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
def get_children(self, **kwargs):
return self.element,
@property
def _from_objects(self):
return self.element._from_objects
def __getattr__(self, attr):
return getattr(self.element, attr)
def __getstate__(self):
return {'element': self.element, 'type': self.type}
def __setstate__(self, state):
self.element = state['element']
self.type = state['type']
def compare(self, other, **kw):
return isinstance(other, Grouping) and \
self.element.compare(other.element)
class Over(ColumnElement):
"""Represent an OVER clause.
This is a special operator against a so-called
"window" function, as well as any aggregate function,
which produces results relative to the result set
itself. It's supported only by certain database
backends.
"""
__visit_name__ = 'over'
order_by = None
partition_by = None
def __init__(self, func, partition_by=None, order_by=None):
"""Produce an :class:`.Over` object against a function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
E.g.::
from sqlalchemy import over
over(func.row_number(), order_by='x')
Would produce "ROW_NUMBER() OVER(ORDER BY x)".
:param func: a :class:`.FunctionElement` construct, typically
generated by :data:`~.expression.func`.
:param partition_by: a column element or string, or a list
of such, that will be used as the PARTITION BY clause
of the OVER construct.
:param order_by: a column element or string, or a list
of such, that will be used as the ORDER BY clause
of the OVER construct.
This function is also available from the :data:`~.expression.func`
construct itself via the :meth:`.FunctionElement.over` method.
.. versionadded:: 0.7
"""
self.func = func
if order_by is not None:
self.order_by = ClauseList(*util.to_list(order_by))
if partition_by is not None:
self.partition_by = ClauseList(*util.to_list(partition_by))
@util.memoized_property
def type(self):
return self.func.type
def get_children(self, **kwargs):
return [c for c in
(self.func, self.partition_by, self.order_by)
if c is not None]
def _copy_internals(self, clone=_clone, **kw):
self.func = clone(self.func, **kw)
if self.partition_by is not None:
self.partition_by = clone(self.partition_by, **kw)
if self.order_by is not None:
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return list(itertools.chain(
*[c._from_objects for c in
(self.func, self.partition_by, self.order_by)
if c is not None]
))
class Label(ColumnElement):
"""Represents a column label (AS).
Represent a label, as typically applied to any column-level
element using the ``AS`` sql keyword.
"""
__visit_name__ = 'label'
def __init__(self, name, element, type_=None):
"""Return a :class:`Label` object for the
given :class:`.ColumnElement`.
A label changes the name of an element in the columns clause of a
``SELECT`` statement, typically via the ``AS`` SQL keyword.
This functionality is more conveniently available via the
:meth:`.ColumnElement.label` method on :class:`.ColumnElement`.
:param name: label name
:param obj: a :class:`.ColumnElement`.
"""
while isinstance(element, Label):
element = element.element
if name:
self.name = name
else:
self.name = _anonymous_label('%%(%d %s)s' % (id(self),
getattr(element, 'name', 'anon')))
self.key = self._label = self._key_label = self.name
self._element = element
self._type = type_
self._proxies = [element]
def __reduce__(self):
return self.__class__, (self.name, self._element, self._type)
@util.memoized_property
def _order_by_label_element(self):
return self
@util.memoized_property
def type(self):
return type_api.to_instance(
self._type or getattr(self._element, 'type', None)
)
@util.memoized_property
def element(self):
return self._element.self_group(against=operators.as_)
def self_group(self, against=None):
sub_element = self._element.self_group(against=against)
if sub_element is not self._element:
return Label(self.name,
sub_element,
type_=self._type)
else:
return self
@property
def primary_key(self):
return self.element.primary_key
@property
def foreign_keys(self):
return self.element.foreign_keys
def get_children(self, **kwargs):
return self.element,
def _copy_internals(self, clone=_clone, **kw):
self.element = clone(self.element, **kw)
@property
def _from_objects(self):
return self.element._from_objects
def _make_proxy(self, selectable, name=None, **kw):
e = self.element._make_proxy(selectable,
name=name if name else self.name)
e._proxies.append(self)
if self._type is not None:
e.type = self._type
return e
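# Editor's note: a minimal, hedged example of Label in practice (normally
# produced via ColumnElement.label(); table and column names are hypothetical):
#
#     stmt = select([users_table.c.name.label('username')])
#     # renders roughly as: SELECT users.name AS username FROM users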
class ColumnClause(Immutable, ColumnElement):
"""Represents a column expression from any textual string.
The :class:`.ColumnClause`, a lightweight analogue to the
:class:`.Column` class, is typically invoked using the
:func:`.column` function, as in::
from sqlalchemy.sql import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The above statement would produce SQL like::
SELECT id, name FROM user
:class:`.ColumnClause` is the immediate superclass of the schema-specific
:class:`.Column` object. While the :class:`.Column` class has all the
same capabilities as :class:`.ColumnClause`, the :class:`.ColumnClause`
class is usable by itself in those cases where behavioral requirements
are limited to simple SQL expression generation. The object has none of the
associations with schema-level metadata or with execution-time behavior
that :class:`.Column` does, so in that sense is a "lightweight" version
of :class:`.Column`.
    Full details on :class:`.ColumnClause` usage are at :func:`.column`.
.. seealso::
:func:`.column`
:class:`.Column`
"""
__visit_name__ = 'column'
onupdate = default = server_default = server_onupdate = None
_memoized_property = util.group_expirable_memoized_property()
def __init__(self, text, type_=None, is_literal=False, _selectable=None):
"""Produce a :class:`.ColumnClause` object.
The :class:`.ColumnClause` is a lightweight analogue to the
:class:`.Column` class. The :func:`.column` function can
be invoked with just a name alone, as in::
from sqlalchemy.sql import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The above statement would produce SQL like::
SELECT id, name FROM user
Once constructed, :func:`.column` may be used like any other SQL expression
element such as within :func:`.select` constructs::
from sqlalchemy.sql import column
id, name = column("id"), column("name")
stmt = select([id, name]).select_from("user")
The text handled by :func:`.column` is assumed to be handled
like the name of a database column; if the string contains mixed case,
special characters, or matches a known reserved word on the target
backend, the column expression will render using the quoting
behavior determined by the backend. To produce a textual SQL
expression that is rendered exactly without any quoting,
use :func:`.literal_column` instead, or pass ``True`` as the
value of :paramref:`.column.is_literal`. Additionally, full SQL
statements are best handled using the :func:`.text` construct.
:func:`.column` can be used in a table-like
fashion by combining it with the :func:`.table` function
(which is the lightweight analogue to :class:`.Table`) to produce
a working table construct with minimal boilerplate::
from sqlalchemy.sql import table, column
user = table("user",
column("id"),
column("name"),
column("description"),
)
stmt = select([user.c.description]).where(user.c.name == 'wendy')
A :func:`.column` / :func:`.table` construct like that illustrated
above can be created in an
ad-hoc fashion and is not associated with any :class:`.schema.MetaData`,
DDL, or events, unlike its :class:`.Table` counterpart.
:param text: the text of the element.
:param type: :class:`.types.TypeEngine` object which can associate
this :class:`.ColumnClause` with a type.
:param is_literal: if True, the :class:`.ColumnClause` is assumed to
be an exact expression that will be delivered to the output with no
          quoting rules applied regardless of case-sensitive settings. The
:func:`.literal_column()` function essentially invokes :func:`.column`
while passing ``is_literal=True``.
.. seealso::
:class:`.Column`
:func:`.literal_column`
:func:`.text`
:ref:`metadata_toplevel`
"""
self.key = self.name = text
self.table = _selectable
self.type = type_api.to_instance(type_)
self.is_literal = is_literal
def _compare_name_for_result(self, other):
if self.is_literal or \
self.table is None or self.table._textual or \
not hasattr(other, 'proxy_set') or (
isinstance(other, ColumnClause) and
(other.is_literal or
other.table is None or
other.table._textual)
):
return (hasattr(other, 'name') and self.name == other.name) or \
(hasattr(other, '_label') and self._label == other._label)
else:
return other.proxy_set.intersection(self.proxy_set)
def _get_table(self):
return self.__dict__['table']
def _set_table(self, table):
self._memoized_property.expire_instance(self)
self.__dict__['table'] = table
table = property(_get_table, _set_table)
@_memoized_property
def _from_objects(self):
t = self.table
if t is not None:
return [t]
else:
return []
@util.memoized_property
def description(self):
if util.py3k:
return self.name
else:
return self.name.encode('ascii', 'backslashreplace')
@_memoized_property
def _key_label(self):
if self.key != self.name:
return self._gen_label(self.key)
else:
return self._label
@_memoized_property
def _label(self):
return self._gen_label(self.name)
def _gen_label(self, name):
t = self.table
if self.is_literal:
return None
elif t is not None and t.named_with_column:
if getattr(t, 'schema', None):
label = t.schema.replace('.', '_') + "_" + \
t.name + "_" + name
else:
label = t.name + "_" + name
# propagate name quoting rules for labels.
if getattr(name, "quote", None) is not None:
if isinstance(label, quoted_name):
label.quote = name.quote
else:
label = quoted_name(label, name.quote)
elif getattr(t.name, "quote", None) is not None:
# can't get this situation to occur, so let's
# assert false on it for now
assert not isinstance(label, quoted_name)
label = quoted_name(label, t.name.quote)
# ensure the label name doesn't conflict with that
# of an existing column
if label in t.c:
_label = label
counter = 1
while _label in t.c:
_label = label + "_" + str(counter)
counter += 1
label = _label
return _as_truncated(label)
else:
return name
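    # Editor's note: a hedged, worked example of the labeling scheme above
    # (table, schema and column names are hypothetical):
    #
    #     table "user" (no schema), column "name"  ->  label "user_name"
    #     table "user" in schema "acct"            ->  label "acct_user_name"
    #
    # If that label already exists in t.c, a counter is appended, yielding
    # "user_name_1", "user_name_2", and so on.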
def _bind_param(self, operator, obj):
return BindParameter(self.name, obj,
_compared_to_operator=operator,
_compared_to_type=self.type,
unique=True)
def _make_proxy(self, selectable, name=None, attach=True,
name_is_truncatable=False, **kw):
# propagate the "is_literal" flag only if we are keeping our name,
        # otherwise it's considered to be a label
is_literal = self.is_literal and (name is None or name == self.name)
c = self._constructor(
_as_truncated(name or self.name) if \
name_is_truncatable else \
(name or self.name),
type_=self.type,
_selectable=selectable,
is_literal=is_literal
)
if name is None:
c.key = self.key
c._proxies = [self]
if selectable._is_clone_of is not None:
c._is_clone_of = \
selectable._is_clone_of.columns.get(c.key)
if attach:
selectable._columns[c.key] = c
return c
class _IdentifiedClause(Executable, ClauseElement):
__visit_name__ = 'identified'
_execution_options = \
Executable._execution_options.union({'autocommit': False})
def __init__(self, ident):
self.ident = ident
class SavepointClause(_IdentifiedClause):
__visit_name__ = 'savepoint'
class RollbackToSavepointClause(_IdentifiedClause):
__visit_name__ = 'rollback_to_savepoint'
class ReleaseSavepointClause(_IdentifiedClause):
__visit_name__ = 'release_savepoint'
class quoted_name(util.text_type):
"""Represent a SQL identifier combined with quoting preferences.
:class:`.quoted_name` is a Python unicode/str subclass which
represents a particular identifier name along with a
``quote`` flag. This ``quote`` flag, when set to
``True`` or ``False``, overrides automatic quoting behavior
for this identifier in order to either unconditionally quote
or to not quote the name. If left at its default of ``None``,
quoting behavior is applied to the identifier on a per-backend basis
based on an examination of the token itself.
A :class:`.quoted_name` object with ``quote=True`` is also
prevented from being modified in the case of a so-called
"name normalize" option. Certain database backends, such as
Oracle, Firebird, and DB2 "normalize" case-insensitive names
as uppercase. The SQLAlchemy dialects for these backends
convert from SQLAlchemy's lower-case-means-insensitive convention
to the upper-case-means-insensitive conventions of those backends.
The ``quote=True`` flag here will prevent this conversion from occurring
to support an identifier that's quoted as all lower case against
such a backend.
The :class:`.quoted_name` object is normally created automatically
when specifying the name for key schema constructs such as :class:`.Table`,
:class:`.Column`, and others. The class can also be passed explicitly
as the name to any function that receives a name which can be quoted.
    For example, to use the :meth:`.Engine.has_table` method with an unconditionally
quoted name::
        from sqlalchemy import create_engine
from sqlalchemy.sql.elements import quoted_name
engine = create_engine("oracle+cx_oracle://some_dsn")
engine.has_table(quoted_name("some_table", True))
The above logic will run the "has table" logic against the Oracle backend,
passing the name exactly as ``"some_table"`` without converting to
upper case.
.. versionadded:: 0.9.0
"""
def __new__(cls, value, quote):
if value is None:
return None
# experimental - don't bother with quoted_name
# if quote flag is None. doesn't seem to make any dent
# in performance however
# elif not sprcls and quote is None:
# return value
elif isinstance(value, cls) and (
quote is None or value.quote == quote
):
return value
self = super(quoted_name, cls).__new__(cls, value)
self.quote = quote
return self
def __reduce__(self):
return quoted_name, (util.text_type(self), self.quote)
@util.memoized_instancemethod
def lower(self):
if self.quote:
return self
else:
return util.text_type(self).lower()
@util.memoized_instancemethod
def upper(self):
if self.quote:
return self
else:
return util.text_type(self).upper()
def __repr__(self):
backslashed = self.encode('ascii', 'backslashreplace')
if not util.py2k:
backslashed = backslashed.decode('ascii')
return "'%s'" % backslashed
class _truncated_label(quoted_name):
"""A unicode subclass used to identify symbolic "
"names that may require truncation."""
def __new__(cls, value, quote=None):
quote = getattr(value, "quote", quote)
#return super(_truncated_label, cls).__new__(cls, value, quote, True)
return super(_truncated_label, cls).__new__(cls, value, quote)
def __reduce__(self):
return self.__class__, (util.text_type(self), self.quote)
def apply_map(self, map_):
return self
# for backwards compatibility in case
# someone is re-implementing the
# _truncated_identifier() sequence in a custom
# compiler
_generated_label = _truncated_label
class _anonymous_label(_truncated_label):
"""A unicode subclass used to identify anonymously
generated names."""
def __add__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(self, util.text_type(other)),
self.quote)
)
def __radd__(self, other):
return _anonymous_label(
quoted_name(
util.text_type.__add__(util.text_type(other), self),
self.quote)
)
def apply_map(self, map_):
if self.quote is not None:
# preserve quoting only if necessary
return quoted_name(self % map_, self.quote)
else:
# else skip the constructor call
return self % map_
def _as_truncated(value):
"""coerce the given value to :class:`._truncated_label`.
Existing :class:`._truncated_label` and
:class:`._anonymous_label` objects are passed
unchanged.
"""
if isinstance(value, _truncated_label):
return value
else:
return _truncated_label(value)
def _string_or_unprintable(element):
if isinstance(element, util.string_types):
return element
else:
try:
return str(element)
except:
return "unprintable element %r" % element
def _expand_cloned(elements):
"""expand the given set of ClauseElements to be the set of all 'cloned'
predecessors.
"""
return itertools.chain(*[x._cloned_set for x in elements])
def _select_iterables(elements):
"""expand tables into individual columns in the
given list of column expressions.
"""
return itertools.chain(*[c._select_iterable for c in elements])
def _cloned_intersection(a, b):
"""return the intersection of sets a and b, counting
any overlap between 'cloned' predecessors.
The returned set is in terms of the entities present within 'a'.
"""
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if all_overlap.intersection(elem._cloned_set))
def _cloned_difference(a, b):
all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b))
return set(elem for elem in a
if not all_overlap.intersection(elem._cloned_set))
def _labeled(element):
if not hasattr(element, 'name'):
return element.label(None)
else:
return element
def _is_column(col):
"""True if ``col`` is an instance of :class:`.ColumnElement`."""
return isinstance(col, ColumnElement)
def _find_columns(clause):
"""locate Column objects within the given expression."""
cols = util.column_set()
traverse(clause, {}, {'column': cols.add})
return cols
# there is some inconsistency here between the usage of
# inspect() vs. checking for Visitable and __clause_element__.
# Ideally all functions here would derive from inspect(),
# however the inspect() versions add significant callcount
# overhead for critical functions like _interpret_as_column_or_from().
# Generally, the column-based functions are more performance critical
# and are fine just checking for __clause_element__(). it's only
# _interpret_as_from() where we'd like to be able to receive ORM entities
# that have no defined namespace, hence inspect() is needed there.
def _column_as_key(element):
if isinstance(element, util.string_types):
return element
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
try:
return element.key
except AttributeError:
return None
def _clause_element_as_expr(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
else:
return element
def _literal_as_text(element):
if isinstance(element, Visitable):
return element
elif hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif isinstance(element, util.string_types):
return TextClause(util.text_type(element))
elif isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
else:
raise exc.ArgumentError(
"SQL expression object or string expected."
)
def _no_literals(element):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' "
"function to indicate a SQL expression "
"literal, or 'literal()' to indicate a "
"bound value." % element)
else:
return element
def _is_literal(element):
return not isinstance(element, Visitable) and \
not hasattr(element, '__clause_element__')
def _only_column_elements_or_none(element, name):
if element is None:
return None
else:
return _only_column_elements(element, name)
def _only_column_elements(element, name):
if hasattr(element, '__clause_element__'):
element = element.__clause_element__()
if not isinstance(element, ColumnElement):
raise exc.ArgumentError(
"Column-based expression object expected for argument "
"'%s'; got: '%s', type %s" % (name, element, type(element)))
return element
def _literal_as_binds(element, name=None, type_=None):
if hasattr(element, '__clause_element__'):
return element.__clause_element__()
elif not isinstance(element, Visitable):
if element is None:
return Null()
else:
return BindParameter(name, element, type_=type_, unique=True)
else:
return element
def _interpret_as_column_or_from(element):
if isinstance(element, Visitable):
return element
elif hasattr(element, '__clause_element__'):
return element.__clause_element__()
insp = inspection.inspect(element, raiseerr=False)
if insp is None:
if isinstance(element, (util.NoneType, bool)):
return _const_expr(element)
elif hasattr(insp, "selectable"):
return insp.selectable
return ColumnClause(str(element), is_literal=True)
def _const_expr(element):
if isinstance(element, (Null, False_, True_)):
return element
elif element is None:
return Null()
elif element is False:
return False_()
elif element is True:
return True_()
else:
raise exc.ArgumentError(
"Expected None, False, or True"
)
def _type_from_args(args):
for a in args:
if not a.type._isnull:
return a.type
else:
return type_api.NULLTYPE
def _corresponding_column_or_error(fromclause, column,
require_embedded=False):
c = fromclause.corresponding_column(column,
require_embedded=require_embedded)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
%
(column,
getattr(column, 'table', None),
fromclause.description)
)
return c
class AnnotatedColumnElement(Annotated):
def __init__(self, element, values):
Annotated.__init__(self, element, values)
ColumnElement.comparator._reset(self)
for attr in ('name', 'key', 'table'):
if self.__dict__.get(attr, False) is None:
self.__dict__.pop(attr)
def _with_annotations(self, values):
clone = super(AnnotatedColumnElement, self)._with_annotations(values)
ColumnElement.comparator._reset(clone)
return clone
@util.memoized_property
def name(self):
"""pull 'name' from parent, if not present"""
return self._Annotated__element.name
@util.memoized_property
def table(self):
"""pull 'table' from parent, if not present"""
return self._Annotated__element.table
@util.memoized_property
def key(self):
"""pull 'key' from parent, if not present"""
return self._Annotated__element.key
@util.memoized_property
def info(self):
return self._Annotated__element.info
|
FRC-Team-3140/north-american-happiness
|
lib/python2.7/site-packages/sqlalchemy/sql/elements.py
|
Python
|
mit
| 117,479
|
[
"VisIt"
] |
3b3ca0ea84b1509d11ae213cf17fca545b7f53a42af2e47864c60b4713fb40f5
|
import memcache
import sys
from edge import EdgeLocation
class GlobalMap:
def __init__(self, map_filename):
self.map = {}
self.load_map(map_filename)
self.null_edge = EdgeLocation('', '', [])
def add_neighbors(self, u, v):
if not u in self.map:
self.map[u] = set()
if not v in self.map[u]:
self.map[u].add(v)
def load_map(self, map_filename):
self.map = {}
f = open(map_filename, 'r')
for line in f.readlines():
(u, v) = line.split()
self.add_neighbors(u, v)
self.add_neighbors(v, u)
def check_connectivity(self):
visited = {}
src = None
for country in self.map.iterkeys():
visited[country] = False
if not src:
src = country
# visit countries with a BFS
queue = [src]
visited[src] = True
while queue:
country = queue.pop(0)
for neighbor in self.map[country]:
if not visited[neighbor]:
queue.append(neighbor)
visited[neighbor] = True
for country in visited:
if not visited[country]:
raise Exception('Countries map does not have connectivity: '
'%s unreachable' % (country))
def assign_edge_locations(self, edge_locations):
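        # Multi-source BFS over the country adjacency map: every country that
        # hosts at least one edge location is seeded at distance 0, and each
        # remaining country is assigned the edge locations of its nearest
        # seeded country (falling back to null_edge when none is reachable).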
# group edge locations by country
by_country = {}
for node in edge_locations:
if not node.country in by_country:
by_country[node.country] = []
by_country[node.country].append(node)
# init the BFS parameters
dist = {}
parent = {}
for country in self.map.iterkeys():
dist[country] = sys.maxint
parent[country] = None
queue = []
for country in by_country:
queue.append(country)
dist[country] = 0
parent[country] = country
while queue:
country = queue.pop(0)
for neighbor in self.map[country]:
if dist[neighbor] > dist[country] + 1:
                    dist[neighbor] = dist[country] + 1
parent[neighbor] = parent[country]
queue.append(neighbor)
# assign an edge location to each country
self.edge_map = {}
for country in self.map.iterkeys():
if parent[country]:
el = by_country[parent[country]]
self.edge_map[country] = el[0]
if len(el) > 1:
el.append(el.pop(0))
else:
# if there is no edge location
self.edge_map[country] = self.null_edge
def update_memcache(self):
mc = memcache.Client(['127.0.0.1:11211'])
data = {}
for country in self.edge_map:
data[country] = self.edge_map[country].address
mc.set_multi(data, 900)
def __str__(self):
return str(self.map)
|
ema/conpaas
|
conpaas-services/src/conpaas/services/cds/manager/map.py
|
Python
|
bsd-3-clause
| 3,049
|
[
"VisIt"
] |
8ef0b5428c7e610639a1162ab07ad941a6cd6f7634b0d519c3f76adb70d1fca5
|
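# Site-specific GPAW customize fragment: names such as compiler,
# extra_compile_args, libraries and extra_link_args are assumed to be
# pre-defined by GPAW's build/setup script and are extended or overridden
# here to build against ACML on this particular cluster.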
scalapack = False
compiler = 'gcc'
extra_compile_args += [
'-O3',
'-funroll-all-loops',
'-fPIC',
]
libraries = ['gfortran', 'util']
blas_lib_path = '/home/lv70174/gpaw/opt/acml-4.0.1/gfortran64/lib/'
lapack_lib_path = blas_lib_path
extra_link_args = [
blas_lib_path+'libacml.a',
lapack_lib_path+'libacml.a',
]
|
robwarm/gpaw-symm
|
doc/install/Linux/customize_vsc_univie.py
|
Python
|
gpl-3.0
| 343
|
[
"GPAW"
] |
c4f6749ec58e10d701e3b64d37edbcbfb28d2144b9c95d919d530a5018e88803
|
#!/usr/bin/env python
import sys
import os
import scipy.optimize
import re
import math
from optparse import OptionParser
HELP="""This program will optimize the energy with respect to any numerical parameter
which is preceded by a key character (typically a *, but this can be changed by
changing the 'key' variable in the code).
The final optimized file will be in 'staropt.inp'
Supported DFT/quantum chemistry programs are listed below. As part of the
installation, you may need to change the executables to the correct location for
your computer.
usage: %s [options] inputfile
"""%sys.argv[0]
key="*"
GAMESS_EXE="gms"
CRYSTAL_EXE="crystal"
def gen_parm_list(lines):
"""Search lines for numbers with *'s next to them and extract
them into a list of parameters"""
parms=[]
for line in lines:
spl=line.split()
for word in spl:
if word[0]==key:
parms.append(float(word[1:]))
return parms
def apply_parm_to_list(parms,lines):
count=0
newlines=[]
for line in lines:
if line.count(key) == 0:
newlines.append(line)
else:
spl=line.split()
newline=""
for i,word in enumerate(spl):
if word[0]==key:
newline+=str(parms[count])+" "
count+=1
else:
newline+=word+ " "
newlines.append(newline+"\n")
return newlines
def get_en_gamess(parms,lines):
nwlines=apply_parm_to_list(parms,lines)
f=open("staropt.inp",'w')
for line in nwlines:
f.write(line)
f.close()
os.system(GAMESS_EXE+" staropt.inp")
f=open("staropt.log",'r')
en=0.0
for l in f.readlines():
if l.count("TOTAL ENERGY =")>0:
spl=l.split()
en=float(spl[3])
print "en ",en, parms
sys.stdout.flush()
return en
def get_en_crys(parms,lines):
nwlines=apply_parm_to_list(parms,lines)
f=open("staropt.inp",'w')
for line in nwlines:
f.write(line)
f.close()
os.system(CRYSTAL_EXE+" < staropt.inp > staropt.inp.o")
f=open("staropt.inp.o",'r')
en=0.0
for l in f.readlines():
if l.count("SCF ENDED")>0:
spl=l.split()
en=float(spl[8])
if math.isnan(en):
en=1e8
print "en ",en, parms
sys.stdout.flush()
return en
if __name__ == "__main__":
parser = OptionParser(usage=HELP)
parser.add_option("-g", "--gamess",action="store_true", default=False,
help="use GAMESS to evaluate file.")
parser.add_option("-c", "--crystal",action="store_true", default=False,
help="use CRYSTAL to evaluate file.")
(options, args) = parser.parse_args()
if len(args) < 1:
parser.print_usage()
quit()
f=open(args[0],'r')
lines=f.readlines()
f.close()
parms=gen_parm_list(lines)
if options.gamess:
parmopt=scipy.optimize.fmin_powell(get_en_gamess,parms,args=[lines])
get_en_gamess(parmopt,lines)
elif options.crystal:
parmopt=scipy.optimize.fmin_powell(get_en_crys,parms,args=[lines])
get_en_crys(parmopt,lines)
else:
parser.print_usage()
quit()
|
QWalk/mainline
|
utils/optimize_stars.py
|
Python
|
gpl-2.0
| 2,986
|
[
"CRYSTAL",
"GAMESS"
] |
53a6743a3a25a418e86ebefaedfd6f5ccc4953bb4ec240fa5003cc5bc9b97cb1
|
# -*- coding:Utf-8 -*-
import doctest
import os
import logging
import pdb
import sys
import numpy as np
import scipy as sp
import scipy.io as io
import scipy.signal as si
import scipy.linalg as la
import matplotlib.pylab as plt
import pylayers.signal.bsignal as bs
from pylayers.measures import mesuwb
class Waveform(dict):
"""
Attributes
----------
st : time domain
sf : frequency domain
sfg : frequency domain integrated
Methods
-------
eval
showpsd
ip_generic
fromfile
fromfile2
read
gui
show
"""
def __init__(self,**kwargs):
"""
Parameters
----------
'typ' : string
'generic',
'WGHz': float
0.499
'fcGHz': float
4.493
'fsGHz': float
100,
'threshdB':
3,
'twns': float
30
typ : 'generic','W1compensate','W1offset'
"""
defaults = {'typ':'generic',
'fGHz':[],
'WGHz': 0.499,
'fcGHz': 4.493,
'fsGHz': 100,
'threshdB': 3,
'twns': 30}
for key, value in defaults.items():
if key not in kwargs:
self[key] = value
else:
self[key] = kwargs[key]
self.eval()
def eval(self):
u""" evaluate waveform
        The :math:`\lambda/(4\pi)` factor, which is necessary to get the proper
        link budget (from the Friis formula), is introduced in this function.
"""
if self['typ'] == 'generic':
[st,sf]=self.ip_generic()
#elif self['typ'] == 'mbofdm':
# [st,sf]=self.mbofdm()
elif self['typ'] == 'W1compensate':
[st,sf]=self.fromfile()
elif self['typ'] == 'W1offset':
[st,sf]=self.fromfile2()
        elif self['typ'] == 'blackmann':
            fGHz = self['fGHz']
            sf = bs.FUsignal(x=fGHz, y=np.blackman(len(fGHz)))
            st = sf.ift()
        elif self['typ'] == 'rect':
            fGHz = self['fGHz']
            sf = bs.FUsignal(x=fGHz, y=np.ones(len(fGHz)))
            st = sf.ift()
        elif self['typ'] == 'hamming':
            fGHz = self['fGHz']
            sf = bs.FUsignal(x=fGHz, y=np.hamming(len(fGHz)))
            st = sf.ift()
elif self['typ'] == 'ref156':
[st,sf] = self.ref156()
else:
logging.critical('waveform typ not recognized, check your config \
file')
self.st = st
self.sf = sf
self.fGHz = self.sf.x
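        # -j*lambda/(4*pi) factor from the Friis link budget: with f in GHz,
        # 0.3/fGHz is the wavelength in metres (c ~ 0.3 m/ns).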
ygamma = -1j*0.3/(4*np.pi*self.fGHz)
self.gamm = bs.FUsignal(x=self.fGHz,y=ygamma)
self.sfg = self.sf*self.gamm
self.sfgh = self.sfg.symH(0)
self.stgh = self.sfgh.ifft(1)
def info(self):
""" display information about waveform
Results
-------
>>> from pylayers.signal.waveform import *
>>> w = Waveform(typ='generic',WGHz=0.499,fcGHz=4.49,fsGHz=100,threshdB=3,twns=30)
>>> w.show()
>>> plt.show()
"""
if self['typ']=='generic':
for k in self.keys():
print(k , " : ",self[k])
else:
print("typ:",self['typ'])
def showpsd(self,Tpns=1000):
""" show psd
Parameters
----------
Tpns : float
"""
plt.subplot(211)
self.st.plot()
plt.subplot(212)
psd = self.st.psd(Tpns,50)
plt.title('Tp = '+str(Tpns)+' ns')
psd.plotdB(mask=True)
def ip_generic(self):
""" Create an energy normalized Gaussian impulse (Usignal)
ip_generic(self,parameters)
"""
Tw = self['twns']
fcGHz = self['fcGHz']
WGHz = self['WGHz']
thresh = self['threshdB']
fsGHz = self['fsGHz']
ts = 1.0/fsGHz
self['ts'] = ts
Np = fsGHz*Tw
self['Np'] = Np
#x = np.linspace(-0.5*Tw+ts/2,0.5*Tw+ts/2,Np,endpoint=False)
#x = arange(-Tw,Tw,ts)
w = bs.TUsignal()
w.EnImpulse(fcGHz=fcGHz,WGHz=WGHz,threshdB=thresh,fsGHz=fsGHz)
#W = w.ft()
W = w.ft()
return (w,W)
def ref156(self,beta=0.5):
""" reference pulse of IEEE 802.15.6 UWB standard
Parameters
----------
beta : float
roll-off factor
Tns = 1/499.2MHz
Notes
-----
From P8O2.15.6/D02 December 2010 Formula 96 p 215
"""
Tw = self['twns']
fs = self['fsGHz']
Np = Tw*fs
Ts = 1./fs
beta = 0.5
Tns = 1./0.4992
x = np.linspace(-0.5*Tw+Ts/2, 0.5*Tw+Ts/2, Np, endpoint=False)
z = x/Tns
t1 = np.sin(np.pi*(1-beta)*z)
t2 = np.cos(np.pi*(1+beta)*z)
t3 = (np.pi*z)*(1-(4*beta*z)**2)
y = (t1 + 4*beta*z*t2)/t3
st = bs.TUsignal()
st.x = x
st.y = y[None,:]
sf = st.ftshift()
return(st,sf)
def fromfile(self):
""" get the measurement waveform from WHERE1 measurement campaign
This function is not yet generic
>>> from pylayers.signal.waveform import *
>>> wav = Waveform(typ='W1compensate')
>>> wav.show()
"""
M = mesuwb.UWBMeasure(1,h=1)
w = bs.TUsignal()
ts = M.RAW_DATA.timetx[0]
tns = ts*1e9
ts = tns[1]-tns[0]
y = M.RAW_DATA.tx[0]
# find peak position u is the index of the peak
# yap :after peak
# ybp : before peak
# yzp : zero padding
maxy = max(y)
u = np.where(y ==maxy)[0][0]
yap = y[u:]
ybp = y[0:u]
yzp = np.zeros(len(yap)-len(ybp)-1)
tnsp = np.arange(0, tns[-1]-tns[u]+0.5*ts, ts)
tnsm = np.arange(-(tns[-1]-tns[u]), 0, ts)
y = np.hstack((yzp, np.hstack((ybp, yap))))
tns = np.hstack((tnsm, tnsp))
#
        # Warning: check that the 1/sqrt(30) factor is not also applied elsewhere
#
w.x = tns
w.y = y[None,:]*(1/np.sqrt(30))
# w : TUsignal
# W : FUsignal (Hermitian redundancy removed)
W = w.ftshift()
return (w,W)
def fromfile2(self):
"""
get the measurement waveform from WHERE1 measurement campaign
This function is not yet generic
>>> from pylayers.signal.waveform import *
>>> wav = Waveform(typ='W1offset')
>>> wav.show()
"""
M = mesuwb.UWBMeasure(1,1)
w = bs.TUsignal()
ts = M.RAW_DATA.timetx[0]
tns = ts*1e9
Ts = tns[1]-tns[0]
y = M.RAW_DATA.tx[0]
# find peak position u is the index of the peak
# yap :after peak
# ybp : before peak
# yzp : zero padding
# maxy = max(y)
# u = np.where(y ==maxy)[0][0]
# yap = y[u:]
# ybp = y[0:u]
yzp = np.zeros(len(y)-1)
# tnsp = np.arange(0,tns[-1]-tns[u]+0.5*ts,ts)
# tnsm = np.arange(-(tns[-1]-tns[u]),0,ts)
N = len(ts)-1
tnsm = np.linspace(-tns[-1],-Ts,N)
y = np.hstack((yzp,y))
tns = np.hstack((tnsm,tns))
#
        # Warning: check that the 1/sqrt(30) factor is not also applied elsewhere
#
w.x = tns
w.y = (y*(1/np.sqrt(30)))[None,:]
# w : TUsignal
# W : FUsignal (Hermitian redundancy removed)
W = w.ftshift()
return (w,W)
def read(self,config):
"""
Parameters
----------
config : ConfigParser object
Returns
-------
w : waveform
"""
par = config.items("waveform")
for k in range(len(par)):
key = par[k][0]
val = par[k][1]
if key == "WGHz":
self[key] = float(val)
if key == "fcGHz":
self[key] = float(val)
if key == "feGHz":
self[key] = float(val)
if key == "threshdB":
self[key] = float(val)
if key == "twns":
self[key] = float(val)
if key == "typ":
self[key] = val
self.eval()
def bandwidth(self,th_ratio=10000,Npt=100):
""" Determine effective bandwidth of the waveform.
Parameters
----------
th_ratio : float
threshold ratio
threshold = max(abs())/th_ratio
Npt : Number of points
"""
u=np.where(np.abs(self.sf.y)>np.max(np.abs(self.sf.y))/th_ratio)
#fGHz = self.sf.x[u[1]]
fGHz_start = self.sf.x[u[1]][0]
fGHz_stop = self.sf.x[u[1]][-1]
fGHz = np.linspace(fGHz_start,fGHz_stop,Npt)
return fGHz
def gui(self):
"""
Get the Waveform parameter
"""
        if self['typ'] == 'generic':
            self.st.plot()
            plt.show()
        # multenterbox comes from the easygui package, which is not imported
        # in this module.
        wavegui = multenterbox('','Waveform Parameter',
('Tw (ns) integer value',
'fc (GHz)',
'W (GHz)',
'thresh (dB)',
'fs (GHz) integer value'),
( self['twns'] ,
self['fcGHz'] ,
self['WGHz'] ,
self['threshdB'],
self['feGHz']
))
self.parameters['Twns'] = eval(wavegui[0])
self.parameters['fcGHz'] = eval(wavegui[1])
self.parameters['WGHz'] = eval(wavegui[2])
self.parameters['threshdB'] = eval(wavegui[3])
self.parameters['fsGHz'] = eval(wavegui[4])
[st,sf] = self.ip_generic()
self.st = st
self.sf = sf
        st.plot()
        plt.show()
def show(self,fig=[]):
""" show waveform in time and frequency domain
Parameters
----------
fig : figure
"""
# title construction
if fig ==[]:
fig = plt.figure()
title =''
for pk in self.keys():
val = self[pk]
title = title + pk + ': '
if type(val) != 'str':
title = title + str(val) + ' '
#plt.title(title)
ax1 = fig.add_subplot(2,1,1)
ax1.plot(self.st.x,self.st.y[0,:])
plt.xlabel('time (ns)')
plt.ylabel('level in linear scale')
ax2 = fig.add_subplot(2,1,2)
ax2.plot(self.sf.x,abs(self.sf.y[0,:]))
plt.xlabel('frequency (GHz)')
plt.ylabel('level in linear scale')
fig.suptitle(title)
if __name__ == "__main__":
plt.ion()
doctest.testmod()
|
pylayers/pylayers
|
pylayers/signal/waveform.py
|
Python
|
mit
| 10,638
|
[
"Gaussian"
] |
503f7da25bea1e72299ec8c7c5c09cd7da5f43a14ba2fca5461723145b121240
|
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
r"""
Mean Squared Displacement --- :mod:`MDAnalysis.analysis.msd`
==============================================================
:Authors: Hugo MacDermott-Opeskin
:Year: 2020
:Copyright: GNU Public License v2
This module implements the calculation of Mean Squared Displacements (MSDs)
by the Einstein relation. MSDs can be used to characterize the speed at
which particles move and have their roots in the study of Brownian motion.
For a full explanation of the theory behind MSDs and the subsequent
calculation of self-diffusivities the reader is directed to [Maginn2019]_.
MSDs can be computed from the following expression, known as the
**Einstein formula**:
.. math::
MSD(r_{d}) = \bigg{\langle} \frac{1}{N} \sum_{i=1}^{N} |r_{d}
- r_{d}(t_0)|^2 \bigg{\rangle}_{t_{0}}
where :math:`N` is the number of equivalent particles the MSD is calculated
over, :math:`r` are their coordinates and :math:`d` the desired dimensionality
of the MSD. Note that while the definition of the MSD is universal, there are
many practical considerations to computing the MSD that vary between
implementations. In this module, we compute a "windowed" MSD, where the MSD
is averaged over all possible lag-times :math:`\tau \le \tau_{max}`,
where :math:`\tau_{max}` is the length of the trajectory, thereby maximizing
the number of samples.
The computation of the MSD in this way can be computationally intensive due to
its :math:`N^2` scaling with respect to :math:`\tau_{max}`. An algorithm to
compute the MSD with :math:`N log(N)` scaling based on a Fast Fourier
Transform is known and can be accessed by setting ``fft=True`` [Calandri2011]_
[Buyl2018]_. The FFT-based approach requires that the
`tidynamics <https://github.com/pdebuyl-lab/tidynamics>`_ package is
installed; otherwise the code will raise an :exc:`ImportError`.
Please cite [Calandri2011]_ [Buyl2018]_ if you use this module in addition to
the normal MDAnalysis citations.
.. warning::
To correctly compute the MSD using this analysis module, you must supply
coordinates in the **unwrapped** convention. That is, when atoms pass
the periodic boundary, they must not be **wrapped** back into the primary
simulation cell. MDAnalysis does not currently offer this functionality in
the ``MDAnalysis.transformations`` API despite having functions with
similar names. We plan to implement the appropriate transformations in the
future. In the meantime, various simulation packages provide utilities to
convert coordinates to the unwrapped convention. In GROMACS for example,
this can be done using ``gmx trjconv`` with the ``-pbc nojump`` flag.
Computing an MSD
----------------
This example computes a 3D MSD for the movement of 100 particles undergoing a
random walk. Files provided as part of the MDAnalysis test suite are used
(in the variables :data:`~MDAnalysis.tests.datafiles.RANDOM_WALK` and
:data:`~MDAnalysis.tests.datafiles.RANDOM_WALK_TOPO`)
First load all modules and test data
.. code-block:: python
import MDAnalysis as mda
import MDAnalysis.analysis.msd as msd
from MDAnalysis.tests.datafiles import RANDOM_WALK, RANDOM_WALK_TOPO
Given a universe containing trajectory data we can extract the MSD
analysis by using the class :class:`EinsteinMSD`
.. code-block:: python
u = mda.Universe(RANDOM_WALK, RANDOM_WALK_TOPO)
MSD = msd.EinsteinMSD(u, select='all', msd_type='xyz', fft=True)
MSD.run()
The MSD can then be accessed as
.. code-block:: python
msd = MSD.results.timeseries
Visual inspection of the MSD is important, so let's take a look at it with a
simple plot.
.. code-block:: python
import matplotlib.pyplot as plt
nframes = MSD.n_frames
timestep = 1 # this needs to be the actual time between frames
lagtimes = np.arange(nframes)*timestep # make the lag-time axis
fig = plt.figure()
ax = plt.axes()
# plot the actual MSD
    ax.plot(lagtimes, msd, color="black", ls="-", label=r'3D random walk')
exact = lagtimes*6
# plot the exact result
    ax.plot(lagtimes, exact, color="black", ls="--", label=r'$y=2 D\tau$')
plt.show()
This gives us the plot of the MSD with respect to lag-time (:math:`\tau`).
We can see that the MSD is approximately linear with respect to :math:`\tau`.
This is a numerical example of a known theoretical result that the MSD of a
random walk is linear with respect to lag-time, with a slope of :math:`2d`.
In this expression :math:`d` is the dimensionality of the MSD. For our 3D MSD,
this is 3. For comparison we have plotted the line :math:`y=6\tau` to which an
ensemble of 3D random walks should converge.
.. _figure-msd:
.. figure:: /images/msd_demo_plot.png
:scale: 100 %
:alt: MSD plot
Note that a segment of the MSD is required to be linear to accurately
determine self-diffusivity. This linear segment represents the so called
"middle" of the MSD plot, where ballistic trajectories at short time-lags are
excluded along with poorly averaged data at long time-lags. We can select the
"middle" of the MSD by indexing the MSD and the time-lags. Appropriately
linear segments of the MSD can be confirmed with a log-log plot as is often
recommended [Maginn2019]_, where the "middle" segment can be identified as
having a slope of 1.
.. code-block:: python
plt.loglog(lagtimes, msd)
plt.show()
Now that we have identified what segment of our MSD to analyse, let's compute
a self-diffusivity.
Computing Self-Diffusivity
--------------------------------
Self-diffusivity is closely related to the MSD.
.. math::
D_d = \frac{1}{2d} \lim_{t \to \infty} \frac{d}{dt} MSD(r_{d})
From the MSD, self-diffusivities :math:`D` with the desired dimensionality
:math:`d` can be computed by fitting the MSD with respect to the lag-time to
a linear model. An example of this is shown below, using the MSD computed in
the example above. The segment between :math:`\tau = 20` and :math:`\tau = 60`
is used to demonstrate selection of a MSD segment.
.. code-block:: python
from scipy.stats import linregress
start_time = 20
start_index = int(start_time/timestep)
    end_time = 60
    end_index = int(end_time/timestep)
    linear_model = linregress(lagtimes[start_index:end_index],
                              msd[start_index:end_index])
slope = linear_model.slope
error = linear_model.rvalue
# dim_fac is 3 as we computed a 3D msd with 'xyz'
D = slope * 1/(2*MSD.dim_fac)
We have now computed a self-diffusivity!
Notes
_____
There are several factors that must be taken into account when setting up and
processing trajectories for computation of self-diffusivities.
These include specific instructions around simulation settings, using
unwrapped trajectories and maintaining a relatively small elapsed time between
saved frames. Additionally, corrections for finite size effects are sometimes
employed along with various means of estimating errors [Yeh2004]_ [Bulow2020]_.
The reader is directed to the following review, which describes many of the
common pitfalls [Maginn2019]_. There are other ways to compute
self-diffusivity, such as from a Green-Kubo integral. At this point in time,
these methods are beyond the scope of this module.
Note also that computation of MSDs is highly memory intensive. If this is
proving a problem, judicious use of the ``start``, ``stop``, ``step`` keywords to control which frames are incorporated may be required.
References
----------
.. [Maginn2019] Maginn, E. J., Messerly, R. A., Carlson, D. J.; Roe, D. R.,
Elliott, J. R. Best Practices for Computing Transport
Properties 1. Self-Diffusivity and Viscosity from Equilibrium
Molecular Dynamics [Article v1.0]. Living J. Comput. Mol. Sci.
2019, 1 (1).
.. [Yeh2004] Yeh, I. C.; Hummer, G. System-Size Dependence of Diffusion
Coefficients and Viscosities from Molecular Dynamics
Simulations with Periodic Boundary Conditions.
J. Phys. Chem. B 2004, 108 (40), 15873–15879.
.. [Bulow2020] von Bülow, S.; Bullerjahn, J. T.; Hummer, G. Systematic
Errors in Diffusion Coefficients from Long-Time Molecular
Dynamics Simulations at Constant Pressure. 2020.
arXiv:2003.09205 [Cond-Mat, Physics:Physics].
Classes
-------
.. autoclass:: EinsteinMSD
:members:
:inherited-members:
"""
import numpy as np
import logging
from ..due import due, Doi
from .base import AnalysisBase
from ..core import groups
logger = logging.getLogger('MDAnalysis.analysis.msd')
due.cite(Doi("10.21105/joss.00877"),
description="Mean Squared Displacements with tidynamics",
path="MDAnalysis.analysis.msd",
cite_module=True)
due.cite(Doi("10.1051/sfn/201112010"),
description="FCA fast correlation algorithm",
path="MDAnalysis.analysis.msd",
cite_module=True)
del Doi
class EinsteinMSD(AnalysisBase):
r"""Class to calculate Mean Squared Displacement by the Einstein relation.
Parameters
----------
u : Universe or AtomGroup
An MDAnalysis :class:`Universe` or :class:`AtomGroup`.
Note that :class:`UpdatingAtomGroup` instances are not accepted.
select : str
A selection string. Defaults to "all" in which case
all atoms are selected.
msd_type : {'xyz', 'xy', 'yz', 'xz', 'x', 'y', 'z'}
Desired dimensions to be included in the MSD. Defaults to 'xyz'.
fft : bool
If ``True``, uses a fast FFT based algorithm for computation of
the MSD. Otherwise, use the simple "windowed" algorithm.
The tidynamics package is required for `fft=True`.
Defaults to ``True``.
Attributes
----------
dim_fac : int
Dimensionality :math:`d` of the MSD.
results.timeseries : :class:`numpy.ndarray`
The averaged MSD over all the particles with respect to lag-time.
results.msds_by_particle : :class:`numpy.ndarray`
The MSD of each individual particle with respect to lag-time.
ag : :class:`AtomGroup`
The :class:`AtomGroup` resulting from your selection
n_frames : int
Number of frames included in the analysis.
n_particles : int
Number of particles MSD was calculated over.
.. versionadded:: 2.0.0
"""
def __init__(self, u, select='all', msd_type='xyz', fft=True, **kwargs):
r"""
Parameters
----------
u : Universe or AtomGroup
An MDAnalysis :class:`Universe` or :class:`AtomGroup`.
select : str
A selection string. Defaults to "all" in which case
all atoms are selected.
msd_type : {'xyz', 'xy', 'yz', 'xz', 'x', 'y', 'z'}
Desired dimensions to be included in the MSD.
fft : bool
If ``True``, uses a fast FFT based algorithm for computation of
the MSD. Otherwise, use the simple "windowed" algorithm.
The tidynamics package is required for `fft=True`.
"""
if isinstance(u, groups.UpdatingAtomGroup):
raise TypeError("UpdatingAtomGroups are not valid for MSD "
"computation")
super(EinsteinMSD, self).__init__(u.universe.trajectory, **kwargs)
# args
self.select = select
self.msd_type = msd_type
self._parse_msd_type()
self.fft = fft
# local
self.ag = u.select_atoms(self.select)
self.n_particles = len(self.ag)
self._position_array = None
# result
self.results.msds_by_particle = None
self.results.timeseries = None
def _prepare(self):
# self.n_frames only available here
# these need to be zeroed prior to each run() call
self.results.msds_by_particle = np.zeros((self.n_frames,
self.n_particles))
self._position_array = np.zeros(
(self.n_frames, self.n_particles, self.dim_fac))
# self.results.timeseries not set here
def _parse_msd_type(self):
r""" Sets up the desired dimensionality of the MSD.
"""
keys = {'x': [0], 'y': [1], 'z': [2], 'xy': [0, 1],
'xz': [0, 2], 'yz': [1, 2], 'xyz': [0, 1, 2]}
self.msd_type = self.msd_type.lower()
try:
self._dim = keys[self.msd_type]
except KeyError:
raise ValueError(
'invalid msd_type: {} specified, please specify one of xyz, '
'xy, xz, yz, x, y, z'.format(self.msd_type))
self.dim_fac = len(self._dim)
def _single_frame(self):
r""" Constructs array of positions for MSD calculation.
"""
# shape of position array set here, use span in last dimension
# from this point on
self._position_array[self._frame_index] = (
self.ag.positions[:, self._dim])
def _conclude(self):
if self.fft:
self._conclude_fft()
else:
self._conclude_simple()
def _conclude_simple(self):
r""" Calculates the MSD via the simple "windowed" algorithm.
"""
lagtimes = np.arange(1, self.n_frames)
positions = self._position_array.astype(np.float64)
for lag in lagtimes:
disp = positions[:-lag, :, :] - positions[lag:, :, :]
sqdist = np.square(disp).sum(axis=-1)
self.results.msds_by_particle[lag, :] = np.mean(sqdist, axis=0)
self.results.timeseries = self.results.msds_by_particle.mean(axis=1)
def _conclude_fft(self): # with FFT, np.float64 bit prescision required.
r""" Calculates the MSD via the FCA fast correlation algorithm.
"""
try:
import tidynamics
except ImportError:
raise ImportError("""ERROR --- tidynamics was not found!
tidynamics is required to compute an FFT based MSD (default)
try installing it using pip eg:
pip install tidynamics
or set fft=False""")
positions = self._position_array.astype(np.float64)
for n in range(self.n_particles):
self.results.msds_by_particle[:, n] = tidynamics.msd(
positions[:, n, :])
self.results.timeseries = self.results.msds_by_particle.mean(axis=1)
|
MDAnalysis/mdanalysis
|
package/MDAnalysis/analysis/msd.py
|
Python
|
gpl-2.0
| 15,378
|
[
"Gromacs",
"MDAnalysis"
] |
9d02e1c4ae470a04ff71761af10d6551c5a7e1688ed9e2ca7d652d57e499763b
|
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import codecs
import os.path
def read(rel_path):
here = os.path.abspath(os.path.dirname(__file__))
with codecs.open(os.path.join(here, rel_path), 'r') as fp:
return fp.read()
def get_version(rel_path):
for line in read(rel_path).splitlines():
if line.startswith('__version__'):
delim = '"' if '"' in line else "'"
return line.split(delim)[1]
else:
raise RuntimeError("Unable to find version string.")
scripts = [
'scripts/upho_weights',
'scripts/upho_sf',
'scripts/upho_qpoints',
'scripts/upho_fit',
]
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name='upho', # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=get_version('upho/__init__.py'), # Required
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url='https://github.com/yuzie007/upho', # Optional
# This should be your name or the name of the organization which owns the
# project.
author='Yuji Ikeda', # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email='yuji.ikeda.ac.jp@gmail.com', # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(), # Required
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['phonopy>=2.7.0'], # Optional
scripts=scripts)
|
yuzie007/upho
|
setup.py
|
Python
|
mit
| 2,931
|
[
"phonopy"
] |
fb5dd9e6b644f1fa99074aa33e9c59a25282f4dd11af58e373a176201894755d
|
"""Handle extraction of final files from processing pipelines into storage.
"""
import datetime
import os
import toolz as tz
from bcbio import log, utils
from bcbio.upload import shared, filesystem, galaxy, s3
from bcbio.pipeline import run_info
import bcbio.pipeline.datadict as dd
_approaches = {"filesystem": filesystem,
"galaxy": galaxy,
"s3": s3}
def project_from_sample(sample):
upload_config = sample.get("upload")
if upload_config:
approach = _approaches[upload_config.get("method", "filesystem")]
for finfo in _get_files_project(sample, upload_config):
approach.update_file(finfo, None, upload_config)
return [[sample]]
def from_sample(sample):
"""Upload results of processing from an analysis pipeline sample.
"""
upload_config = sample.get("upload")
if upload_config:
approach = _approaches[upload_config.get("method", "filesystem")]
for finfo in _get_files(sample):
approach.update_file(finfo, sample, upload_config)
return [[sample]]
# ## File information from sample
def _get_files(sample):
"""Retrieve files for the sample, dispatching by analysis type.
Each file is a dictionary containing the path plus associated
metadata about the file and pipeline versions.
"""
analysis = sample.get("analysis")
if analysis.lower() in ["variant", "snp calling", "variant2", "standard"]:
return _get_files_variantcall(sample)
elif analysis in ["RNA-seq"]:
return _get_files_rnaseq(sample)
elif analysis.lower() in ["smallrna-seq"]:
return _get_files_srnaseq(sample)
elif analysis.lower() in ["chip-seq"]:
return _get_files_chipseq(sample)
elif analysis.lower() in ["sailfish"]:
return _get_files_sailfish(sample)
else:
return []
def _get_files_sailfish(sample):
out = []
out.append({"path": sample["sailfish_dir"],
"type": "directory",
"ext": "sailfish"})
return _add_meta(out, sample)
def _get_files_rnaseq(sample):
out = []
algorithm = sample["config"]["algorithm"]
out = _maybe_add_summary(algorithm, sample, out)
out = _maybe_add_alignment(algorithm, sample, out)
out = _maybe_add_disambiguate(algorithm, sample, out)
out = _maybe_add_counts(algorithm, sample, out)
out = _maybe_add_cufflinks(algorithm, sample, out)
out = _maybe_add_oncofuse(algorithm, sample, out)
out = _maybe_add_rnaseq_variant_file(algorithm, sample, out)
return _add_meta(out, sample)
def _get_files_srnaseq(sample):
out = []
algorithm = sample["config"]["algorithm"]
out = _maybe_add_summary(algorithm, sample, out)
out = _maybe_add_trimming(algorithm, sample, out)
out = _maybe_add_seqbuster(algorithm, sample, out)
return _add_meta(out, sample)
def _get_files_chipseq(sample):
out = []
algorithm = sample["config"]["algorithm"]
out = _maybe_add_summary(algorithm, sample, out)
out = _maybe_add_alignment(algorithm, sample, out)
return _add_meta(out, sample)
def _add_meta(xs, sample=None, config=None):
out = []
for x in xs:
x["mtime"] = shared.get_file_timestamp(x["path"])
if sample and "sample" not in x:
if isinstance(sample["name"], (tuple, list)):
name = sample["name"][-1]
else:
name = "%s-%s" % (sample["name"],
run_info.clean_name(sample["description"]))
x["sample"] = name
if config:
if "fc_name" in config and "fc_date" in config:
x["run"] = "%s_%s" % (config["fc_date"], config["fc_name"])
else:
x["run"] = "project_%s" % datetime.datetime.now().strftime("%Y-%m-%d")
out.append(x)
return out
def _get_files_variantcall(sample):
"""Return output files for the variant calling pipeline.
"""
out = []
algorithm = sample["config"]["algorithm"]
out = _maybe_add_summary(algorithm, sample, out)
out = _maybe_add_alignment(algorithm, sample, out)
out = _maybe_add_disambiguate(algorithm, sample, out)
out = _maybe_add_variant_file(algorithm, sample, out)
out = _maybe_add_sv(algorithm, sample, out)
out = _maybe_add_validate(algorithm, sample, out)
return _add_meta(out, sample)
def _maybe_add_validate(algorithm, sample, out):
for i, plot in enumerate(tz.get_in(("validate", "grading_plots"), sample, [])):
ptype = os.path.splitext(plot)[-1].replace(".", "")
out.append({"path": plot,
"type": ptype,
"ext": "validate%s" % ("" if i == 0 else "-%s" % (i + 1))})
return out
def _maybe_add_rnaseq_variant_file(algorithm, sample, out):
if sample.get("vrn_file"):
out.append({"path": sample.get("vrn_file"),
"type": "vcf",
"ext": "vcf"})
return out
def _maybe_add_variant_file(algorithm, sample, out):
if sample.get("align_bam") is not None and sample.get("vrn_file"):
for x in sample["variants"]:
out.extend(_get_variant_file(x, ("vrn_file",)))
if x.get("bed_file"):
out.append({"path": x["bed_file"],
"type": "bed",
"ext": "%s-callregions" % x["variantcaller"],
"variantcaller": x["variantcaller"]})
if x.get("vrn_stats"):
for extra, fname in x["vrn_stats"].items():
ext = utils.splitext_plus(fname)[-1].replace(".", "")
out.append({"path": fname,
"type": ext,
"ext": "%s-%s" % (x["variantcaller"], extra),
"variantcaller": x["variantcaller"]})
return out
def _maybe_add_sv(algorithm, sample, out):
if sample.get("align_bam") is not None and sample.get("sv"):
for svcall in sample["sv"]:
for key in ["vrn_file", "cnr", "cns", "cnr_bed", "cnr_bedgraph", "seg",
"gainloss", "segmetrics", "vrn_bed", "vrn_bedpe"]:
out.extend(_get_variant_file(svcall, (key,)))
if "validate" in svcall:
for vkey in ["csv", "plot", "df"]:
vfile = tz.get_in(["validate", vkey], svcall)
if vfile:
to_u = []
if isinstance(vfile, dict):
for svtype, fname in vfile.items():
to_u.append((fname, "-%s" % svtype))
else:
to_u.append((vfile, "-%s" % vkey if vkey in ["df"] else ""))
for vfile, ext in to_u:
vext = os.path.splitext(vfile)[-1].replace(".", "")
out.append({"path": vfile,
"type": vext,
"ext": "%s-sv-validate%s" % (svcall["variantcaller"], ext),
"variantcaller": svcall["variantcaller"]})
if "plot" in svcall:
for plot_name, fname in svcall["plot"].items():
ext = os.path.splitext(fname)[-1].replace(".", "")
out.append({"path": fname,
"type": ext,
"ext": "%s-%s" % (svcall["variantcaller"], plot_name),
"variantcaller": svcall["variantcaller"]})
return out
def _get_variant_file(x, key):
"""Retrieve VCF file with the given key if it exists, handling bgzipped.
"""
out = []
fname = utils.get_in(x, key)
upload_key = list(key)
upload_key[-1] = "do_upload"
do_upload = tz.get_in(tuple(upload_key), x, True)
if fname and do_upload:
if fname.endswith(".vcf.gz"):
out.append({"path": fname,
"type": "vcf.gz",
"ext": x["variantcaller"],
"variantcaller": x["variantcaller"]})
if utils.file_exists(fname + ".tbi"):
out.append({"path": fname + ".tbi",
"type": "vcf.gz.tbi",
"index": True,
"ext": x["variantcaller"],
"variantcaller": x["variantcaller"]})
elif fname.endswith((".vcf", ".bed", ".bedpe", ".bedgraph", ".cnr", ".cns", ".cnn", ".txt")):
ftype = utils.splitext_plus(fname)[-1][1:]
if ftype == "txt":
ftype = fname.split("-")[-1]
out.append({"path": fname,
"type": ftype,
"ext": x["variantcaller"],
"variantcaller": x["variantcaller"]})
return out
def _maybe_add_summary(algorithm, sample, out):
out = []
if "summary" in sample:
if sample["summary"].get("pdf"):
out.append({"path": sample["summary"]["pdf"],
"type": "pdf",
"ext": "summary"})
if sample["summary"].get("qc"):
out.append({"path": sample["summary"]["qc"],
"type": "directory",
"ext": "qc"})
if utils.get_in(sample, ("summary", "researcher")):
out.append({"path": sample["summary"]["researcher"],
"type": "tsv",
"sample": run_info.clean_name(utils.get_in(sample, ("upload", "researcher"))),
"ext": "summary"})
return out
def _maybe_add_alignment(algorithm, sample, out):
if _has_alignment_file(algorithm, sample):
for (fname, ext, isplus) in [(sample.get("work_bam"), "ready", False),
(utils.get_in(sample, ("work_bam-plus", "disc")), "disc", True),
(utils.get_in(sample, ("work_bam-plus", "sr")), "sr", True)]:
if fname and os.path.exists(fname):
if fname.endswith("bam"):
ftype, fext = "bam", ".bai"
elif fname.endswith("cram"):
ftype, fext = "cram", ".crai"
else:
raise ValueError("Unexpected alignment file type %s" % fname)
out.append({"path": fname,
"type": ftype,
"plus": isplus,
"ext": ext})
if utils.file_exists(fname + fext):
out.append({"path": fname + fext,
"type": ftype + fext,
"plus": isplus,
"index": True,
"ext": ext})
return out
def _maybe_add_disambiguate(algorithm, sample, out):
if "disambiguate" in sample:
for extra_name, fname in sample["disambiguate"].items():
ftype = os.path.splitext(fname)[-1].replace(".", "")
fext = ".bai" if ftype == "bam" else ""
if fname and os.path.exists(fname):
out.append({"path": fname,
"type": ftype,
"plus": True,
"ext": "disambiguate-%s" % extra_name})
if fext and utils.file_exists(fname + fext):
out.append({"path": fname + fext,
"type": ftype + fext,
"plus": True,
"index": True,
"ext": "disambiguate-%s" % extra_name})
return out
def _maybe_add_counts(algorithm, sample, out):
out.append({"path": sample["count_file"],
"type": "counts",
"ext": "ready"})
stats_file = os.path.splitext(sample["count_file"])[0] + ".stats"
if utils.file_exists(stats_file):
out.append({"path": stats_file,
"type": "count_stats",
"ext": "ready"})
return out
def _maybe_add_oncofuse(algorithm, sample, out):
if sample.get("oncofuse_file", None) is not None:
out.append({"path": sample["oncofuse_file"],
"type": "oncofuse_outfile",
"ext": "ready"})
return out
def _maybe_add_cufflinks(algorithm, sample, out):
if "cufflinks_dir" in sample:
out.append({"path": sample["cufflinks_dir"],
"type": "directory",
"ext": "cufflinks"})
return out
def _maybe_add_trimming(algorithm, sample, out):
fn = sample["collapse"] + "_size_stats"
if utils.file_exists(fn):
out.append({"path": fn,
"type": "trimming_stats",
"ext": "ready"})
return out
def _maybe_add_seqbuster(algorithm, sample, out):
fn = sample["seqbuster"]
if utils.file_exists(fn):
out.append({"path": fn,
"type": "counts",
"ext": "ready"})
return out
def _has_alignment_file(algorithm, sample):
return (((algorithm.get("aligner") or algorithm.get("realign")
or algorithm.get("recalibrate") or algorithm.get("bam_clean")
or algorithm.get("mark_duplicates")) and
algorithm.get("merge_bamprep", True)) and
sample.get("work_bam") is not None)
# ## File information from full project
def _get_files_project(sample, upload_config):
"""Retrieve output files associated with an entire analysis project.
"""
out = [{"path": sample["provenance"]["programs"]}]
for fname in ["bcbio-nextgen.log", "bcbio-nextgen-commands.log"]:
if os.path.exists(os.path.join(log.get_log_dir(sample["config"]), fname)):
out.append({"path": os.path.join(log.get_log_dir(sample["config"]), fname),
"type": "external_command_log",
"ext": ""})
if "summary" in sample and sample["summary"].get("project"):
out.append({"path": sample["summary"]["project"]})
mixup_check = tz.get_in(["summary", "mixup_check"], sample)
if mixup_check:
out.append({"path": sample["summary"]["mixup_check"],
"type": "directory", "ext": "mixup_check"})
if sample.get("seqcluster", None):
out.append({"path": sample["seqcluster"],
"type": "directory", "ext": "seqcluster"})
for x in sample.get("variants", []):
if "pop_db" in x:
out.append({"path": x["pop_db"],
"type": "sqlite",
"variantcaller": x["variantcaller"]})
for x in sample.get("variants", []):
if "population" in x:
pop_db = tz.get_in(["population", "db"], x)
if pop_db:
out.append({"path": pop_db,
"type": "sqlite",
"variantcaller": x["variantcaller"]})
out.extend(_get_variant_file(x, ("population", "vcf")))
for x in sample.get("variants", []):
if x.get("validate") and x["validate"].get("grading_summary"):
out.append({"path": x["validate"]["grading_summary"]})
break
if "coverage" in sample:
cov_db = tz.get_in(["coverage", "summary"], sample)
if cov_db:
out.append({"path": cov_db, "type": "sqlite", "ext": "coverage"})
all_coverage = tz.get_in(["coverage", "all"], sample)
if all_coverage:
out.append({"path": all_coverage, "type": "bed", "ext": "coverage"})
if dd.get_combined_counts(sample):
out.append({"path": dd.get_combined_counts(sample)})
if dd.get_annotated_combined_counts(sample):
out.append({"path": dd.get_annotated_combined_counts(sample)})
if dd.get_combined_fpkm(sample):
out.append({"path": dd.get_combined_fpkm(sample)})
if dd.get_combined_fpkm_isoform(sample):
out.append({"path": dd.get_combined_fpkm_isoform(sample)})
if dd.get_assembled_gtf(sample):
out.append({"path": dd.get_assembled_gtf(sample)})
if dd.get_dexseq_counts(sample):
out.append({"path": dd.get_dexseq_counts(sample)})
if dd.get_express_counts(sample):
out.append({"path": dd.get_express_counts(sample)})
if dd.get_express_fpkm(sample):
out.append({"path": dd.get_express_fpkm(sample)})
if dd.get_express_tpm(sample):
out.append({"path": dd.get_express_tpm(sample)})
if dd.get_isoform_to_gene(sample):
out.append({"path": dd.get_isoform_to_gene(sample)})
if dd.get_square_vcf(sample):
out.append({"path": dd.get_square_vcf(sample)})
return _add_meta(out, config=upload_config)
|
elkingtonmcb/bcbio-nextgen
|
bcbio/upload/__init__.py
|
Python
|
mit
| 16,810
|
[
"Galaxy"
] |
67919c8afe86b585de0eddda54d28a76e252b698905ab8623ca5a8f22bd8569c
|
# Python test set -- part 1, grammar.
# This just tests whether the parser accepts them all.
from test_support import *
print '1. Parser'
print '1.1 Tokens'
print '1.1.1 Backslashes'
# Backslash means line continuation:
x = 1 \
+ 1
if x != 2: raise TestFailed, 'backslash for line continuation'
# Backslash does not mean continuation in comments :\
x = 0
if x != 0: raise TestFailed, 'backslash ending comment'
print '1.1.2 Numeric literals'
print '1.1.2.1 Plain integers'
if 0xff != 255: raise TestFailed, 'hex int'
if 0377 != 255: raise TestFailed, 'octal int'
if 2147483647 != 017777777777: raise TestFailed, 'large positive int'
try:
from sys import maxint
except ImportError:
maxint = 2147483647
if maxint == 2147483647:
if -2147483647-1 != 020000000000: raise TestFailed, 'max negative int'
# XXX -2147483648
if 037777777777 != -1: raise TestFailed, 'oct -1'
if 0xffffffff != -1: raise TestFailed, 'hex -1'
for s in '2147483648', '040000000000', '0x100000000':
try:
x = eval(s)
except OverflowError:
continue
## raise TestFailed, \
print \
'No OverflowError on huge integer literal ' + `s`
elif eval('maxint == 9223372036854775807'):
if eval('-9223372036854775807-1 != 01000000000000000000000'):
raise TestFailed, 'max negative int'
if eval('01777777777777777777777') != -1: raise TestFailed, 'oct -1'
if eval('0xffffffffffffffff') != -1: raise TestFailed, 'hex -1'
for s in '9223372036854775808', '02000000000000000000000', \
'0x10000000000000000':
try:
x = eval(s)
except OverflowError:
continue
raise TestFailed, \
'No OverflowError on huge integer literal ' + `s`
else:
print 'Weird maxint value', maxint
print '1.1.2.2 Long integers'
x = 0L
x = 0l
x = 0xffffffffffffffffL
x = 0xffffffffffffffffl
x = 077777777777777777L
x = 077777777777777777l
x = 123456789012345678901234567890L
x = 123456789012345678901234567890l
print '1.1.2.3 Floating point'
x = 3.14
x = 314.
x = 0.314
# XXX x = 000.314
x = .314
x = 3e14
x = 3E14
x = 3e-14
x = 3e+14
x = 3.e14
x = .3e14
x = 3.1e4
print '1.1.3 String literals'
x = ''; y = ""; verify(len(x) == 0 and x == y)
x = '\''; y = "'"; verify(len(x) == 1 and x == y and ord(x) == 39)
x = '"'; y = "\""; verify(len(x) == 1 and x == y and ord(x) == 34)
x = "doesn't \"shrink\" does it"
y = 'doesn\'t "shrink" does it'
verify(len(x) == 24 and x == y)
x = "does \"shrink\" doesn't it"
y = 'does "shrink" doesn\'t it'
verify(len(x) == 24 and x == y)
x = """
The "quick"
brown fox
jumps over
the 'lazy' dog.
"""
y = '\nThe "quick"\nbrown fox\njumps over\nthe \'lazy\' dog.\n'
verify(x == y)
y = '''
The "quick"
brown fox
jumps over
the 'lazy' dog.
'''; verify(x == y)
y = "\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the 'lazy' dog.\n\
"; verify(x == y)
y = '\n\
The \"quick\"\n\
brown fox\n\
jumps over\n\
the \'lazy\' dog.\n\
'; verify(x == y)
print '1.2 Grammar'
print 'single_input' # NEWLINE | simple_stmt | compound_stmt NEWLINE
# XXX can't test in a script -- this rule is only used when interactive
print 'file_input' # (NEWLINE | stmt)* ENDMARKER
# Being tested as this very moment this very module
print 'expr_input' # testlist NEWLINE
# XXX Hard to test -- used only in calls to input()
print 'eval_input' # testlist ENDMARKER
x = eval('1, 0 or 1')
print 'funcdef'
### 'def' NAME parameters ':' suite
### parameters: '(' [varargslist] ')'
### varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' ('**'|'*' '*') NAME]
### | ('**'|'*' '*') NAME)
### | fpdef ['=' test] (',' fpdef ['=' test])* [',']
### fpdef: NAME | '(' fplist ')'
### fplist: fpdef (',' fpdef)* [',']
### arglist: (argument ',')* (argument | '*' test [',' '**' test] | '**' test)
### argument: [test '='] test # Really [keyword '='] test
def f1(): pass
f1()
f1(*())
f1(*(), **{})
def f2(one_argument): pass
def f3(two, arguments): pass
def f4(two, (compound, (argument, list))): pass
def f5((compound, first), two): pass
verify(f2.func_code.co_varnames == ('one_argument',))
verify(f3.func_code.co_varnames == ('two', 'arguments'))
verify(f4.func_code.co_varnames == ('two', '.2', 'compound', 'argument',
'list'))
verify(f5.func_code.co_varnames == ('.0', 'two', 'compound', 'first'))
def a1(one_arg,): pass
def a2(two, args,): pass
def v0(*rest): pass
def v1(a, *rest): pass
def v2(a, b, *rest): pass
def v3(a, (b, c), *rest): return a, b, c, rest
verify(v3.func_code.co_varnames == ('a', '.2', 'rest', 'b', 'c'))
verify(v3(1, (2, 3), 4) == (1, 2, 3, (4,)))
def d01(a=1): pass
d01()
d01(1)
d01(*(1,))
d01(**{'a':2})
def d11(a, b=1): pass
d11(1)
d11(1, 2)
d11(1, **{'b':2})
def d21(a, b, c=1): pass
d21(1, 2)
d21(1, 2, 3)
d21(*(1, 2, 3))
d21(1, *(2, 3))
d21(1, 2, *(3,))
d21(1, 2, **{'c':3})
def d02(a=1, b=2): pass
d02()
d02(1)
d02(1, 2)
d02(*(1, 2))
d02(1, *(2,))
d02(1, **{'b':2})
d02(**{'a': 1, 'b': 2})
def d12(a, b=1, c=2): pass
d12(1)
d12(1, 2)
d12(1, 2, 3)
def d22(a, b, c=1, d=2): pass
d22(1, 2)
d22(1, 2, 3)
d22(1, 2, 3, 4)
def d01v(a=1, *rest): pass
d01v()
d01v(1)
d01v(1, 2)
d01v(*(1, 2, 3, 4))
d01v(*(1,))
d01v(**{'a':2})
def d11v(a, b=1, *rest): pass
d11v(1)
d11v(1, 2)
d11v(1, 2, 3)
def d21v(a, b, c=1, *rest): pass
d21v(1, 2)
d21v(1, 2, 3)
d21v(1, 2, 3, 4)
d21v(*(1, 2, 3, 4))
d21v(1, 2, **{'c': 3})
def d02v(a=1, b=2, *rest): pass
d02v()
d02v(1)
d02v(1, 2)
d02v(1, 2, 3)
d02v(1, *(2, 3, 4))
d02v(**{'a': 1, 'b': 2})
def d12v(a, b=1, c=2, *rest): pass
d12v(1)
d12v(1, 2)
d12v(1, 2, 3)
d12v(1, 2, 3, 4)
d12v(*(1, 2, 3, 4))
d12v(1, 2, *(3, 4, 5))
d12v(1, *(2,), **{'c': 3})
def d22v(a, b, c=1, d=2, *rest): pass
d22v(1, 2)
d22v(1, 2, 3)
d22v(1, 2, 3, 4)
d22v(1, 2, 3, 4, 5)
d22v(*(1, 2, 3, 4))
d22v(1, 2, *(3, 4, 5))
d22v(1, *(2, 3), **{'d': 4})
### lambdef: 'lambda' [varargslist] ':' test
print 'lambdef'
l1 = lambda : 0
verify(l1() == 0)
l2 = lambda : a[d] # XXX just testing the expression
l3 = lambda : [2 < x for x in [-1, 3, 0L]]
verify(l3() == [0, 1, 0])
l4 = lambda x = lambda y = lambda z=1 : z : y() : x()
verify(l4() == 1)
l5 = lambda x, y, z=2: x + y + z
verify(l5(1, 2) == 5)
verify(l5(1, 2, 3) == 6)
check_syntax("lambda x: x = 2")
### stmt: simple_stmt | compound_stmt
# Tested below
### simple_stmt: small_stmt (';' small_stmt)* [';']
print 'simple_stmt'
x = 1; pass; del x
### small_stmt: expr_stmt | print_stmt | pass_stmt | del_stmt | flow_stmt | import_stmt | global_stmt | access_stmt | exec_stmt
# Tested below
print 'expr_stmt' # (exprlist '=')* exprlist
1
1, 2, 3
x = 1
x = 1, 2, 3
x = y = z = 1, 2, 3
x, y, z = 1, 2, 3
abc = a, b, c = x, y, z = xyz = 1, 2, (3, 4)
# NB these variables are deleted below
check_syntax("x + 1 = 1")
check_syntax("a + 1 = b + 2")
print 'print_stmt' # 'print' (test ',')* [test]
print 1, 2, 3
print 1, 2, 3,
print
print 0 or 1, 0 or 1,
print 0 or 1
print 'extended print_stmt' # 'print' '>>' test ','
import sys
print >> sys.stdout, 1, 2, 3
print >> sys.stdout, 1, 2, 3,
print >> sys.stdout
print >> sys.stdout, 0 or 1, 0 or 1,
print >> sys.stdout, 0 or 1
# test printing to an instance
class Gulp:
def write(self, msg): pass
gulp = Gulp()
print >> gulp, 1, 2, 3
print >> gulp, 1, 2, 3,
print >> gulp
print >> gulp, 0 or 1, 0 or 1,
print >> gulp, 0 or 1
# test print >> None
def driver():
oldstdout = sys.stdout
sys.stdout = Gulp()
try:
tellme(Gulp())
tellme()
finally:
sys.stdout = oldstdout
# we should see this once
def tellme(file=sys.stdout):
print >> file, 'hello world'
driver()
# we should not see this at all
def tellme(file=None):
print >> file, 'goodbye universe'
driver()
# syntax errors
check_syntax('print ,')
check_syntax('print >> x,')
print 'del_stmt' # 'del' exprlist
del abc
del x, y, (z, xyz)
print 'pass_stmt' # 'pass'
pass
print 'flow_stmt' # break_stmt | continue_stmt | return_stmt | raise_stmt
# Tested below
print 'break_stmt' # 'break'
while 1: break
print 'continue_stmt' # 'continue'
i = 1
while i: i = 0; continue
msg = ""
while not msg:
msg = "continue + try/except ok"
try:
continue
msg = "continue failed to continue inside try"
except:
msg = "continue inside try called except block"
print msg
msg = ""
while not msg:
msg = "finally block not called"
try:
continue
finally:
msg = "continue + try/finally ok"
print msg
print 'return_stmt' # 'return' [testlist]
def g1(): return
def g2(): return 1
g1()
x = g2()
print 'raise_stmt' # 'raise' test [',' test]
try: raise RuntimeError, 'just testing'
except RuntimeError: pass
try: raise KeyboardInterrupt
except KeyboardInterrupt: pass
print 'import_stmt' # 'import' NAME (',' NAME)* | 'from' NAME 'import' ('*' | NAME (',' NAME)*)
import sys
import time, sys
from time import time
from sys import *
from sys import path, argv
print 'global_stmt' # 'global' NAME (',' NAME)*
def f():
global a
global a, b
global one, two, three, four, five, six, seven, eight, nine, ten
print 'exec_stmt' # 'exec' expr ['in' expr [',' expr]]
def f():
z = None
del z
exec 'z=1+1\n'
if z != 2: raise TestFailed, 'exec \'z=1+1\'\\n'
del z
exec 'z=1+1'
if z != 2: raise TestFailed, 'exec \'z=1+1\''
z = None
del z
exec u'z=1+1\n'
if z != 2: raise TestFailed, 'exec u\'z=1+1\'\\n'
del z
exec u'z=1+1'
if z != 2: raise TestFailed, 'exec u\'z=1+1\''
f()
g = {}
exec 'z = 1' in g
if g.has_key('__builtins__'): del g['__builtins__']
if g != {'z': 1}: raise TestFailed, 'exec \'z = 1\' in g'
g = {}
l = {}
import warnings
warnings.filterwarnings("ignore", "global statement", module="<string>")
exec 'global a; a = 1; b = 2' in g, l
if g.has_key('__builtins__'): del g['__builtins__']
if l.has_key('__builtins__'): del l['__builtins__']
if (g, l) != ({'a':1}, {'b':2}): raise TestFailed, 'exec ... in g (%s), l (%s)' %(g,l)
print "assert_stmt" # assert_stmt: 'assert' test [',' test]
assert 1
assert 1, 1
assert lambda x:x
assert 1, lambda x:x+1
### compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | funcdef | classdef
# Tested below
print 'if_stmt' # 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
if 1: pass
if 1: pass
else: pass
if 0: pass
elif 0: pass
if 0: pass
elif 0: pass
elif 0: pass
elif 0: pass
else: pass
print 'while_stmt' # 'while' test ':' suite ['else' ':' suite]
while 0: pass
while 0: pass
else: pass
print 'for_stmt' # 'for' exprlist 'in' exprlist ':' suite ['else' ':' suite]
for i in 1, 2, 3: pass
for i, j, k in (): pass
else: pass
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self): return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n = n+1
return self.sofar[i]
n = 0
for x in Squares(10): n = n+x
if n != 285: raise TestFailed, 'for over growing sequence'
print 'try_stmt'
### try_stmt: 'try' ':' suite (except_clause ':' suite)+ ['else' ':' suite]
### | 'try' ':' suite 'finally' ':' suite
### except_clause: 'except' [expr [',' expr]]
try:
1/0
except ZeroDivisionError:
pass
else:
pass
try: 1/0
except EOFError: pass
except TypeError, msg: pass
except RuntimeError, msg: pass
except: pass
else: pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError): pass
try: 1/0
except (EOFError, TypeError, ZeroDivisionError), msg: pass
try: pass
finally: pass
print 'suite' # simple_stmt | NEWLINE INDENT NEWLINE* (stmt NEWLINE*)+ DEDENT
if 1: pass
if 1:
pass
if 1:
#
#
#
pass
pass
#
pass
#
print 'test'
### and_test ('or' and_test)*
### and_test: not_test ('and' not_test)*
### not_test: 'not' not_test | comparison
if not 1: pass
if 1 and 1: pass
if 1 or 1: pass
if not not not 1: pass
if not 1 and 1 and 1: pass
if 1 and 1 or 1 and 1 and 1 or not 1 and 1: pass
print 'comparison'
### comparison: expr (comp_op expr)*
### comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
if 1: pass
x = (1 == 1)
if 1 == 1: pass
if 1 != 1: pass
if 1 <> 1: pass
if 1 < 1: pass
if 1 > 1: pass
if 1 <= 1: pass
if 1 >= 1: pass
if 1 is 1: pass
if 1 is not 1: pass
if 1 in (): pass
if 1 not in (): pass
if 1 < 1 > 1 == 1 >= 1 <= 1 <> 1 != 1 in 1 not in 1 is 1 is not 1: pass
print 'binary mask ops'
x = 1 & 1
x = 1 ^ 1
x = 1 | 1
print 'shift ops'
x = 1 << 1
x = 1 >> 1
x = 1 << 1 >> 1
print 'additive ops'
x = 1
x = 1 + 1
x = 1 - 1 - 1
x = 1 - 1 + 1 - 1 + 1
print 'multiplicative ops'
x = 1 * 1
x = 1 / 1
x = 1 % 1
x = 1 / 1 * 1 % 1
print 'unary ops'
x = +1
x = -1
x = ~1
x = ~1 ^ 1 & 1 | 1 & 1 ^ -1
x = -1*1/1 + 1*1 - ---1*1
print 'selectors'
### trailer: '(' [testlist] ')' | '[' subscript ']' | '.' NAME
### subscript: expr | [expr] ':' [expr]
f1()
f2(1)
f2(1,)
f3(1, 2)
f3(1, 2,)
f4(1, (2, (3, 4)))
v0()
v0(1)
v0(1,)
v0(1,2)
v0(1,2,3,4,5,6,7,8,9,0)
v1(1)
v1(1,)
v1(1,2)
v1(1,2,3)
v1(1,2,3,4,5,6,7,8,9,0)
v2(1,2)
v2(1,2,3)
v2(1,2,3,4)
v2(1,2,3,4,5,6,7,8,9,0)
v3(1,(2,3))
v3(1,(2,3),4)
v3(1,(2,3),4,5,6,7,8,9,0)
print
import sys, time
c = sys.path[0]
x = time.time()
x = sys.modules['time'].time()
a = '01234'
c = a[0]
c = a[-1]
s = a[0:5]
s = a[:5]
s = a[0:]
s = a[:]
s = a[-5:]
s = a[:-1]
s = a[-4:-3]
print 'atoms'
### atom: '(' [testlist] ')' | '[' [testlist] ']' | '{' [dictmaker] '}' | '`' testlist '`' | NAME | NUMBER | STRING
### dictmaker: test ':' test (',' test ':' test)* [',']
x = (1)
x = (1 or 2 or 3)
x = (1 or 2 or 3, 2, 3)
x = []
x = [1]
x = [1 or 2 or 3]
x = [1 or 2 or 3, 2, 3]
x = []
x = {}
x = {'one': 1}
x = {'one': 1,}
x = {'one' or 'two': 1 or 2}
x = {'one': 1, 'two': 2}
x = {'one': 1, 'two': 2,}
x = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6}
x = `x`
x = `1 or 2 or 3`
x = x
x = 'x'
x = 123
### exprlist: expr (',' expr)* [',']
### testlist: test (',' test)* [',']
# These have been exercised enough above
print 'classdef' # 'class' NAME ['(' testlist ')'] ':' suite
class B: pass
class C1(B): pass
class C2(B): pass
class D(C1, C2, B): pass
class C:
def meth1(self): pass
def meth2(self, arg): pass
def meth3(self, a1, a2): pass
# list comprehension tests
nums = [1, 2, 3, 4, 5]
strs = ["Apple", "Banana", "Coconut"]
spcs = [" Apple", " Banana ", "Coco nut "]
print [s.strip() for s in spcs]
print [3 * x for x in nums]
print [x for x in nums if x > 2]
print [(i, s) for i in nums for s in strs]
print [(i, s) for i in nums for s in [f for f in strs if "n" in f]]
def test_in_func(l):
return [None < x < 3 for x in l if x > 2]
print test_in_func(nums)
def test_nested_front():
print [[y for y in [x, x + 1]] for x in [1,3,5]]
test_nested_front()
check_syntax("[i, s for i in nums for s in strs]")
check_syntax("[x if y]")
suppliers = [
(1, "Boeing"),
(2, "Ford"),
(3, "Macdonalds")
]
parts = [
(10, "Airliner"),
(20, "Engine"),
(30, "Cheeseburger")
]
suppart = [
(1, 10), (1, 20), (2, 20), (3, 30)
]
print [
(sname, pname)
for (sno, sname) in suppliers
for (pno, pname) in parts
for (sp_sno, sp_pno) in suppart
if sno == sp_sno and pno == sp_pno
]
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.1/Lib/test/test_grammar.py
|
Python
|
mit
| 15,273
|
[
"GULP"
] |
392490783ff2970f8c7614dc043be71795e58a53b65e36b412806498bd67467e
|
#Bail out when loading this module as it is not python3 compliant
from GangaCore.Core.exceptions import PluginError
raise PluginError("The GangaNA62 module has not been upgraded for python 3. The last python 2 Ganga version is 7.1.15 . Please contact the ganga devs to discuss updating this module.")
import os
import GangaCore.Utility.logging
import GangaCore.Utility.Config
def standardSetup():
import PACKAGE
PACKAGE.standardSetup()
def loadPlugins( config = {} ):
import Lib.Applications
import Lib.Tasks
import Lib.Requirements
#import Lib.Backends
#import Lib.Applications
#import Lib.LHCbDataset
#import Lib.Mergers
#import Lib.RTHandlers
#import Lib.Splitters
# import Lib.DIRAC
#import Lib.Tasks
#from GangaCore.GPIDev.Credentials_old import getCredential
#proxy = getCredential('GridProxy', '')
|
ganga-devs/ganga
|
ganga/GangaNA62/__init__.py
|
Python
|
gpl-3.0
| 861
|
[
"DIRAC"
] |
a710014350dd4f29b2b685bf0f36064c21f6832a5dd8c8297e52760374c1e5cf
|
# -*- coding: utf-8 -*-
"""
@package Bwa
@brief **Wrapper for BWA mem**
Please see the BWA user manual document for further details
[MANUAL](http://bio-bwa.sourceforge.net/bwa.shtml)
Basically, the top level function Mem.align processes sequences as follows:
* If a bwa index is provided it will attempt to validate it by creating an IndexWrapper.ExistingIndex object.
* If the validation fails or if no index was provided, a new index will be created by using IndexWrapper.ExistingIndex from a reference fasta file
(or a list of fasta files that will be combined into a single reference)
* An instance of MemWrapper.Aligner is then created by passing the Index object as an argument.
* A single or a pair of fastq files are then aligned against the reference through MemWrapper.Aligner
* Finally, results are piped into a sam file
@copyright [GNU General Public License v2](http://www.gnu.org/licenses/gpl-2.0.html)
@author Adrien Leger - 2014
* <adrien.leger@gmail.com>
* <adrien.leger@inserm.fr>
* <adrien.leger@univ-nantes.fr>
* [Github](https://github.com/a-slide)
* [Atlantic Gene Therapies - INSERM 1089] (http://www.atlantic-gene-therapies.fr/)
"""
__all__ = ["Mem", "IndexWrapper", "MemWrapper"]
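# --- Editor's hedged usage sketch (not part of the original package) ---
# The module docstring above describes the Mem.align workflow (index validation
# or creation, alignment of fastq files, SAM output). The keyword names below
# are assumptions made purely for illustration; check Mem.align's real
# signature before relying on them.
#
#   from Bwa import Mem
#
#   # Align a pair of fastq files against a reference fasta; an existing bwa
#   # index is reused if it validates, otherwise a new one is built first.
#   sam_file = Mem.align(
#       ref_fasta="reference.fa",     # hypothetical parameter name
#       R1="reads_R1.fastq",          # hypothetical parameter name
#       R2="reads_R2.fastq",          # hypothetical parameter name
#       index=None)                   # hypothetical: path to an existing index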
|
a-slide/pyDNA
|
Bwa/__init__.py
|
Python
|
gpl-2.0
| 1,201
|
[
"BWA"
] |
cfcc939e2eb823e164120163f0e289ab4b5c76c6a593a6b80e43cd091052490a
|
# Author: Travis Oliphant, 2002
#
# Further updates and enhancements by many SciPy developers.
#
from __future__ import division, print_function, absolute_import
import math
import warnings
from collections import namedtuple
import numpy as np
from numpy import (isscalar, r_, log, around, unique, asarray,
zeros, arange, sort, amin, amax, any, atleast_1d,
sqrt, ceil, floor, array, poly1d, compress,
pi, exp, ravel, count_nonzero, sin, cos, arctan2, hypot)
from numpy.testing.decorators import setastest
from scipy._lib.six import string_types
from scipy import optimize
from scipy import special
from . import statlib
from . import stats
from .stats import find_repeats, _contains_nan
from .contingency import chi2_contingency
from . import distributions
from ._distn_infrastructure import rv_generic
__all__ = ['mvsdist',
'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot',
'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot',
'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test',
'fligner', 'mood', 'wilcoxon', 'median_test',
'pdf_fromgamma', 'circmean', 'circvar', 'circstd', 'anderson_ksamp'
]
Mean = namedtuple('Mean', ('statistic', 'minmax'))
Variance = namedtuple('Variance', ('statistic', 'minmax'))
Std_dev = namedtuple('Std_dev', ('statistic', 'minmax'))
def bayes_mvs(data, alpha=0.90):
r"""
Bayesian confidence intervals for the mean, var, and std.
Parameters
----------
data : array_like
Input data, if multi-dimensional it is flattened to 1-D by `bayes_mvs`.
Requires 2 or more data points.
alpha : float, optional
Probability that the returned confidence interval contains
the true parameter.
Returns
-------
mean_cntr, var_cntr, std_cntr : tuple
The three results are for the mean, variance and standard deviation,
respectively. Each result is a tuple of the form::
(center, (lower, upper))
with `center` the mean of the conditional pdf of the value given the
data, and `(lower, upper)` a confidence interval, centered on the
median, containing the estimate to a probability ``alpha``.
See Also
--------
mvsdist
Notes
-----
Each tuple of mean, variance, and standard deviation estimates represent
the (center, (lower, upper)) with center the mean of the conditional pdf
of the value given the data and (lower, upper) is a confidence interval
centered on the median, containing the estimate to a probability
``alpha``.
Converts data to 1-D and assumes all data has the same mean and variance.
Uses Jeffrey's prior for variance and std.
Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))``
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
First a basic example to demonstrate the outputs:
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.bayes_mvs(data)
>>> mean
Mean(statistic=9.0, minmax=(7.1036502226125329, 10.896349777387467))
>>> var
Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...))
>>> std
Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.9456146050146295))
Now we generate some normally distributed random data, and get estimates of
mean and standard deviation with 95% confidence intervals for those
estimates:
>>> n_samples = 100000
>>> data = stats.norm.rvs(size=n_samples)
>>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.hist(data, bins=100, normed=True, label='Histogram of data')
>>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean')
>>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r',
... alpha=0.2, label=r'Estimated mean (95% limits)')
>>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale')
>>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2,
... label=r'Estimated scale (95% limits)')
>>> ax.legend(fontsize=10)
>>> ax.set_xlim([-4, 4])
>>> ax.set_ylim([0, 0.5])
>>> plt.show()
"""
m, v, s = mvsdist(data)
if alpha >= 1 or alpha <= 0:
raise ValueError("0 < alpha < 1 is required, but alpha=%s was given."
% alpha)
m_res = Mean(m.mean(), m.interval(alpha))
v_res = Variance(v.mean(), v.interval(alpha))
s_res = Std_dev(s.mean(), s.interval(alpha))
return m_res, v_res, s_res
def mvsdist(data):
"""
'Frozen' distributions for mean, variance, and standard deviation of data.
Parameters
----------
data : array_like
Input array. Converted to 1-D using ravel.
Requires 2 or more data-points.
Returns
-------
mdist : "frozen" distribution object
Distribution object representing the mean of the data
vdist : "frozen" distribution object
Distribution object representing the variance of the data
sdist : "frozen" distribution object
Distribution object representing the standard deviation of the data
See Also
--------
bayes_mvs
Notes
-----
The return values from ``bayes_mvs(data)`` is equivalent to
``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``.
In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)``
on the three distribution objects returned from this function will give
the same results that are returned from `bayes_mvs`.
References
----------
T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and
standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278,
2006.
Examples
--------
>>> from scipy import stats
>>> data = [6, 9, 12, 7, 8, 8, 13]
>>> mean, var, std = stats.mvsdist(data)
We now have frozen distribution objects "mean", "var" and "std" that we can
examine:
>>> mean.mean()
9.0
>>> mean.interval(0.95)
(6.6120585482655692, 11.387941451734431)
>>> mean.std()
1.1952286093343936
"""
x = ravel(data)
n = len(x)
if n < 2:
raise ValueError("Need at least 2 data-points.")
xbar = x.mean()
C = x.var()
if n > 1000: # gaussian approximations for large n
mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n))
sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n)))
vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C)
else:
nm1 = n - 1
fac = n * C / 2.
val = nm1 / 2.
mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1))
sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac))
vdist = distributions.invgamma(val, scale=fac)
return mdist, vdist, sdist
def kstat(data, n=2):
r"""
Return the nth k-statistic (1<=n<=4 so far).
The nth k-statistic k_n is the unique symmetric unbiased estimator of the
nth cumulant kappa_n.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2, 3, 4}, optional
Default is equal to 2.
Returns
-------
kstat : float
The nth k-statistic.
See Also
--------
kstatvar: Returns an unbiased estimator of the variance of the k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
For a sample size n, the first few k-statistics are given by:
.. math::
k_{1} = \mu
k_{2} = \frac{n}{n-1} m_{2}
k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3}
k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)}
where :math:`\mu` is the sample mean, :math:`m_2` is the sample
variance, and :math:`m_i` is the i-th sample central moment.
References
----------
http://mathworld.wolfram.com/k-Statistic.html
http://mathworld.wolfram.com/Cumulant.html
Examples
--------
>>> from scipy import stats
>>> rndm = np.random.RandomState(1234)
As sample size increases, n-th moment and n-th k-statistic converge to the
same number (although they aren't identical). In the case of the normal
distribution, they converge to zero.
>>> for n in [2, 3, 4, 5, 6, 7]:
... x = rndm.normal(size=10**n)
... m, k = stats.moment(x, 3), stats.kstat(x, 3)
... print("%.3g %.3g %.3g" % (m, k, m-k))
-0.631 -0.651 0.0194
0.0282 0.0283 -8.49e-05
-0.0454 -0.0454 1.36e-05
7.53e-05 7.53e-05 -2.26e-09
0.00166 0.00166 -4.99e-09
-2.88e-06 -2.88e-06 8.63e-13
"""
if n > 4 or n < 1:
raise ValueError("k-statistics only supported for 1<=n<=4")
n = int(n)
S = np.zeros(n + 1, np.float64)
data = ravel(data)
N = data.size
# raise ValueError on empty input
if N == 0:
raise ValueError("Data input must not be empty")
# on nan input, return nan without warning
if np.isnan(np.sum(data)):
return np.nan
for k in range(1, n + 1):
S[k] = np.sum(data**k, axis=0)
if n == 1:
return S[1] * 1.0/N
elif n == 2:
return (N*S[2] - S[1]**2.0) / (N*(N - 1.0))
elif n == 3:
return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0))
elif n == 4:
return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 -
4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) /
(N*(N-1.0)*(N-2.0)*(N-3.0)))
else:
raise ValueError("Should not be here.")
def kstatvar(data, n=2):
r"""
Returns an unbiased estimator of the variance of the k-statistic.
See `kstat` for more details of the k-statistic.
Parameters
----------
data : array_like
Input array. Note that n-D input gets flattened.
n : int, {1, 2}, optional
Default is equal to 2.
Returns
-------
kstatvar : float
The nth k-statistic variance.
See Also
--------
kstat: Returns the n-th k-statistic.
moment: Returns the n-th central moment about the mean for a sample.
Notes
-----
The variances of the first few k-statistics are given by:
.. math::
var(k_{1}) = \frac{\kappa^2}{n}
var(k_{2}) = \frac{\kappa^4}{n} + \frac{2\kappa^2_{2}}{n - 1}
var(k_{3}) = \frac{\kappa^6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} +
\frac{9 \kappa^2_{3}}{n - 1} +
\frac{6 n \kappa^3_{2}}{(n-1) (n-2)}
var(k_{4}) = \frac{\kappa^8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} +
\frac{48 \kappa_{3} \kappa_5}{n - 1} +
\frac{34 \kappa^2_{4}}{n-1} + \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} +
\frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} +
\frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)}
"""
data = ravel(data)
N = len(data)
if n == 1:
return kstat(data, n=2) * 1.0/N
elif n == 2:
k2 = kstat(data, n=2)
k4 = kstat(data, n=4)
return (2*N*k2**2 + (N-1)*k4) / (N*(N+1))
else:
raise ValueError("Only n=1 or n=2 supported.")
def _calc_uniform_order_statistic_medians(n):
"""
Approximations of uniform order statistic medians.
Parameters
----------
n : int
Sample size.
Returns
-------
v : 1d float array
Approximations of the order statistic medians.
References
----------
.. [1] James J. Filliben, "The Probability Plot Correlation Coefficient
Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
Order statistics of the uniform distribution on the unit interval
are marginally distributed according to beta distributions.
The expectations of these order statistics are evenly spaced across
the interval, but the distributions are skewed in a way that
pushes the medians slightly towards the endpoints of the unit interval:
>>> n = 4
>>> k = np.arange(1, n+1)
>>> from scipy.stats import beta
>>> a = k
>>> b = n-k+1
>>> beta.mean(a, b)
array([ 0.2, 0.4, 0.6, 0.8])
>>> beta.median(a, b)
array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642])
The Filliben approximation uses the exact medians of the smallest
and greatest order statistics, and the remaining medians are approximated
by points spread evenly across a sub-interval of the unit interval:
>>> from scipy.stats.morestats import _calc_uniform_order_statistic_medians
>>> _calc_uniform_order_statistic_medians(n)
array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642])
This plot shows the skewed distributions of the order statistics
of a sample of size four from a uniform distribution on the unit interval:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0.0, 1.0, num=50, endpoint=True)
>>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)]
>>> plt.figure()
>>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3])
"""
v = np.zeros(n, dtype=np.float64)
v[-1] = 0.5**(1.0 / n)
v[0] = 1 - v[-1]
i = np.arange(2, n)
v[1:-1] = (i - 0.3175) / (n + 0.365)
return v
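# Editor's sketch (not part of the original module): the first and last entries
# are the exact order-statistic medians 1 - 0.5**(1/n) and 0.5**(1/n), matching
# the values quoted in the docstring above.
#
#   >>> v = _calc_uniform_order_statistic_medians(4)
#   >>> np.allclose(v[[0, -1]], [1 - 0.5**0.25, 0.5**0.25])
#   True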
def _parse_dist_kw(dist, enforce_subclass=True):
"""Parse `dist` keyword.
Parameters
----------
dist : str or stats.distributions instance.
Several functions take `dist` as a keyword, hence this utility
function.
enforce_subclass : bool, optional
If True (default), `dist` needs to be a
`_distn_infrastructure.rv_generic` instance.
It can sometimes be useful to set this keyword to False, if a function
wants to accept objects that just look somewhat like such an instance
(for example, they have a ``ppf`` method).
"""
if isinstance(dist, rv_generic):
pass
elif isinstance(dist, string_types):
try:
dist = getattr(distributions, dist)
except AttributeError:
raise ValueError("%s is not a valid distribution name" % dist)
elif enforce_subclass:
msg = ("`dist` should be a stats.distributions instance or a string "
"with the name of such a distribution.")
raise ValueError(msg)
return dist
def _add_axis_labels_title(plot, xlabel, ylabel, title):
"""Helper function to add axes labels and a title to stats plots"""
try:
if hasattr(plot, 'set_title'):
# Matplotlib Axes instance or something that looks like it
plot.set_title(title)
plot.set_xlabel(xlabel)
plot.set_ylabel(ylabel)
else:
# matplotlib.pyplot module
plot.title(title)
plot.xlabel(xlabel)
plot.ylabel(ylabel)
except:
# Not an MPL object or something that looks (enough) like it.
# Don't crash on adding labels or title
pass
def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False):
"""
Calculate quantiles for a probability plot, and optionally show the plot.
Generates a probability plot of sample data against the quantiles of a
specified theoretical distribution (the normal distribution by default).
`probplot` optionally calculates a best-fit line for the data and plots the
results using Matplotlib or a given plot function.
Parameters
----------
x : array_like
Sample/response data from which `probplot` creates the plot.
sparams : tuple, optional
Distribution-specific shape parameters (shape parameters plus location
and scale).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. The default is 'norm' for a
normal probability plot. Objects that look enough like a
stats.distributions instance (i.e. they have a ``ppf`` method) are also
accepted.
fit : bool, optional
Fit a least-squares regression (best-fit) line to the sample data if
True (default).
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
Returns
-------
(osm, osr) : tuple of ndarrays
Tuple of theoretical quantiles (osm, or order statistic medians) and
ordered responses (osr). `osr` is simply sorted input `x`.
For details on how `osm` is calculated see the Notes section.
(slope, intercept, r) : tuple of floats, optional
Tuple containing the result of the least-squares fit, if that is
performed by `probplot`. `r` is the square root of the coefficient of
determination. If ``fit=False`` and ``plot=None``, this tuple is not
returned.
Notes
-----
Even if `plot` is given, the figure is not shown or saved by `probplot`;
``plt.show()`` or ``plt.savefig('figname.png')`` should be used after
calling `probplot`.
`probplot` generates a probability plot, which should not be confused with
a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this
type, see ``statsmodels.api.ProbPlot``.
The formula used for the theoretical quantiles (horizontal axis of the
probability plot) is Filliben's estimate::
quantiles = dist.ppf(val), for

val = 0.5**(1/n),                   for i = n
val = (i - 0.3175) / (n + 0.365),   for i = 2, ..., n-1
val = 1 - 0.5**(1/n),               for i = 1
where ``i`` indicates the i-th ordered value and ``n`` is the total number
of values.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> nsample = 100
>>> np.random.seed(7654321)
A t distribution with small degrees of freedom:
>>> ax1 = plt.subplot(221)
>>> x = stats.t.rvs(3, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A t distribution with larger degrees of freedom:
>>> ax2 = plt.subplot(222)
>>> x = stats.t.rvs(25, size=nsample)
>>> res = stats.probplot(x, plot=plt)
A mixture of two normal distributions with broadcasting:
>>> ax3 = plt.subplot(223)
>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5],
... size=(nsample//2,2)).ravel()
>>> res = stats.probplot(x, plot=plt)
A standard normal distribution:
>>> ax4 = plt.subplot(224)
>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample)
>>> res = stats.probplot(x, plot=plt)
Produce a new figure with a loggamma distribution, using the ``dist`` and
``sparams`` keywords:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> x = stats.loggamma.rvs(c=2.5, size=500)
>>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax)
>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5")
Show the results with Matplotlib:
>>> plt.show()
"""
x = np.asarray(x)
_perform_fit = fit or (plot is not None)
if x.size == 0:
if _perform_fit:
return (x, x), (np.nan, np.nan, 0.0)
else:
return x, x
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
dist = _parse_dist_kw(dist, enforce_subclass=False)
if sparams is None:
sparams = ()
if isscalar(sparams):
sparams = (sparams,)
if not isinstance(sparams, tuple):
sparams = tuple(sparams)
osm = dist.ppf(osm_uniform, *sparams)
osr = sort(x)
if _perform_fit:
# perform a linear least squares fit.
slope, intercept, r, prob, sterrest = stats.linregress(osm, osr)
if plot is not None:
plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-')
_add_axis_labels_title(plot, xlabel='Theoretical quantiles',
ylabel='Ordered Values',
title='Probability Plot')
# Add R^2 value to the plot as text
if rvalue:
xmin = amin(osm)
xmax = amax(osm)
ymin = amin(x)
ymax = amax(x)
posx = xmin + 0.70 * (xmax - xmin)
posy = ymin + 0.01 * (ymax - ymin)
plot.text(posx, posy, "$R^2=%1.4f$" % r**2)
if fit:
return (osm, osr), (slope, intercept, r)
else:
return osm, osr
def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'):
"""
Calculate the shape parameter that maximizes the PPCC
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. ppcc_max returns the shape parameter that would maximize the
probability plot correlation coefficient for the given data to a
one-parameter family of distributions.
Parameters
----------
x : array_like
Input array.
brack : tuple, optional
Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c)
then they are assumed to be a starting interval for a downhill bracket
search (see `scipy.optimize.brent`).
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
Returns
-------
shape_value : float
The shape parameter at which the probability plot correlation
coefficient reaches its max value.
See also
--------
ppcc_plot, probplot, boxcox
Notes
-----
The brack keyword serves as a starting point which is useful in corner
cases. One can use a plot to obtain a rough visual estimate of the location
for the maximum to start the search near it.
References
----------
.. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
.. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000,
... random_state=1234567) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(8, 6))
>>> ax = fig.add_subplot(111)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax)
We calculate the value where the shape should reach its maximum and a red
line is drawn there. The line should coincide with the highest point in the
ppcc_plot.
>>> max = stats.ppcc_max(x)
>>> ax.vlines(max, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
dist = _parse_dist_kw(dist)
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
osr = sort(x)
# this function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation)
# and returns 1-r so that a minimization function maximizes the
# correlation
def tempfunc(shape, mi, yvals, func):
xvals = func(mi, shape)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf))
def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80):
"""
Calculate and optionally plot probability plot correlation coefficient.
The probability plot correlation coefficient (PPCC) plot can be used to
determine the optimal shape parameter for a one-parameter family of
distributions. It cannot be used for distributions without shape parameters
(like the normal distribution) or with multiple shape parameters.
By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A
Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed
distributions via an approximately normal one, and is therefore particularly
useful in practice.
Parameters
----------
x : array_like
Input array.
a, b: scalar
Lower and upper bounds of the shape parameter to use.
dist : str or stats.distributions instance, optional
Distribution or distribution function name. Objects that look enough
like a stats.distributions instance (i.e. they have a ``ppf`` method)
are also accepted. The default is ``'tukeylambda'``.
plot : object, optional
If given, plots PPCC against the shape parameter.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`a` to `b`).
Returns
-------
svals : ndarray
The shape values for which `ppcc` was calculated.
ppcc : ndarray
The calculated probability plot correlation coefficient values.
See also
--------
ppcc_max, probplot, boxcox_normplot, tukeylambda
References
----------
J.J. Filliben, "The Probability Plot Correlation Coefficient Test for
Normality", Technometrics, Vol. 17, pp. 111-117, 1975.
Examples
--------
First we generate some random data from a Tukey-Lambda distribution,
with shape parameter -0.7:
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234567)
>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4
Now we explore this data with a PPCC plot as well as the related
probability plot and Box-Cox normplot. A red line is drawn where we
expect the PPCC value to be maximal (at the shape parameter -0.7 used
above):
>>> fig = plt.figure(figsize=(12, 4))
>>> ax1 = fig.add_subplot(131)
>>> ax2 = fig.add_subplot(132)
>>> ax3 = fig.add_subplot(133)
>>> res = stats.probplot(x, plot=ax1)
>>> res = stats.boxcox_normplot(x, -5, 5, plot=ax2)
>>> res = stats.ppcc_plot(x, -5, 5, plot=ax3)
>>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value')
>>> plt.show()
"""
if b <= a:
raise ValueError("`b` has to be larger than `a`.")
svals = np.linspace(a, b, num=N)
ppcc = np.empty_like(svals)
for k, sval in enumerate(svals):
_, r2 = probplot(x, sval, dist=dist, fit=True)
ppcc[k] = r2[-1]
if plot is not None:
plot.plot(svals, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='Shape Values',
ylabel='Prob Plot Corr. Coef.',
title='(%s) PPCC Plot' % dist)
return svals, ppcc
def boxcox_llf(lmb, data):
r"""The boxcox log-likelihood function.
Parameters
----------
lmb : scalar
Parameter for Box-Cox transformation. See `boxcox` for details.
data : array_like
Data to calculate Box-Cox log-likelihood for. If `data` is
multi-dimensional, the log-likelihood is calculated along the first
axis.
Returns
-------
llf : float or ndarray
Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`,
an array otherwise.
See Also
--------
boxcox, probplot, boxcox_normplot, boxcox_normmax
Notes
-----
The Box-Cox log-likelihood function is defined here as
.. math::
llf = (\lambda - 1) \sum_i(\log(x_i)) -
N/2 \log(\sum_i (y_i - \bar{y})^2 / N),
where ``y`` is the Box-Cox transformed input data ``x``.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes
>>> np.random.seed(1245)
Generate some random variates and calculate Box-Cox log-likelihood values
for them for a range of ``lmbda`` values:
>>> x = stats.loggamma.rvs(5, loc=10, size=1000)
>>> lmbdas = np.linspace(-2, 10)
>>> llf = np.zeros(lmbdas.shape, dtype=float)
>>> for ii, lmbda in enumerate(lmbdas):
... llf[ii] = stats.boxcox_llf(lmbda, x)
Also find the optimal lmbda value with `boxcox`:
>>> x_most_normal, lmbda_optimal = stats.boxcox(x)
Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a
horizontal line to check that that's really the optimum:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(lmbdas, llf, 'b.-')
>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r')
>>> ax.set_xlabel('lmbda parameter')
>>> ax.set_ylabel('Box-Cox log-likelihood')
Now add some probability plots to show that where the log-likelihood is
maximized the data transformed with `boxcox` looks closest to normal:
>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right'
>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs):
... xt = stats.boxcox(x, lmbda=lmbda)
... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt)
... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc)
... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-')
... ax_inset.set_xticklabels([])
... ax_inset.set_yticklabels([])
... ax_inset.set_title('$\lambda=%1.2f$' % lmbda)
>>> plt.show()
"""
data = np.asarray(data)
N = data.shape[0]
if N == 0:
return np.nan
y = boxcox(data, lmb)
y_mean = np.mean(y, axis=0)
llf = (lmb - 1) * np.sum(np.log(data), axis=0)
llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0))
return llf
def _boxcox_conf_interval(x, lmax, alpha):
# Need to find the lambda for which
# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1
fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1)
target = boxcox_llf(lmax, x) - fac
def rootfunc(lmbda, data, target):
return boxcox_llf(lmbda, data) - target
# Find positive endpoint of interval in which answer is to be found
newlm = lmax + 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm += 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target))
# Now find negative interval in the same way
newlm = lmax - 0.5
N = 0
while (rootfunc(newlm, x, target) > 0.0) and (N < 500):
newlm -= 0.1
N += 1
if N == 500:
raise RuntimeError("Could not find endpoint.")
lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target))
return lmminus, lmplus
def boxcox(x, lmbda=None, alpha=None):
r"""
Return a positive dataset transformed by a Box-Cox power transformation.
Parameters
----------
x : ndarray
Input array. Should be 1-dimensional.
lmbda : {None, scalar}, optional
If `lmbda` is not None, do the transformation for that value.
If `lmbda` is None, find the lambda that maximizes the log-likelihood
function and return it as the second output argument.
alpha : {None, float}, optional
If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence
interval for `lmbda` as the third output argument.
Must be between 0.0 and 1.0.
Returns
-------
boxcox : ndarray
Box-Cox power transformed array.
maxlog : float, optional
If the `lmbda` parameter is None, the second returned argument is
the lambda that maximizes the log-likelihood function.
(min_ci, max_ci) : tuple of float, optional
If `lmbda` parameter is None and ``alpha`` is not None, this returned
tuple of floats represents the minimum and maximum confidence limits
given ``alpha``.
See Also
--------
probplot, boxcox_normplot, boxcox_normmax, boxcox_llf
Notes
-----
The Box-Cox transform is given by::
y = (x**lmbda - 1) / lmbda,  for lmbda > 0
y = log(x),                  for lmbda = 0
`boxcox` requires the input data to be positive. Sometimes a Box-Cox
transformation provides a shift parameter to achieve this; `boxcox` does
not. Such a shift parameter is equivalent to adding a positive constant to
`x` before calling `boxcox`.
The confidence limits returned when ``alpha`` is provided give the interval
where:
.. math::
llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1),
with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared
function.
References
----------
G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the
Royal Statistical Society B, 26, 211-252 (1964).
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
We generate some random variates from a non-normal distribution and make a
probability plot for it, to show it is non-normal in the tails:
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(211)
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> prob = stats.probplot(x, dist=stats.norm, plot=ax1)
>>> ax1.set_xlabel('')
>>> ax1.set_title('Probplot against normal distribution')
We now use `boxcox` to transform the data so it's closest to normal:
>>> ax2 = fig.add_subplot(212)
>>> xt, _ = stats.boxcox(x)
>>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2)
>>> ax2.set_title('Probplot after Box-Cox transformation')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if any(x <= 0):
raise ValueError("Data must be positive.")
if lmbda is not None: # single transformation
return special.boxcox(x, lmbda)
# If lmbda=None, find the lmbda that maximizes the log-likelihood function.
lmax = boxcox_normmax(x, method='mle')
y = boxcox(x, lmax)
if alpha is None:
return y, lmax
else:
# Find confidence interval
interval = _boxcox_conf_interval(x, lmax, alpha)
return y, lmax, interval
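# Editor's sketch (not part of the original module): a direct check of the
# transform definition given in the docstring, using scipy.special.boxcox
# (imported above as `special`).
#
#   >>> data = np.array([1.0, 2.0, 3.0])
#   >>> np.allclose(special.boxcox(data, 2.0), (data**2.0 - 1) / 2.0)
#   True
#   >>> np.allclose(special.boxcox(data, 0.0), np.log(data))
#   True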
def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'):
"""Compute optimal Box-Cox transform parameter for input data.
Parameters
----------
x : array_like
Input array.
brack : 2-tuple, optional
The starting interval for a downhill bracket search with
`optimize.brent`. Note that this is in most cases not critical; the
final result is allowed to be outside this bracket.
method : str, optional
The method to determine the optimal transform parameter (`boxcox`
``lmbda`` parameter). Options are:
'pearsonr' (default)
Maximizes the Pearson correlation coefficient between
``y = boxcox(x)`` and the expected values for ``y`` if `x` would be
normally-distributed.
'mle'
Minimizes the log-likelihood `boxcox_llf`. This is the method used
in `boxcox`.
'all'
Use all optimization methods available, and return all results.
Useful to compare different methods.
Returns
-------
maxlog : float or ndarray
The optimal transform parameter found. An array instead of a scalar
for ``method='all'``.
See Also
--------
boxcox, boxcox_llf, boxcox_normplot
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234) # make this example reproducible
Generate some data and determine optimal ``lmbda`` in various ways:
>>> x = stats.loggamma.rvs(5, size=30) + 5
>>> y, lmax_mle = stats.boxcox(x)
>>> lmax_pearsonr = stats.boxcox_normmax(x)
>>> lmax_mle
7.177...
>>> lmax_pearsonr
7.916...
>>> stats.boxcox_normmax(x, method='all')
array([ 7.91667384, 7.17718692])
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax)
>>> ax.axvline(lmax_mle, color='r')
>>> ax.axvline(lmax_pearsonr, color='g', ls='--')
>>> plt.show()
"""
def _pearsonr(x, brack):
osm_uniform = _calc_uniform_order_statistic_medians(len(x))
xvals = distributions.norm.ppf(osm_uniform)
def _eval_pearsonr(lmbda, xvals, samps):
# This function computes the x-axis values of the probability plot
# and computes a linear regression (including the correlation) and
# returns ``1 - r`` so that a minimization function maximizes the
# correlation.
y = boxcox(samps, lmbda)
yvals = np.sort(y)
r, prob = stats.pearsonr(xvals, yvals)
return 1 - r
return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x))
def _mle(x, brack):
def _eval_mle(lmb, data):
# function to minimize
return -boxcox_llf(lmb, data)
return optimize.brent(_eval_mle, brack=brack, args=(x,))
def _all(x, brack):
maxlog = np.zeros(2, dtype=float)
maxlog[0] = _pearsonr(x, brack)
maxlog[1] = _mle(x, brack)
return maxlog
methods = {'pearsonr': _pearsonr,
'mle': _mle,
'all': _all}
if method not in methods.keys():
raise ValueError("Method %s not recognized." % method)
optimfunc = methods[method]
return optimfunc(x, brack)
def boxcox_normplot(x, la, lb, plot=None, N=80):
"""Compute parameters for a Box-Cox normality plot, optionally show it.
A Box-Cox normality plot shows graphically what the best transformation
parameter is to use in `boxcox` to obtain a distribution that is close
to normal.
Parameters
----------
x : array_like
Input array.
la, lb : scalar
The lower and upper bounds for the ``lmbda`` values to pass to `boxcox`
for Box-Cox transformations. These are also the limits of the
horizontal axis of the plot if that is generated.
plot : object, optional
If given, plots the quantiles and least squares fit.
`plot` is an object that has to have methods "plot" and "text".
The `matplotlib.pyplot` module or a Matplotlib Axes object can be used,
or a custom object with the same methods.
Default is None, which means that no plot is created.
N : int, optional
Number of points on the horizontal axis (equally distributed from
`la` to `lb`).
Returns
-------
lmbdas : ndarray
The ``lmbda`` values for which a Box-Cox transform was done.
ppcc : ndarray
Probability Plot Correlation Coefficient, as obtained from `probplot`
when fitting the Box-Cox transformed input `x` against a normal
distribution.
See Also
--------
probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max
Notes
-----
Even if `plot` is given, the figure is not shown or saved by
`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')``
should be used after calling `boxcox_normplot`.
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Generate some non-normally distributed data, and create a Box-Cox plot:
>>> x = stats.loggamma.rvs(5, size=500) + 5
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax)
Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in
the same plot:
>>> _, maxlog = stats.boxcox(x)
>>> ax.axvline(maxlog, color='r')
>>> plt.show()
"""
x = np.asarray(x)
if x.size == 0:
return x
if lb <= la:
raise ValueError("`lb` has to be larger than `la`.")
lmbdas = np.linspace(la, lb, num=N)
ppcc = lmbdas * 0.0
for i, val in enumerate(lmbdas):
# Determine for each lmbda the correlation coefficient of transformed x
z = boxcox(x, lmbda=val)
_, r2 = probplot(z, dist='norm', fit=True)
ppcc[i] = r2[-1]
if plot is not None:
plot.plot(lmbdas, ppcc, 'x')
_add_axis_labels_title(plot, xlabel='$\lambda$',
ylabel='Prob Plot Corr. Coef.',
title='Box-Cox Normality Plot')
return lmbdas, ppcc
def shapiro(x, a=None, reta=False):
"""
Perform the Shapiro-Wilk test for normality.
The Shapiro-Wilk test tests the null hypothesis that the
data was drawn from a normal distribution.
Parameters
----------
x : array_like
Array of sample data.
a : array_like, optional
Array of internal parameters used in the calculation. If these
are not given, they will be computed internally. If x has length
n, then a must have length n/2.
reta : bool, optional
Whether or not to return the internally computed a values. The
default is False.
Returns
-------
W : float
The test statistic.
p-value : float
The p-value for the hypothesis test.
a : array_like, optional
If `reta` is True, then these are the internally computed "a"
values that may be passed into this function on future calls.
See Also
--------
anderson : The Anderson-Darling test for normality
kstest : The Kolmogorov-Smirnov test for goodness of fit.
Notes
-----
The algorithm used is described in [4]_ but censoring parameters as
described are not implemented. For N > 5000 the W test statistic is accurate
but the p-value may not be.
The chance of rejecting the null hypothesis when it is true is close to 5%
regardless of sample size.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Shapiro, S. S. & Wilk, M.B (1965). An analysis of variance test for
normality (complete samples), Biometrika, Vol. 52, pp. 591-611.
.. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk,
Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of
Statistical Modeling and Analytics, Vol. 2, pp. 21-33.
.. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(12345678)
>>> x = stats.norm.rvs(loc=5, scale=3, size=100)
>>> stats.shapiro(x)
(0.9772805571556091, 0.08144091814756393)
"""
if a is not None or reta:
warnings.warn("input parameters 'a' and 'reta' are scheduled to be "
"removed in version 0.18.0", FutureWarning)
x = np.ravel(x)
N = len(x)
if N < 3:
raise ValueError("Data must be at least length 3.")
if a is None:
a = zeros(N, 'f')
init = 0
else:
if len(a) != N // 2:
raise ValueError("len(a) must equal len(x)/2")
init = 1
y = sort(x)
a, w, pw, ifault = statlib.swilk(y, a[:N//2], init)
if ifault not in [0, 2]:
warnings.warn("Input data for shapiro has range zero. The results "
"may not be accurate.")
if N > 5000:
warnings.warn("p-value may not be accurate for N > 5000.")
if reta:
return w, pw, a
else:
return w, pw
# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and
# Some Comparisons", Journal of he American Statistical
# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737
_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092])
_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957])
# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution",
# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588.
_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038])
# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based
# on the Empirical Distribution Function.", Biometrika,
# Vol. 66, Issue 3, Dec. 1979, pp 591-595.
_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010])
AndersonResult = namedtuple('AndersonResult', ('statistic',
'critical_values',
'significance_level'))
def anderson(x, dist='norm'):
"""
Anderson-Darling test for data coming from a particular distribution
The Anderson-Darling test is a modification of the Kolmogorov-
Smirnov test `kstest` for the null hypothesis that a sample is
drawn from a population that follows a particular distribution.
For the Anderson-Darling test, the critical values depend on
which distribution is being tested against. This function works
for normal, exponential, logistic, or Gumbel (Extreme Value
Type I) distributions.
Parameters
----------
x : array_like
array of sample data
dist : {'norm','expon','logistic','gumbel','gumbel_l', 'gumbel_r',
'extreme1'}, optional
the type of distribution to test against. The default is 'norm'
and 'extreme1', 'gumbel_l' and 'gumbel' are synonyms.
Returns
-------
statistic : float
The Anderson-Darling test statistic
critical_values : list
The critical values for this distribution
significance_level : list
The significance levels for the corresponding critical values
in percents. The function returns critical values for a
differing set of significance levels depending on the
distribution that is being tested against.
Notes
-----
Critical values provided are for the following significance levels:
normal/exponential
15%, 10%, 5%, 2.5%, 1%
logistic
25%, 10%, 5%, 2.5%, 1%, 0.5%
Gumbel
25%, 10%, 5%, 2.5%, 1%
If A2 is larger than these critical values then for the corresponding
significance level, the null hypothesis that the data come from the
chosen distribution can be rejected.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm
.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and
Some Comparisons, Journal of the American Statistical Association,
Vol. 69, pp. 730-737.
.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit
Statistics with Unknown Parameters, Annals of Statistics, Vol. 4,
pp. 357-369.
.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value
Distribution, Biometrika, Vol. 64, pp. 583-588.
.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference
to Tests for Exponentiality, Technical Report No. 262,
Department of Statistics, Stanford University, Stanford, CA.
.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution
Based on the Empirical Distribution Function, Biometrika, Vol. 66,
pp. 591-595.
"""
if dist not in ['norm', 'expon', 'gumbel', 'gumbel_l',
'gumbel_r', 'extreme1', 'logistic']:
raise ValueError("Invalid distribution; dist must be 'norm', "
"'expon', 'gumbel', 'extreme1' or 'logistic'.")
y = sort(x)
xbar = np.mean(x, axis=0)
N = len(y)
if dist == 'norm':
s = np.std(x, ddof=1, axis=0)
w = (y - xbar) / s
logcdf = distributions.norm.logcdf(w)
logsf = distributions.norm.logsf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3)
elif dist == 'expon':
w = y / xbar
logcdf = distributions.expon.logcdf(w)
logsf = distributions.expon.logsf(w)
sig = array([15, 10, 5, 2.5, 1])
critical = around(_Avals_expon / (1.0 + 0.6/N), 3)
elif dist == 'logistic':
def rootfunc(ab, xj, N):
a, b = ab
tmp = (xj - a) / b
tmp2 = exp(tmp)
val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N,
np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N]
return array(val)
sol0 = array([xbar, np.std(x, ddof=1, axis=0)])
sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5)
w = (y - sol[0]) / sol[1]
logcdf = distributions.logistic.logcdf(w)
logsf = distributions.logistic.logsf(w)
sig = array([25, 10, 5, 2.5, 1, 0.5])
critical = around(_Avals_logistic / (1.0 + 0.25/N), 3)
elif dist == 'gumbel_r':
xbar, s = distributions.gumbel_r.fit(x)
w = (y - xbar) / s
logcdf = distributions.gumbel_r.logcdf(w)
logsf = distributions.gumbel_r.logsf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
else: # (dist == 'gumbel') or (dist == 'gumbel_l') or (dist == 'extreme1')
xbar, s = distributions.gumbel_l.fit(x)
w = (y - xbar) / s
logcdf = distributions.gumbel_l.logcdf(w)
logsf = distributions.gumbel_l.logsf(w)
sig = array([25, 10, 5, 2.5, 1])
critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3)
i = arange(1, N + 1)
A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0)
return AndersonResult(A2, critical, sig)
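# Editor's sketch (not part of the original module): interpreting the result.
# For data that genuinely is normal the statistic typically stays below every
# critical value, so the null hypothesis is not rejected at any listed level;
# the exact numbers depend on the random seed.
#
#   >>> rng = np.random.RandomState(0)
#   >>> res = anderson(rng.normal(loc=5.0, scale=2.0, size=500), dist='norm')
#   >>> res.statistic < res.critical_values.min()   # usually True for normal data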
def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 7 of Scholz and Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2aKN : float
The A2aKN statistics of Scholz and Stephens 1987.
"""
A2akN = 0.
Z_ssorted_left = Z.searchsorted(Zstar, 'left')
if N == Zstar.size:
lj = 1.
else:
lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left
Bj = Z_ssorted_left + lj / 2.
for i in arange(0, k):
s = np.sort(samples[i])
s_ssorted_right = s.searchsorted(Zstar, side='right')
Mij = s_ssorted_right.astype(float)
fij = s_ssorted_right - s.searchsorted(Zstar, 'left')
Mij -= fij / 2.
inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.)
A2akN += inner.sum() / n[i]
A2akN *= (N - 1.) / N
return A2akN
def _anderson_ksamp_right(samples, Z, Zstar, k, n, N):
"""
Compute A2akN equation 6 of Scholz & Stephens.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample arrays.
Z : array_like
Sorted array of all observations.
Zstar : array_like
Sorted array of unique observations.
k : int
Number of samples.
n : array_like
Number of observations in each sample.
N : int
Total number of observations.
Returns
-------
A2KN : float
The A2KN statistics of Scholz and Stephens 1987.
"""
A2kN = 0.
lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1],
'left')
Bj = lj.cumsum()
for i in arange(0, k):
s = np.sort(samples[i])
Mij = s.searchsorted(Zstar[:-1], side='right')
inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj))
A2kN += inner.sum() / n[i]
return A2kN
Anderson_ksampResult = namedtuple('Anderson_ksampResult',
('statistic', 'critical_values',
'significance_level'))
def anderson_ksamp(samples, midrank=True):
"""The Anderson-Darling test for k-samples.
The k-sample Anderson-Darling test is a modification of the
one-sample Anderson-Darling test. It tests the null hypothesis
that k-samples are drawn from the same population without having
to specify the distribution function of that population. The
critical values depend on the number of samples.
Parameters
----------
samples : sequence of 1-D array_like
Array of sample data in arrays.
midrank : bool, optional
Type of Anderson-Darling test which is computed. Default
(True) is the midrank test applicable to continuous and
discrete populations. If False, the right side empirical
distribution is used.
Returns
-------
statistic : float
Normalized k-sample Anderson-Darling test statistic.
critical_values : array
The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%.
significance_level : float
An approximate significance level at which the null hypothesis for the
provided samples can be rejected.
Raises
------
ValueError
If less than 2 samples are provided, a sample is empty, or no
distinct observations are in the samples.
See Also
--------
ks_2samp : 2 sample Kolmogorov-Smirnov test
anderson : 1 sample Anderson-Darling test
Notes
-----
[1]_ Defines three versions of the k-sample Anderson-Darling test:
one for continuous distributions and two for discrete
distributions, in which ties between samples may occur. The
default of this routine is to compute the version based on the
midrank empirical distribution function. This test is applicable
to continuous and discrete data. If midrank is set to False, the
right side empirical distribution is used for a test for discrete
data. According to [1]_, the two discrete test statistics differ
only slightly if a few collisions due to round-off errors occur in
the test not adjusted for ties between samples.
.. versionadded:: 0.14.0
References
----------
.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample
Anderson-Darling Tests, Journal of the American Statistical
Association, Vol. 82, pp. 918-924.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(314159)
The null hypothesis that the two random samples come from the same
distribution can be rejected at the 5% level because the returned
test value is greater than the critical value for 5% (1.961) but
not at the 2.5% level. The interpolation gives an approximate
significance level of 3.1%:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(loc=0.5, size=30)])
(2.4615796189876105,
array([ 0.325, 1.226, 1.961, 2.718, 3.752]),
0.03134990135800783)
The null hypothesis cannot be rejected for three samples from an
identical distribution. The approximate p-value (87%) has to be
computed by extrapolation and may not be very accurate:
>>> stats.anderson_ksamp([np.random.normal(size=50),
... np.random.normal(size=30), np.random.normal(size=20)])
(-0.73091722665244196,
array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]),
0.8789283903979661)
"""
k = len(samples)
if (k < 2):
raise ValueError("anderson_ksamp needs at least two samples")
samples = list(map(np.asarray, samples))
Z = np.sort(np.hstack(samples))
N = Z.size
Zstar = np.unique(Z)
if Zstar.size < 2:
raise ValueError("anderson_ksamp needs more than one distinct "
"observation")
n = np.array([sample.size for sample in samples])
if any(n == 0):
raise ValueError("anderson_ksamp encountered sample without "
"observations")
if midrank:
A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N)
else:
A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N)
H = (1. / n).sum()
hs_cs = (1. / arange(N - 1, 1, -1)).cumsum()
h = hs_cs[-1] + 1
g = (hs_cs / arange(2, N)).sum()
a = (4*g - 6) * (k - 1) + (10 - 6*g)*H
b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6
c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h
d = (2*h + 6)*k**2 - 4*h*k
sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.))
m = k - 1
A2 = (A2kN - m) / math.sqrt(sigmasq)
# The b_i values are the interpolation coefficients from Table 2
# of Scholz and Stephens 1987
b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326])
b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822])
b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396])
critical = b0 + b1 / math.sqrt(m) + b2 / m
pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2)
if A2 < critical.min() or A2 > critical.max():
warnings.warn("approximate p-value will be computed by extrapolation")
p = math.exp(np.polyval(pf, A2))
return Anderson_ksampResult(A2, critical, p)
AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue'))
def ansari(x, y):
"""
Perform the Ansari-Bradley test for equal scale parameters
The Ansari-Bradley test is a non-parametric test for the equality
of the scale parameter of the distributions from which two
samples were drawn.
Parameters
----------
x, y : array_like
arrays of sample data
Returns
-------
statistic : float
The Ansari-Bradley test statistic
pvalue : float
The p-value of the hypothesis test
See Also
--------
fligner : A non-parametric test for the equality of k variances
mood : A non-parametric test for the equality of two scale parameters
Notes
-----
The p-value given is exact when the sample sizes are both less than
55 and there are no ties, otherwise a normal approximation for the
p-value is used.
References
----------
.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical
methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2.
"""
x, y = asarray(x), asarray(y)
n = len(x)
m = len(y)
if m < 1:
raise ValueError("Not enough other observations.")
if n < 1:
raise ValueError("Not enough test observations.")
N = m + n
xy = r_[x, y] # combine
rank = stats.rankdata(xy)
symrank = amin(array((rank, N - rank + 1)), 0)
AB = np.sum(symrank[:n], axis=0)
uxy = unique(xy)
repeats = (len(uxy) != len(xy))
exact = ((m < 55) and (n < 55) and not repeats)
if repeats and (m < 55 or n < 55):
warnings.warn("Ties preclude use of exact statistic.")
if exact:
astart, a1, ifault = statlib.gscale(n, m)
ind = AB - astart
total = np.sum(a1, axis=0)
if ind < len(a1)/2.0:
cind = int(ceil(ind))
if ind == cind:
pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total
else:
pval = 2.0 * np.sum(a1[:cind], axis=0) / total
else:
find = int(floor(ind))
if ind == floor(ind):
pval = 2.0 * np.sum(a1[find:], axis=0) / total
else:
pval = 2.0 * np.sum(a1[find+1:], axis=0) / total
return AnsariResult(AB, min(1.0, pval))
# otherwise compute normal approximation
if N % 2: # N odd
mnAB = n * (N+1.0)**2 / 4.0 / N
varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2)
else:
mnAB = n * (N+2.0) / 4.0
varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0)
if repeats: # adjust variance estimates
# compute np.sum(tj * rj**2,axis=0)
fac = np.sum(symrank**2, axis=0)
if N % 2: # N odd
varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1))
else: # N even
varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1))
z = (AB - mnAB) / sqrt(varAB)
pval = distributions.norm.sf(abs(z)) * 2.0
return AnsariResult(AB, pval)
BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue'))
def bartlett(*args):
"""
Perform Bartlett's test for equal variances
Bartlett's test tests the null hypothesis that all input samples
are from populations with equal variances. For samples
from significantly non-normal populations, Levene's test
`levene` is more robust.
Parameters
----------
sample1, sample2,... : array_like
arrays of sample data. May be different lengths.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value of the test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
levene : A robust parametric test for equality of k variances
Notes
-----
Conover et al. (1981) examine many of the existing parametric and
nonparametric tests by extensive simulations and they conclude that the
tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
superior in terms of robustness against departures from normality and power [3]_.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm
.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical
Methods, Eighth Edition, Iowa State University Press.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical
Tests. Proceedings of the Royal Society of London. Series A,
Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282.
"""
# Handle empty input
for a in args:
if np.asanyarray(a).size == 0:
return BartlettResult(np.nan, np.nan)
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
ssq = zeros(k, 'd')
for j in range(k):
Ni[j] = len(args[j])
ssq[j] = np.var(args[j], ddof=1)
Ntot = np.sum(Ni, axis=0)
spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k))
numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0)
denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) -
1.0/(Ntot - k))
T = numer / denom
pval = distributions.chi2.sf(T, k - 1) # 1 - cdf
return BartlettResult(T, pval)
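# Illustrative usage sketch -- not part of the original SciPy source.  The
# hypothetical helper below shows how `bartlett` is typically called on
# several samples; a large p-value means the equal-variance null hypothesis
# is not rejected.
def _bartlett_usage_example():
    rng = np.random.RandomState(12345)
    a = rng.normal(loc=0.0, scale=1.0, size=50)
    b = rng.normal(loc=2.0, scale=1.0, size=50)
    c = rng.normal(loc=-1.0, scale=1.0, size=50)
    return bartlett(a, b, c)  # returns BartlettResult(statistic, pvalue)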
LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue'))
def levene(*args, **kwds):
"""
Perform Levene test for equal variances.
The Levene test tests the null hypothesis that all input samples
are from populations with equal variances. Levene's test is an
alternative to Bartlett's test `bartlett` in the case where
there are significant deviations from normality.
Parameters
----------
sample1, sample2, ... : array_like
The sample data, possibly with different lengths
center : {'mean', 'median', 'trimmed'}, optional
Which function of the data to use in the test. The default
is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the test.
Notes
-----
Three variations of Levene's test are possible. The possibilities
and their recommended usages are:
      * 'median' : Recommended for skewed (non-normal) distributions.
* 'mean' : Recommended for symmetric, moderate-tailed distributions.
* 'trimmed' : Recommended for heavy-tailed distributions.
References
----------
.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm
.. [2] Levene, H. (1960). In Contributions to Probability and Statistics:
Essays in Honor of Harold Hotelling, I. Olkin et al. eds.,
Stanford University Press, pp. 278-292.
.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American
Statistical Association, 69, 364-367
"""
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("levene() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
Ni = zeros(k)
Yci = zeros(k, 'd')
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
" or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(np.sort(arg), proportiontocut)
for arg in args)
func = lambda x: np.mean(x, axis=0)
for j in range(k):
Ni[j] = len(args[j])
Yci[j] = func(args[j])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [None] * k
for i in range(k):
Zij[i] = abs(asarray(args[i]) - Yci[i])
# compute Zbari
Zbari = zeros(k, 'd')
Zbar = 0.0
for i in range(k):
Zbari[i] = np.mean(Zij[i], axis=0)
Zbar += Zbari[i] * Ni[i]
Zbar /= Ntot
numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0)
# compute denom_variance
dvar = 0.0
for i in range(k):
dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0)
denom = (k - 1.0) * dvar
W = numer / denom
pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf
return LeveneResult(W, pval)
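# Illustrative usage sketch -- not part of the original SciPy source.  The
# hypothetical helper below exercises the robust, median-centred variant
# (the default) on samples of unequal length; a small p-value would indicate
# unequal variances.
def _levene_usage_example():
    rng = np.random.RandomState(0)
    a = rng.normal(scale=1.0, size=40)
    b = rng.normal(scale=1.0, size=55)
    c = rng.normal(scale=3.0, size=35)
    return levene(a, b, c, center='median')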
@setastest(False)
def binom_test(x, n=None, p=0.5, alternative='two-sided'):
"""
Perform a test that the probability of success is p.
This is an exact, two-sided test of the null hypothesis
that the probability of success in a Bernoulli experiment
is `p`.
Parameters
----------
x : integer or array_like
the number of successes, or if x has length 2, it is the
number of successes and the number of failures.
n : integer
the number of trials. This is ignored if x gives both the
number of successes and failures
p : float, optional
The hypothesized probability of success. 0 <= p <= 1. The
default value is p = 0.5
alternative : {'two-sided', 'greater', 'less'}, optional
Indicates the alternative hypothesis. The default value is
'two-sided'.
Returns
-------
p-value : float
The p-value of the hypothesis test
References
----------
.. [1] http://en.wikipedia.org/wiki/Binomial_test
"""
x = atleast_1d(x).astype(np.integer)
if len(x) == 2:
n = x[1] + x[0]
x = x[0]
elif len(x) == 1:
x = x[0]
if n is None or n < x:
raise ValueError("n must be >= x")
n = np.int_(n)
else:
raise ValueError("Incorrect length for x.")
if (p > 1.0) or (p < 0.0):
raise ValueError("p must be in range [0,1]")
if alternative not in ('two-sided', 'less', 'greater'):
raise ValueError("alternative not recognized\n"
"should be 'two-sided', 'less' or 'greater'")
if alternative == 'less':
pval = distributions.binom.cdf(x, n, p)
return pval
if alternative == 'greater':
pval = distributions.binom.sf(x-1, n, p)
return pval
# if alternative was neither 'less' nor 'greater', then it's 'two-sided'
d = distributions.binom.pmf(x, n, p)
rerr = 1 + 1e-7
if x == p * n:
# special case as shortcut, would also be handled by `else` below
pval = 1.
elif x < p * n:
i = np.arange(np.ceil(p * n), n+1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(x, n, p) +
distributions.binom.sf(n - y, n, p))
else:
i = np.arange(np.floor(p*n) + 1)
y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0)
pval = (distributions.binom.cdf(y-1, n, p) +
distributions.binom.sf(x-1, n, p))
return min(1.0, pval)
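# Illustrative worked case -- not part of the original SciPy source.  With
# x = 3 successes in n = 10 trials and p = 0.5, the two-sided p-value is
# P(X <= 3) + P(X >= 7) = 2 * 0.171875 = 0.34375 by symmetry of the
# Binomial(10, 0.5) distribution, which is what the branch above computes.
def _binom_test_example():
    return binom_test(3, n=10, p=0.5)  # approximately 0.34375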
def _apply_func(x, g, func):
# g is list of indices into x
# separating x into different groups
# func should be applied over the groups
g = unique(r_[0, g, len(x)])
output = []
for k in range(len(g) - 1):
output.append(func(x[g[k]:g[k+1]]))
return asarray(output)
FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue'))
def fligner(*args, **kwds):
"""
Perform Fligner-Killeen test for equality of variance.
Fligner's test tests the null hypothesis that all input samples
are from populations with equal variances. Fligner-Killeen's test is
distribution free when populations are identical [2]_.
Parameters
----------
sample1, sample2, ... : array_like
Arrays of sample data. Need not be the same length.
center : {'mean', 'median', 'trimmed'}, optional
Keyword argument controlling which function of the data is used in
computing the test statistic. The default is 'median'.
proportiontocut : float, optional
When `center` is 'trimmed', this gives the proportion of data points
to cut from each end. (See `scipy.stats.trim_mean`.)
Default is 0.05.
Returns
-------
statistic : float
The test statistic.
pvalue : float
The p-value for the hypothesis test.
See Also
--------
bartlett : A parametric test for equality of k variances in normal samples
levene : A robust parametric test for equality of k variances
Notes
-----
As with Levene's test there are three variants of Fligner's test that
differ by the measure of central tendency used in the test. See `levene`
for more information.
Conover et al. (1981) examine many of the existing parametric and
nonparametric tests by extensive simulations and they conclude that the
tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to be
    superior in terms of robustness to departures from normality and power [3]_.
References
----------
.. [1] http://www.stat.psu.edu/~bgl/center/tr/TR993.ps
.. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
tests for scale. 'Journal of the American Statistical Association.'
71(353), 210-213.
.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
Hypothesis Testing based on Quadratic Inference Function. Technical
Report #99-03, Center for Likelihood Studies, Pennsylvania State
University.
.. [4] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
comparative study of tests for homogeneity of variances, with
           applications to the outer continental shelf bidding data.
Technometrics, 23(4), 351-361.
"""
# Handle empty input
for a in args:
if np.asanyarray(a).size == 0:
return FlignerResult(np.nan, np.nan)
# Handle keyword arguments.
center = 'median'
proportiontocut = 0.05
for kw, value in kwds.items():
if kw not in ['center', 'proportiontocut']:
raise TypeError("fligner() got an unexpected keyword "
"argument '%s'" % kw)
if kw == 'center':
center = value
else:
proportiontocut = value
k = len(args)
if k < 2:
raise ValueError("Must enter at least two input sample vectors.")
if center not in ['mean', 'median', 'trimmed']:
raise ValueError("Keyword argument <center> must be 'mean', 'median'"
" or 'trimmed'.")
if center == 'median':
func = lambda x: np.median(x, axis=0)
elif center == 'mean':
func = lambda x: np.mean(x, axis=0)
else: # center == 'trimmed'
args = tuple(stats.trimboth(arg, proportiontocut) for arg in args)
func = lambda x: np.mean(x, axis=0)
Ni = asarray([len(args[j]) for j in range(k)])
Yci = asarray([func(args[j]) for j in range(k)])
Ntot = np.sum(Ni, axis=0)
# compute Zij's
Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)]
allZij = []
g = [0]
for i in range(k):
allZij.extend(list(Zij[i]))
g.append(len(allZij))
ranks = stats.rankdata(allZij)
a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5)
# compute Aibar
Aibar = _apply_func(a, g, np.sum) / Ni
anbar = np.mean(a, axis=0)
varsq = np.var(a, axis=0, ddof=1)
Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq
pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf
return FlignerResult(Xsq, pval)
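# Illustrative usage sketch -- not part of the original SciPy source.  The
# hypothetical helper below applies the rank-based Fligner-Killeen test to
# the same kind of input as `levene` above.
def _fligner_usage_example():
    rng = np.random.RandomState(1)
    a = rng.normal(scale=1.0, size=30)
    b = rng.normal(scale=1.5, size=45)
    return fligner(a, b, center='median')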
def mood(x, y, axis=0):
"""
Perform Mood's test for equal scale parameters.
Mood's two-sample test for scale parameters is a non-parametric
test for the null hypothesis that two samples are drawn from the
same distribution with the same scale parameter.
Parameters
----------
x, y : array_like
Arrays of sample data.
axis : int, optional
The axis along which the samples are tested. `x` and `y` can be of
different length along `axis`.
If `axis` is None, `x` and `y` are flattened and the test is done on
all values in the flattened arrays.
Returns
-------
z : scalar or ndarray
The z-score for the hypothesis test. For 1-D inputs a scalar is
returned.
    p-value : scalar or ndarray
The p-value for the hypothesis test.
See Also
--------
fligner : A non-parametric test for the equality of k variances
ansari : A non-parametric test for the equality of 2 variances
bartlett : A parametric test for equality of k variances in normal samples
levene : A parametric test for equality of k variances
Notes
-----
The data are assumed to be drawn from probability distributions ``f(x)``
and ``f(x/s) / s`` respectively, for some probability density function f.
The null hypothesis is that ``s == 1``.
For multi-dimensional arrays, if the inputs are of shapes
``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the
resulting z and p values will have shape ``(n0, n2, n3)``. Note that
``n1`` and ``m1`` don't have to be equal, but the other dimensions do.
Examples
--------
>>> from scipy import stats
>>> np.random.seed(1234)
>>> x2 = np.random.randn(2, 45, 6, 7)
>>> x1 = np.random.randn(2, 30, 6, 7)
>>> z, p = stats.mood(x1, x2, axis=1)
>>> p.shape
(2, 6, 7)
Find the number of points where the difference in scale is not significant:
>>> (p > 0.1).sum()
74
Perform the test with different scales:
>>> x1 = np.random.randn(2, 30)
>>> x2 = np.random.randn(2, 35) * 10.0
>>> stats.mood(x1, x2, axis=1)
(array([-5.7178125 , -5.25342163]), array([ 1.07904114e-08, 1.49299218e-07]))
"""
x = np.asarray(x, dtype=float)
y = np.asarray(y, dtype=float)
if axis is None:
x = x.flatten()
y = y.flatten()
axis = 0
# Determine shape of the result arrays
res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis])
if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if
ax != axis])):
raise ValueError("Dimensions of x and y on all axes except `axis` "
"should match")
n = x.shape[axis]
m = y.shape[axis]
N = m + n
if N < 3:
raise ValueError("Not enough observations.")
xy = np.concatenate((x, y), axis=axis)
if axis != 0:
xy = np.rollaxis(xy, axis)
xy = xy.reshape(xy.shape[0], -1)
# Generalized to the n-dimensional case by adding the axis argument, and
# using for loops, since rankdata is not vectorized. For improving
# performance consider vectorizing rankdata function.
all_ranks = np.zeros_like(xy)
for j in range(xy.shape[1]):
all_ranks[:, j] = stats.rankdata(xy[:, j])
Ri = all_ranks[:n]
M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0)
# Approx stat.
mnM = n * (N * N - 1.0) / 12
varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180
z = (M - mnM) / sqrt(varM)
# sf for right tail, cdf for left tail. Factor 2 for two-sidedness
z_pos = z > 0
pval = np.zeros_like(z)
pval[z_pos] = 2 * distributions.norm.sf(z[z_pos])
pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos])
if res_shape == ():
# Return scalars, not 0-D arrays
z = z[0]
pval = pval[0]
else:
z.shape = res_shape
pval.shape = res_shape
return z, pval
WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue'))
def wilcoxon(x, y=None, zero_method="wilcox", correction=False):
"""
Calculate the Wilcoxon signed-rank test.
The Wilcoxon signed-rank test tests the null hypothesis that two
related paired samples come from the same distribution. In particular,
it tests whether the distribution of the differences x - y is symmetric
about zero. It is a non-parametric version of the paired T-test.
Parameters
----------
x : array_like
The first set of measurements.
y : array_like, optional
The second set of measurements. If `y` is not given, then the `x`
array is considered to be the differences between the two sets of
measurements.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt":
Pratt treatment: includes zero-differences in the ranking process
(more conservative)
"wilcox":
Wilcox treatment: discards all zero-differences
"zsplit":
            Zero rank split: just like Pratt, but splitting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the
z-statistic. Default is False.
Returns
-------
statistic : float
The sum of the ranks of the differences above or below zero, whichever
is smaller.
pvalue : float
The two-sided p-value for the test.
Notes
-----
Because the normal approximation is used for the calculations, the
samples used should be large. A typical rule is to require that
n > 20.
References
----------
.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test
"""
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError("Zero method should be either 'wilcox' "
"or 'pratt' or 'zsplit'")
if y is None:
d = asarray(x)
else:
x, y = map(asarray, (x, y))
if len(x) != len(y):
raise ValueError('Unequal N in wilcoxon. Aborting.')
d = x - y
if zero_method == "wilcox":
# Keep all non-zero differences
d = compress(np.not_equal(d, 0), d, axis=-1)
count = len(d)
if count < 10:
warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = np.sum((d > 0) * r, axis=0)
r_minus = np.sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = np.sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.
r_minus += r_zero / 2.
T = min(r_plus, r_minus)
mn = count * (count + 1.) * 0.25
se = count * (count + 1.) * (2. * count + 1.)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
prob = 2. * distributions.norm.sf(abs(z))
return WilcoxonResult(T, prob)
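# Illustrative usage sketch -- not part of the original SciPy source.  The
# hypothetical helper below builds paired "before"/"after" measurements; the
# signed-rank test ranks the differences before - after.
def _wilcoxon_usage_example():
    rng = np.random.RandomState(0)
    before = rng.normal(loc=10.0, scale=1.0, size=30)
    after = before + rng.normal(loc=0.5, scale=1.0, size=30)
    return wilcoxon(before, after)  # returns WilcoxonResult(statistic, pvalue)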
@setastest(False)
def median_test(*args, **kwds):
"""
Mood's median test.
Test that two or more samples come from populations with the same median.
Let ``n = len(args)`` be the number of samples. The "grand median" of
all the data is computed, and a contingency table is formed by
classifying the values in each sample as being above or below the grand
median. The contingency table, along with `correction` and `lambda_`,
are passed to `scipy.stats.chi2_contingency` to compute the test statistic
and p-value.
Parameters
----------
sample1, sample2, ... : array_like
The set of samples. There must be at least two samples.
Each sample must be a one-dimensional sequence containing at least
one value. The samples are not required to have the same length.
ties : str, optional
Determines how values equal to the grand median are classified in
the contingency table. The string must be one of::
"below":
Values equal to the grand median are counted as "below".
"above":
Values equal to the grand median are counted as "above".
"ignore":
Values equal to the grand median are not counted.
The default is "below".
correction : bool, optional
If True, *and* there are just two samples, apply Yates' correction
for continuity when computing the test statistic associated with
the contingency table. Default is True.
lambda_ : float or str, optional.
By default, the statistic computed in this test is Pearson's
chi-squared statistic. `lambda_` allows a statistic from the
Cressie-Read power divergence family to be used instead. See
`power_divergence` for details.
Default is 1 (Pearson's chi-squared statistic).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
stat : float
The test statistic. The statistic that is returned is determined by
`lambda_`. The default is Pearson's chi-squared statistic.
p : float
The p-value of the test.
m : float
The grand median.
table : ndarray
The contingency table. The shape of the table is (2, n), where
n is the number of samples. The first row holds the counts of the
values above the grand median, and the second row holds the counts
of the values below the grand median. The table allows further
analysis with, for example, `scipy.stats.chi2_contingency`, or with
`scipy.stats.fisher_exact` if there are two samples, without having
to recompute the table. If ``nan_policy`` is "propagate" and there
are nans in the input, the return value for ``table`` is ``None``.
See Also
--------
kruskal : Compute the Kruskal-Wallis H-test for independent samples.
mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y.
Notes
-----
.. versionadded:: 0.15.0
References
----------
.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill
(1950), pp. 394-399.
.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010).
See Sections 8.12 and 10.15.
Examples
--------
A biologist runs an experiment in which there are three groups of plants.
Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants.
Each plant produces a number of seeds. The seed counts for each group
are::
Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49
Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99
Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84
The following code applies Mood's median test to these samples.
>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49]
>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99]
>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84]
>>> from scipy.stats import median_test
>>> stat, p, med, tbl = median_test(g1, g2, g3)
The median is
>>> med
34.0
and the contingency table is
>>> tbl
array([[ 5, 10, 7],
[11, 5, 10]])
`p` is too large to conclude that the medians are not the same:
>>> p
0.12609082774093244
The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to
`median_test`.
>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood")
>>> p
0.12224779737117837
The median occurs several times in the data, so we'll get a different
result if, for example, ``ties="above"`` is used:
>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above")
>>> p
0.063873276069553273
>>> tbl
array([[ 5, 11, 9],
[11, 4, 8]])
This example demonstrates that if the data set is not large and there
are values equal to the median, the p-value can be sensitive to the
choice of `ties`.
"""
ties = kwds.pop('ties', 'below')
correction = kwds.pop('correction', True)
lambda_ = kwds.pop('lambda_', None)
nan_policy = kwds.pop('nan_policy', 'propagate')
if len(kwds) > 0:
        bad_kwd = list(kwds.keys())[0]
raise TypeError("median_test() got an unexpected keyword "
"argument %r" % bad_kwd)
if len(args) < 2:
raise ValueError('median_test requires two or more samples.')
ties_options = ['below', 'above', 'ignore']
if ties not in ties_options:
raise ValueError("invalid 'ties' option '%s'; 'ties' must be one "
"of: %s" % (ties, str(ties_options)[1:-1]))
data = [np.asarray(arg) for arg in args]
# Validate the sizes and shapes of the arguments.
for k, d in enumerate(data):
if d.size == 0:
raise ValueError("Sample %d is empty. All samples must "
"contain at least one value." % (k + 1))
if d.ndim != 1:
raise ValueError("Sample %d has %d dimensions. All "
"samples must be one-dimensional sequences." %
(k + 1, d.ndim))
cdata = np.concatenate(data)
contains_nan, nan_policy = _contains_nan(cdata, nan_policy)
if contains_nan and nan_policy == 'propagate':
return np.nan, np.nan, np.nan, None
if contains_nan:
grand_median = np.median(cdata[~np.isnan(cdata)])
else:
grand_median = np.median(cdata)
# When the minimum version of numpy supported by scipy is 1.9.0,
# the above if/else statement can be replaced by the single line:
# grand_median = np.nanmedian(cdata)
# Create the contingency table.
table = np.zeros((2, len(data)), dtype=np.int64)
for k, sample in enumerate(data):
sample = sample[~np.isnan(sample)]
nabove = count_nonzero(sample > grand_median)
nbelow = count_nonzero(sample < grand_median)
nequal = sample.size - (nabove + nbelow)
table[0, k] += nabove
table[1, k] += nbelow
if ties == "below":
table[1, k] += nequal
elif ties == "above":
table[0, k] += nequal
# Check that no row or column of the table is all zero.
# Such a table can not be given to chi2_contingency, because it would have
# a zero in the table of expected frequencies.
rowsums = table.sum(axis=1)
if rowsums[0] == 0:
raise ValueError("All values are below the grand median (%r)." %
grand_median)
if rowsums[1] == 0:
raise ValueError("All values are above the grand median (%r)." %
grand_median)
if ties == "ignore":
# We already checked that each sample has at least one value, but it
# is possible that all those values equal the grand median. If `ties`
# is "ignore", that would result in a column of zeros in `table`. We
# check for that case here.
zero_cols = np.where((table == 0).all(axis=0))[0]
if len(zero_cols) > 0:
msg = ("All values in sample %d are equal to the grand "
"median (%r), so they are ignored, resulting in an "
"empty sample." % (zero_cols[0] + 1, grand_median))
raise ValueError(msg)
stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_,
correction=correction)
return stat, p, grand_median, table
def _hermnorm(N):
# return the negatively normalized hermite polynomials up to order N-1
# (inclusive)
# using the recursive relationship
# p_n+1 = p_n(x)' - x*p_n(x)
# and p_0(x) = 1
plist = [None] * N
plist[0] = poly1d(1)
for n in range(1, N):
plist[n] = plist[n-1].deriv() - poly1d([1, 0]) * plist[n-1]
return plist
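# Illustrative note -- not part of the original SciPy source.  The first few
# polynomials produced by the recursion above are p0 = 1, p1 = -x,
# p2 = x**2 - 1 and p3 = 3*x - x**3, i.e. (-1)**n times the probabilists'
# Hermite polynomials He_n(x).
def _hermnorm_example():
    return _hermnorm(4)  # [poly1d([1]), poly1d([-1, 0]), poly1d([1, 0, -1]), poly1d([-1, 0, 3, 0])]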
# Note: when removing pdf_fromgamma, also remove the _hermnorm support function
@np.deprecate(message="scipy.stats.pdf_fromgamma is deprecated in scipy 0.16.0 "
"in favour of statsmodels.distributions.ExpandedNormal.")
def pdf_fromgamma(g1, g2, g3=0.0, g4=None):
if g4 is None:
g4 = 3 * g2**2
sigsq = 1.0 / g2
sig = sqrt(sigsq)
mu = g1 * sig**3.0
p12 = _hermnorm(13)
for k in range(13):
p12[k] /= sig**k
# Add all of the terms to polynomial
totp = (p12[0] - g1/6.0*p12[3] +
g2/24.0*p12[4] + g1**2/72.0 * p12[6] -
g3/120.0*p12[5] - g1*g2/144.0*p12[7] - g1**3.0/1296.0*p12[9] +
g4/720*p12[6] + (g2**2/1152.0 + g1*g3/720)*p12[8] +
g1**2 * g2/1728.0*p12[10] + g1**4.0 / 31104.0*p12[12])
# Final normalization
totp = totp / sqrt(2*pi) / sig
def thefunc(x):
xn = (x - mu) / sig
return totp(xn) * exp(-xn**2 / 2.)
return thefunc
def _circfuncs_common(samples, high, low):
samples = np.asarray(samples)
if samples.size == 0:
return np.nan, np.nan
ang = (samples - low)*2*pi / (high - low)
return samples, ang
def circmean(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular mean for samples in a range.
Parameters
----------
samples : array_like
Input array.
high : float or int, optional
High boundary for circular mean range. Default is ``2*pi``.
low : float or int, optional
Low boundary for circular mean range. Default is 0.
axis : int, optional
Axis along which means are computed. The default is to compute
the mean of the flattened array.
Returns
-------
circmean : float
Circular mean.
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).sum(axis=axis)
C = cos(ang).sum(axis=axis)
res = arctan2(S, C)*(high - low)/2.0/pi + low
mask = (S == .0) * (C == .0)
if mask.ndim > 0:
res[mask] = np.nan
return res
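# Illustrative usage sketch -- not part of the original SciPy source.  Angles
# straddling the wrap-around point average correctly with `circmean`, unlike
# the arithmetic mean.
def _circmean_example():
    samples = np.array([0.1, 2*np.pi - 0.1])
    return circmean(samples)  # approximately 0.0, whereas np.mean(samples) is ~pi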
def circvar(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular variance for samples assumed to be in a range
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular variance range. Default is 0.
high : float or int, optional
High boundary for circular variance range. Default is ``2*pi``.
axis : int, optional
Axis along which variances are computed. The default is to compute
the variance of the flattened array.
Returns
-------
circvar : float
Circular variance.
Notes
-----
This uses a definition of circular variance that in the limit of small
angles returns a number close to the 'linear' variance.
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).mean(axis=axis)
C = cos(ang).mean(axis=axis)
R = hypot(S, C)
return ((high - low)/2.0/pi)**2 * 2 * log(1/R)
def circstd(samples, high=2*pi, low=0, axis=None):
"""
Compute the circular standard deviation for samples assumed to be in the
range [low to high].
Parameters
----------
samples : array_like
Input array.
low : float or int, optional
Low boundary for circular standard deviation range. Default is 0.
high : float or int, optional
High boundary for circular standard deviation range.
Default is ``2*pi``.
axis : int, optional
Axis along which standard deviations are computed. The default is
to compute the standard deviation of the flattened array.
Returns
-------
circstd : float
Circular standard deviation.
Notes
-----
This uses a definition of circular standard deviation that in the limit of
small angles returns a number close to the 'linear' standard deviation.
"""
samples, ang = _circfuncs_common(samples, high, low)
S = sin(ang).mean(axis=axis)
C = cos(ang).mean(axis=axis)
R = hypot(S, C)
return ((high - low)/2.0/pi) * sqrt(-2*log(R))
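# Illustrative usage sketch -- not part of the original SciPy source.  For
# tightly clustered angles the circular standard deviation is close to the
# ordinary (linear) standard deviation, as noted in the docstring above.
def _circstd_example():
    rng = np.random.RandomState(0)
    samples = rng.normal(loc=1.0, scale=0.05, size=1000)
    return circstd(samples), np.std(samples)  # the two values nearly agree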
# Tests to include (from R) -- some of these already in stats.
########
# X Ansari-Bradley
# X Bartlett (and Levene)
# X Binomial
# Y Pearson's Chi-squared (stats.chisquare)
# Y Association Between Paired samples (stats.pearsonr, stats.spearmanr)
# stats.kendalltau) -- these need work though
# Fisher's exact test
# X Fligner-Killeen Test
# Y Friedman Rank Sum (stats.friedmanchisquare?)
# Y Kruskal-Wallis
# Y Kolmogorov-Smirnov
# Cochran-Mantel-Haenszel Chi-Squared for Count
# McNemar's Chi-squared for Count
# X Mood Two-Sample
# X Test For Equal Means in One-Way Layout (see stats.ttest also)
# Pairwise Comparisons of proportions
# Pairwise t tests
# Tabulate p values for pairwise comparisons
# Pairwise Wilcoxon rank sum tests
# Power calculations two sample test of prop.
# Power calculations for one and two sample t tests
# Equal or Given Proportions
# Trend in Proportions
# Quade Test
# Y Student's T Test
# Y F Test to compare two variances
# XY Wilcoxon Rank Sum and Signed Rank Tests
|
surhudm/scipy
|
scipy/stats/morestats.py
|
Python
|
bsd-3-clause
| 96,359
|
[
"Gaussian"
] |
bff38041b9076a53b2409d03bba8f236afe04844d17489b84eac530d1d4f3f5c
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views import defaults as default_views
from inbox import views as pc_views  # project-level views
urlpatterns = [
url(r'^$', pc_views.index, name='index'),
url(r'^(?P<deposit_id>[0-9]+)/$', pc_views.package_report, name='package_report'),
url(r'^(?P<deposit_id>[0-9]+)/reports/virus/', pc_views.package_report_virus, name='package_report_virus'),
url(r'^(?P<deposit_id>[0-9]+)/reports/integrity/', pc_views.package_report_integrity, name='package_report_integrity'),
url(r'^(?P<deposit_id>[0-9]+)/reports/scielops/', pc_views.package_report_scielops, name='package_report_scielops'),
# Django Admin, use {% url 'admin:index' %}
url(settings.ADMIN_URL, include(admin.site.urls)),
# User management
url(r'^users/', include('inbox.users.urls', namespace='users')),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
url(r'frontdesk/', include('frontdesk.urls', namespace='frontdesk')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these URLs in a browser to see what the error pages look like.
urlpatterns += [
url(r'^400/$', default_views.bad_request, kwargs={'exception': Exception('Bad Request!')}),
url(r'^403/$', default_views.permission_denied, kwargs={'exception': Exception('Permission Denied')}),
url(r'^404/$', default_views.page_not_found, kwargs={'exception': Exception('Page not Found')}),
url(r'^500/$', default_views.server_error),
]
|
brunousml/inbox
|
config/urls.py
|
Python
|
bsd-2-clause
| 1,839
|
[
"VisIt"
] |
edfea8bf58d1d60749dd9ab347f0adb70bc54ac20db05ec3f93cd8fb23cc92bd
|
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.DistributionRequirement import (
DistributionRequirement)
@linter(executable='astyle',
output_format='corrected',
use_stdin=True)
class ArtisticStyleBear:
"""
Artistic Style is a source code indenter, formatter,
and beautifier for the C, C++, C++/CLI, Objective-C,
C# and Java programming languages.
"""
LANGUAGES = {'C', 'C++', 'Objective-C', 'C#', 'Java'}
REQUIREMENTS = {
DistributionRequirement(
apt_get='astyle',
dnf='astyle'
)
}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_FIX = {'Formatting'}
SEE_MORE = 'http://astyle.sourceforge.net/astyle.html'
@staticmethod
def create_arguments(filename, file, config_file,
bracket_style: str='',
use_spaces: bool=None,
indent_size: int=4,
require_braces_at_namespace: bool=True,
require_braces_at_class: bool=True,
require_braces_at_inline: bool=True,
require_braces_at_extern: bool=False,
allow_indent_classes: bool=True,
allow_indent_modifiers: bool=True,
allow_indent_switches: bool=True,
allow_indent_cases: bool=True,
allow_indent_namespaces: bool=False,
allow_indent_labels: bool=True,
allow_indent_preproc_block: bool=True,
allow_indent_preproc_definition: bool=True,
allow_indent_preproc_conditionals: bool=True,
allow_indent_column_one_comments: bool=True,
allow_pad_header_blocks: bool=True,
allow_pad_operators: bool=True,
allow_pad_parenthesis: bool=False,
allow_pad_parenthesis_out: bool=False,
allow_pad_parenthesis_in: bool=False,
prohibit_empty_lines_in_func: bool=False,
break_closing_braces: bool=False,
break_elseifs: bool=False,
break_one_line_headers: bool=False,
require_braces_at_one_line_conditionals: bool=False,
prohibit_braces_from_one_line_conditionals:
bool=False,
prohibit_comment_prefix: bool=True):
"""
:param bracket_style:
Defines the brace style to use.
Possible values are ``allman, java, kr, stroustrup, whitesmith,
vtk, banner, gnu, linux, horstmann, google, mozilla, pico and
lisp.``
For example: Allman style uses braces that are broken from the
previous block. If set to ``allman``, prefer::
int Foo(bool isBar)
{
if (isBar)
{
bar();
return 1;
}
else
return 0;
}
For example: Java style uses braces that are attached to the
end of the last line of the previous block.
If set to ``java``, prefer::
int Foo(bool isBar) {
if (isBar) {
bar();
return 1;
} else
return 0;
}
For example: Kernighan & Ritchie style uses linux braces.
Opening braces are broken from ``namespaces``, ``classes`` and
``function`` definitions. The braces are attached to everything
else, including arrays, structs, enums, and statements within
a function. If set to ``kr``, prefer::
int Foo(bool isBar)
{
if (isBar) {
bar();
return 1;
} else
return 0;
}
For example: Stroustrup style uses linux braces with closing
headers broken from closing braces. Opening braces are broken from
function definitions only. The opening braces are attached to
everything else, including ``namespaces``, ``classes``, ``arrays``,
``structs``, enums, and statements within a function.
If set to ``stroustrup``, prefer::
int Foo(bool isBar)
{
if (isBar) {
bar();
return 1;
}
else
return 0;
}
For example: Whitesmith style uses broken, indented braces.
Switch blocks and class blocks are indented to prevent a
'hanging indent' with the following case statements and C++ class
modifiers (``public``, ``private``, ``protected``).
If set to ``whitesmith``, prefer::
int Foo(bool isBar)
{
if (isBar)
{
bar();
return 1;
}
else
return 0;
}
For example: VTK (Visualization Toolkit) style uses broken,
indented braces, except for the opening brace. Switch blocks are
indented to prevent a 'hanging indent' with following case
statements. If set to ``vtk``, prefer::
int Foo(bool isBar)
{
if (isBar)
{
bar();
return 1;
}
else
return 0;
}
For example: Banner style uses attached, indented braces.
Switch blocks and class blocks are indented to prevent a
'hanging indent' with following case statements and C++ class
modifiers (``public``, ``private``, ``protected``).
If set to ``banner``, prefer::
int Foo(bool isBar) {
if (isBar) {
bar();
return 1;
}
else
return 0;
}
For example: GNU style uses broken braces and indented blocks.
Extra indentation is added to blocks within a function only. Other
braces and blocks are broken, but NOT indented. This style
frequently is used with an indent of 2 spaces. If set to ``gnu``,
prefer::
int Foo(bool isBar)
{
if (isBar)
{
bar();
return 1;
}
else
return 0;
}
For example: Linux style uses linux braces. Opening braces are
broken from namespace, class, and function definitions. The braces
are attached to everything else, including ``arrays``, ``structs``,
``enums``, and statements within a function. The minimum
conditional indent is one-half indent. If you want a different
minimum conditional indent, use the K&R style instead. This style
works best with a large indent. It frequently is used with an
indent of 8 spaces. If set to ``linux``, prefer::
int Foo(bool isBar)
{
if (isFoo) {
bar();
return 1;
} else
return 0;
}
For example: Horstmann style uses broken braces and run-in
statements. ``Switches`` are indented to allow a run-in to the
opening ``switch`` block. This style frequently is used with an
indent of 3 spaces. If set to ``horstmann``, prefer::
int Foo(bool isBar)
{ if (isBar)
{ bar();
return 1;
}
else
return 0;
}
For example: Google style uses attached braces and indented
class access modifiers. This is not actually a unique brace
style, but is Java style with a non-brace variation. This style
frequently is used with an indent of 2 spaces. If set to
``google``, prefer::
int Foo(bool isBar) {
if (isBar) {
bar();
return 1;
} else
return 0;
}
For example: Mozilla style uses linux braces. Opening braces
are broken from ``classes``, ``structs``, ``enums``, and
``function`` definitions. The braces are attached to everything
else, including ``namespaces``, ``arrays``, and ``statements``
within a ``function``. This style frequently is used with an
indent of 2 spaces. If set to ``mozilla``, prefer::
int Foo(bool isBar)
{
if (isBar) {
bar();
return 1;
} else
return 0;
}
For example: Pico style uses broken braces and run-in
statements with attached closing braces. The closing brace is
attached to the last line in the block. ``Switches`` are indented
to allow a run-in to the opening ``switch`` block. This style
frequently is used with an indent of 2 spaces.
If set to ``pico``, prefer::
int Foo(bool isBar)
{ if (isBar)
{ bar();
return 1; }
else
return 0; }
For example: Lisp style uses attached opening and closing
braces. The closing brace is attached to the last line in the
block. If set to ``lisp``,
prefer::
int Foo(bool isBar) {
if (isBar) {
bar()
return 1; }
else
return 0; }
:param use_spaces:
            In the following examples, a space is indicated with a ``.``
(dot), a tab is indicated by a > (greater than).
For example: If ``None``, the default option of 4 spaces will be
used as below::
void Foo() {
....if (isBar1
............&& isBar2)
........bar();
}
For example: If set to ``True``, spaces will be used for
indentation.
For example: If set to ``False``, tabs will be used for
indentation, and spaces for continuation line alignment as below::
void Foo() {
> if (isBar1
> ........&& isBar2)
> > bar();
}
:param indent_size:
Number of spaces per indentation level.
For example: If ``use_spaces`` is ``True`` and ``indent_size`` is
``3``, prefer::
void Foo() {
...if (isBar1
.........&& isBar2)
......bar();
}
:param require_braces_at_namespace:
Attach braces to a namespace statement. This is done
regardless of the brace style being used.
For example: If set to ``True``, prefer::
namespace FooName {
...
}
:param require_braces_at_class:
Attach braces to a class statement. This is done regardless of the
brace style being used.
For example: If set to ``True``, prefer::
class FooClass {
...
};
:param require_braces_at_inline:
Attach braces to class and struct inline function definitions. This
option has precedence for all styles except ``Horstmann`` and
``Pico`` (run-in styles). It is effective for C++ files only.
For example: If set to ``True``, prefer::
class FooClass
{
void Foo() {
...
}
};
:param require_braces_at_extern:
Attach braces to a braced extern "C" statement. This is done
regardless of the brace style being used. This option is effective
for C++ files only.
For example: If set to ``True``, prefer::
#ifdef __cplusplus
extern "C" {
#endif
:param allow_indent_classes:
Indent ``class`` and ``struct`` blocks so that the entire block is
indented. The ``struct`` blocks are indented only if an access
modifier, ``public:``, ``protected:`` or ``private:``, is declared
somewhere in the ``struct``. This option is effective for C++ files
only. For example: If set to ``True``, prefer this::
class Foo
{
public:
Foo();
virtual ~Foo();
};
over this::
class Foo
{
public:
Foo();
virtual ~Foo();
};
:param allow_indent_modifiers:
Indent ``class`` and ``struct`` access modifiers, ``public:``,
``protected:`` and ``private:``, one half indent. The rest of the
class is not indented. This option is effective for C++ files only.
For example: If set to ``True``, prefer this::
class Foo
{
public:
Foo();
virtual ~Foo();
};
over this::
class Foo
{
public:
Foo();
virtual ~Foo();
};
:param allow_indent_switches:
Indent ``switch`` blocks so that the ``case X:`` statements are
indented in the switch block. The entire case block is indented.
For example: If set to ``True``, prefer this::
switch (foo)
{
case 1:
a += 1;
break;
case 2:
{
a += 2;
break;
}
}
over this::
switch (foo)
{
case 1:
a += 1;
break;
case 2:
{
a += 2;
break;
}
}
:param allow_indent_cases:
Indent ``case X:`` blocks from the ``case X:`` headers. Case
statements not enclosed in blocks are NOT indented.
For example: If set to ``True``, prefer this::
switch (foo)
{
case 1:
a += 1;
break;
case 2:
{
a += 2;
break;
}
}
over this::
switch (foo)
{
case 1:
a += 1;
break;
case 2:
{
a += 2;
break;
}
}
:param allow_indent_namespaces:
Add extra indentation to namespace blocks. This option has no
effect on Java files.
For example: If set to ``True``, prefer this::
namespace foospace
{
class Foo
{
public:
Foo();
virtual ~Foo();
};
}
over this::
namespace foospace
{
class Foo
{
public:
Foo();
virtual ~Foo();
};
}
:param allow_indent_labels:
Add extra indentation to labels so they appear 1 indent less than
the current indentation, rather than being flushed to the
left (the default).
For example: If set to ``True``, prefer this::
void Foo() {
while (isFoo) {
if (isFoo)
goto error;
...
error:
...
}
}
over this::
void Foo() {
while (isFoo) {
if (isFoo)
goto error;
...
error:
...
}
}
:param allow_indent_preproc_block:
Indent preprocessor blocks at brace level zero and immediately
within a namespace. There are restrictions on what will be
indented. Blocks within methods, classes, arrays, etc., will not
be indented. Blocks containing braces or multi-line define
statements will not be indented. Without this option the
preprocessor block is not indented.
For example: If set to ``True``, prefer this::
#ifdef _WIN32
#include <windows.h>
#ifndef NO_EXPORT
#define EXPORT
#endif
#endif
over this::
#ifdef _WIN32
#include <windows.h>
#ifndef NO_EXPORT
#define EXPORT
#endif
#endif
:param allow_indent_preproc_definition:
Indent multi-line preprocessor definitions ending with a backslash.
Should be used with ``convert_tabs_to_spaces`` for proper results.
Does a pretty good job, but cannot perform miracles in obfuscated
preprocessor definitions. Without this option the preprocessor
statements remain unchanged.
For example: If set to ``True``, prefer this::
#define Is_Bar(arg,a,b) \
(Is_Foo((arg), (a)) \
|| Is_Foo((arg), (b)))
over this::
#define Is_Bar(arg,a,b) \
(Is_Foo((arg), (a)) \
|| Is_Foo((arg), (b)))
:param allow_indent_preproc_conditionals:
Indent preprocessor conditional statements to the same level as the
source code.
For example: If set to ``True``, prefer this::
isFoo = true;
#ifdef UNICODE
text = wideBuff;
#else
text = buff;
#endif
over this::
isFoo = true;
#ifdef UNICODE
text = wideBuff;
#else
text = buff;
#endif
:param allow_indent_column_one_comments:
Indent C++ comments beginning in column one. By default C++
comments beginning in column one are assumed to be commented-out
code and not indented. This option will allow the comments to be
indented with the code.
For example: If set to ``True``, prefer this::
                void Foo()
{
// comment
if (isFoo)
bar();
}
over this::
                void Foo()
{
// comment
if (isFoo)
bar();
}
:param allow_pad_header_blocks:
Pad empty lines around header blocks
(e.g. ``if``, ``for``, ``while``...).
For example: If set to ``True``, prefer this::
isFoo = true;
if (isFoo) {
bar();
} else {
anotherBar();
}
isBar = false;
over this::
isFoo = true;
if (isFoo) {
bar();
} else {
anotherBar();
}
isBar = false;
:param allow_pad_operators:
Insert space padding around operators. This will also pad commas.
For example: If set to ``True``, prefer this::
if (foo == 2)
a = bar((b - c) * a, d--);
over this::
if (foo==2)
a=bar((b-c)*a,d--);
:param allow_pad_parenthesis:
Insert space padding around parenthesis on both the outside and the
inside.
For example: If set to ``True``, prefer this::
if ( isFoo ( ( a+2 ), b ) )
bar ( a, b );
over this::
if (isFoo((a+2), b))
bar(a, b);
:param allow_pad_parenthesis_out:
Insert space padding around parenthesis on the outside only.
Parenthesis that are empty will not be padded.
For example: If set to ``True``, prefer this::
if (isFoo ( (a+2), b) )
bar (a, b);
over this::
if (isFoo((a+2), b))
bar(a, b);
:param allow_pad_parenthesis_in:
Insert space padding around parenthesis on the inside only.
For example: If set to ``True``, prefer this::
if ( isFoo( ( a+2 ), b ) )
bar( a, b );
over this::
if (isFoo((a+2), b))
bar(a, b);
:param prohibit_empty_lines_in_func:
Delete empty lines within a function or method. Empty lines outside
of functions or methods are NOT deleted.
For example: If set to ``True``, prefer this::
void Foo()
{
foo1 = 1;
foo2 = 2;
}
over this::
void Foo()
{
foo1 = 1;
foo2 = 2;
}
:param break_closing_braces:
When used with some specific ``bracket_style``, this breaks closing
headers (e.g. ``else``, ``catch``, ...) from their immediately
preceding closing braces. Closing header braces are always broken
with the other styles.
For example: If set to ``True``, prefer this::
void Foo(bool isFoo) {
if (isFoo) {
bar();
}
else {
anotherBar();
}
}
over this::
void Foo(bool isFoo) {
if (isFoo) {
bar();
} else {
anotherBar();
}
}
:param break_elseifs:
Break ``else if`` header combinations into separate lines.
For example: If set to ``True``, prefer this::
if (isFoo) {
bar();
}
else
if (isFoo1()) {
bar1();
}
else
if (isFoo2()) {
bar2();
}
over this::
if (isFoo) {
bar();
}
else if (isFoo1()) {
bar1();
}
else if (isFoo2()) {
bar2;
}
:param break_one_line_headers:
Break one line headers (e.g. ``if``, ``while``, ``else``, ...) from
a statement residing on the same line. If the statement is enclosed
in braces, the braces will be formatted according to the requested
brace style.
For example: If set to ``True``, prefer this::
void Foo(bool isFoo)
{
if (isFoo1)
bar1();
if (isFoo2) {
bar2();
}
}
over this::
void Foo(bool isFoo)
{
if (isFoo1) bar1();
if (isFoo2) { bar2(); }
}
:param require_braces_at_one_line_conditionals:
Add braces to unbraced one line conditional statements
(e.g. ``if``, ``for``, ``while``...). The statement must be on a
single line. The braces will be added according to the requested
brace style.
For example: If set to ``True``, prefer this::
if (isFoo) {
isFoo = false;
}
over this::
if (isFoo)
isFoo = false;
:param prohibit_braces_from_one_line_conditionals:
Remove braces from conditional statements
(e.g. ``if``, ``for``, ``while``...). The statement must be a
single statement on a single line.
For example: If set to ``True``, prefer this::
if (isFoo)
isFoo = false;
over this::
if (isFoo)
{
isFoo = false;
}
:param prohibit_comment_prefix:
Remove the preceding '*' in a multi-line comment that begins a
line. A trailing '*', if present, is also removed. Text that is
less than one indent is indented to one indent. Text greater than
one indent is not changed. Multi-line comments that begin a line,
but without the preceding '*', are indented to one indent for
consistency. This can slightly modify the indentation of commented
out blocks of code. Lines containing all '*' are left unchanged.
Extra spacing is removed from the comment close '*/'.
For example: If set to ``True``, prefer this::
/*
comment line 1
comment line 2
*/
over this::
/*
* comment line 1
* comment line 2
*/
"""
rules_map = {
'--attach-namespaces': require_braces_at_namespace,
'--attach-classes': require_braces_at_class,
'--attach-inlines': require_braces_at_inline,
'--attach-extern-c': require_braces_at_extern,
'--indent-classes': allow_indent_classes,
'--indent-modifiers': allow_indent_modifiers,
'--indent-switches': allow_indent_switches,
'--indent-cases': allow_indent_cases,
'--indent-namespaces': allow_indent_namespaces,
'--indent-labels': allow_indent_labels,
'--indent-preproc-block': allow_indent_preproc_block,
'--indent-preproc-define': allow_indent_preproc_definition,
'--indent-preproc-cond': allow_indent_preproc_conditionals,
'--indent-col1-comments': allow_indent_column_one_comments,
'--break-blocks': allow_pad_header_blocks,
'--pad-oper': allow_pad_operators,
'--pad-paren': allow_pad_parenthesis,
'--pad-paren-out': allow_pad_parenthesis_out,
'--pad-paren-in': allow_pad_parenthesis_in,
'--delete-empty-lines': prohibit_empty_lines_in_func,
'--break-closing-brackets': break_closing_braces,
'--break-elseifs': break_elseifs,
'--break-one-line-headers': break_one_line_headers,
'--add-brackets': require_braces_at_one_line_conditionals,
'--remove-brackets': prohibit_braces_from_one_line_conditionals,
'--remove-comment-prefix': prohibit_comment_prefix
}
args = ['--suffix=none', '--dry-run']
if bracket_style:
args.append('--style=' + bracket_style)
if use_spaces is True:
args.append('-s' + str(indent_size))
elif use_spaces is False:
args.append('-t' + str(indent_size))
args += (k for k, v in rules_map.items() if v)
return args
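    # Illustrative note -- not part of the original bear.  For a hypothetical
    # call such as
    #     create_arguments('file.cpp', [], None, bracket_style='allman',
    #                      use_spaces=True, indent_size=2)
    # the list built above starts with
    #     ['--suffix=none', '--dry-run', '--style=allman', '-s2']
    # followed by one flag for every entry of `rules_map` whose setting is
    # truthy (with the defaults that includes e.g. '--attach-namespaces',
    # '--indent-switches', '--break-blocks', '--pad-oper' and
    # '--remove-comment-prefix').  The ``@linter`` decorator normally performs
    # this call itself and feeds the result to the ``astyle`` executable.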
|
IPMITMO/statan
|
coala-bears/bears/c_languages/ArtisticStyleBear.py
|
Python
|
mit
| 30,014
|
[
"VTK"
] |
dfd1a611bcdcf968be600a6b9dab73a1f6321c811acbaed8982759bbc201b3db
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import (division, print_function, absolute_import,
unicode_literals)
__all__ = ["sample_ball", "MH_proposal_axisaligned"]
import numpy as np
# If mpi4py is installed, import it.
try:
from mpi4py import MPI
MPI = MPI
except ImportError:
MPI = None
def sample_ball(p0, std, size=1):
"""
Produce a ball of walkers around an initial parameter value.
:param p0: The initial parameter value.
:param std: The axis-aligned standard deviation.
:param size: The number of samples to produce.
"""
assert(len(p0) == len(std))
return np.vstack([p0 + std * np.random.normal(size=len(p0))
for i in range(size)])
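# Illustrative usage sketch -- not part of the original emcee source.  The
# hypothetical helper below builds 4 walkers around a 2-parameter initial
# guess; the returned array has shape (4, 2).
def _sample_ball_example():
    p0 = [1.0, 2.0]
    std = [0.1, 0.2]
    return sample_ball(p0, std, size=4)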
class MH_proposal_axisaligned(object):
"""
A Metropolis-Hastings proposal, with axis-aligned Gaussian steps,
for convenient use as the ``mh_proposal`` option to
:func:`EnsembleSampler.sample` .
"""
def __init__(self, stdev):
self.stdev = stdev
def __call__(self, X):
(nw, npar) = X.shape
assert(len(self.stdev) == npar)
return X + self.stdev * np.random.normal(size=X.shape)
if MPI is not None:
class _close_pool_message(object):
def __repr__(self):
return "<Close pool message>"
class _function_wrapper(object):
def __init__(self, function):
self.function = function
def _error_function(task):
raise RuntimeError("Pool was sent tasks before being told what "
"function to apply.")
class MPIPool(object):
"""
A pool that distributes tasks over a set of MPI processes. MPI is an
API for distributed memory parallelism. This pool will let you run
emcee without shared memory, letting you use much larger machines
with emcee.
        The pool only supports the :func:`map` method at the moment because
this is the only functionality that emcee needs. That being said,
this pool is fairly general and it could be used for other purposes.
Contributed by `Joe Zuntz <https://github.com/joezuntz>`_.
:param comm: (optional)
The ``mpi4py`` communicator.
:param debug: (optional)
If ``True``, print out a lot of status updates at each step.
"""
def __init__(self, comm=MPI.COMM_WORLD, debug=False):
self.comm = comm
self.rank = comm.Get_rank()
self.size = comm.Get_size() - 1
self.debug = debug
self.function = _error_function
if self.size == 0:
raise ValueError("Tried to create an MPI pool, but there "
"was only one MPI process available. "
"Need at least two.")
def is_master(self):
"""
Is the current process the master?
"""
return self.rank == 0
def wait(self):
"""
If this isn't the master process, wait for instructions.
"""
if self.is_master():
raise RuntimeError("Master node told to await jobs.")
status = MPI.Status()
while True:
# Event loop.
# Sit here and await instructions.
if self.debug:
print("Worker {0} waiting for task.".format(self.rank))
# Blocking receive to wait for instructions.
task = self.comm.recv(source=0, tag=MPI.ANY_TAG, status=status)
if self.debug:
print("Worker {0} got task {1} with tag {2}."
.format(self.rank, task, status.tag))
# Check if message is special sentinel signaling end.
# If so, stop.
if isinstance(task, _close_pool_message):
if self.debug:
print("Worker {0} told to quit.".format(self.rank))
break
# Check if message is special type containing new function
# to be applied
if isinstance(task, _function_wrapper):
self.function = task.function
if self.debug:
print("Worker {0} replaced its task function: {1}."
.format(self.rank, self.function))
continue
# If not a special message, just run the known function on
# the input and return it asynchronously.
result = self.function(task)
if self.debug:
print("Worker {0} sending answer {1} with tag {2}."
.format(self.rank, result, status.tag))
self.comm.isend(result, dest=0, tag=status.tag)
def map(self, function, tasks):
"""
Like the built-in :func:`map` function, apply a function to all
of the values in a list and return the list of results.
:param function:
The function to apply to the list.
:param tasks:
The list of elements.
"""
ntask = len(tasks)
# If not the master just wait for instructions.
if not self.is_master():
self.wait()
# sys.exit(0)
return
if function is not self.function:
if self.debug:
print("Master replacing pool function with {0}."
.format(function))
self.function = function
F = _function_wrapper(function)
# Tell all the workers what function to use.
requests = []
for i in range(self.size):
r = self.comm.isend(F, dest=i + 1)
requests.append(r)
# Wait until all of the workers have responded. See:
# https://gist.github.com/4176241
MPI.Request.waitall(requests)
# Send all the tasks off and wait for them to be received.
# Again, see the bug in the above gist.
requests = []
for i, task in enumerate(tasks):
worker = i % self.size + 1
if self.debug:
print("Sent task {0} to worker {1} with tag {2}."
.format(task, worker, i))
r = self.comm.isend(task, dest=worker, tag=i)
requests.append(r)
MPI.Request.waitall(requests)
# Now wait for the answers.
results = []
for i in range(ntask):
worker = i % self.size + 1
if self.debug:
print("Master waiting for worker {0} with tag {1}"
.format(worker, i))
result = self.comm.recv(source=worker, tag=i)
results.append(result)
return results
def close(self):
"""
Just send a message off to all the pool members which contains
the special :class:`_close_pool_message` sentinel.
"""
if self.is_master():
for i in range(self.size):
self.comm.isend(_close_pool_message(), dest=i + 1)
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
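    # Illustrative usage sketch -- not part of the original emcee source.  The
    # usual master/worker pattern, assuming the script is launched under MPI,
    # e.g. ``mpiexec -n 4 python script.py``.  ``func`` and ``tasks`` are
    # hypothetical placeholders supplied by the caller.
    def _mpipool_usage_example(func, tasks):
        pool = MPIPool()
        if not pool.is_master():
            # Worker processes block here, serving tasks until close() is sent.
            pool.wait()
            return None
        results = pool.map(func, tasks)
        pool.close()
        return results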
|
GabrielaCR/AGNfitter
|
emcee/utils.py
|
Python
|
mit
| 7,581
|
[
"Gaussian"
] |
3905989a894357e8975648c43006731fca07a3c4053f84894c69dc6a16ea929d
|
# -*- coding: utf-8 -*-
"""
codeMarble_Core.actionRule
~~~~~~~~~~~~~~~~~~~~~~~~~
check and manage rule after placement rule.
:copyright: (c) 2017 by codeMarble
"""
import os
import sys
from errorCode import *
class ActionRule(object):
def __init__(self):
pass
# actionRuleNum(0:script, 1:remove, 2:change), actionRuleOption1(0:script, 1:+dir, 2:×dir, 3:8dir, 4:go)
# actionRuleOption2(0:othello, n:size)
def checkActionRule(self, data):
try:
            if data.actionRule == 1:
                return self.removeObject(data)
            elif data.actionRule == 2:
return self.changeObject(data)
else:
return SERVER_ERROR
except Exception as e:
return SERVER_ERROR
# actionRuleOption1(0:script, 1:+dir, 2:×dir, 3:8dir, 4:go)
# actionRuleOption2(0:othello, n:size)
# actionRuleOption1, actionRuleOption2, gameBoard, dataBoard, pos
def removeObject(self, data):
try:
if 1 <= data.actionOption[0] <= 3: # remove size or othello
if data.actionOption[1] == 0: # othello
return self.actionObjectByOthello(data.gameBoard, data.pos, 0)
else: # size
return self.actionObjectBySize(data.gameBoard, data.pos, data.actionOption, 0)
elif data.actionOption[0] == 4: # remove go rule
return self.actionObjectByGo(data.gameBoard, data.pos, 0)
else:
return SERVER_ERROR
except Exception as e:
return SERVER_ERROR
def actionObjectByGo(self, board, pos, value):
pi, pj = pos
me = board[pi][pj]
you = -me
goRule = self.GoRule()
yous = goRule.findYou(board, pos)
for (i, j) in yous:
if goRule.checkBoard(board, (i, j)):
goRule.remove(board, (i, j), value)
return True
def actionObjectBySize(self, board, pos, actionOption, value):
pi, pj = pos
directions = self.getDirection(actionOption[0])
for i in range(1, actionOption[1] + 1):
for d in directions:
try:
if ((0 <= pi + d[0] * i < len(board)) and (0 <= pj + d[1] * i < len(board))) and board[pi + d[0] * i][pj + d[1] * i] < 0:
board[pi + d[0] * i][pj + d[1] * i] *= -1
except Exception as e:
continue
return True
def actionObjectByOthello(self, board, pos, value):
pi, pj = pos
dirs = self.getDirection(1)
me = board[pi][pj]
you = -me
for d in dirs:
for i in range(len(board)):
try:
ni, nj = pi + i * d[0], pj + i * d[1]
if board[ni][nj] == me:
i, j = pi, pj
while i+d[0] != ni or j+d[1] != nj:
board[i+d[0]][j+d[1]] = value
i, j = i + d[0], j + d[1]
break
except Exception as e:
break
return True
def getDirection(self, actionRuleOption):
        if actionRuleOption == 1:
dirs = [[-1, 0], [1, 0], [0, 1], [0, -1]]
        elif actionRuleOption == 2:
dirs = [[-1, -1], [-1, 1], [1, -1], [1, 1]]
        elif actionRuleOption == 3:
dirs = [[-1, 0], [1, 0], [0, 1], [0, -1], [-1, -1], [-1, 1], [1, -1], [1, 1]]
else:
return SERVER_ERROR
return dirs
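    # Illustrative note -- not part of the original module: getDirection(1)
    # returns the four orthogonal offsets, getDirection(2) the four diagonal
    # offsets and getDirection(3) all eight neighbouring offsets; any other
    # value yields SERVER_ERROR.  These offset lists drive the sweeps in
    # actionObjectBySize and actionObjectByOthello above.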
# actionRuleNum, actionRuleOption1, actionRuleOption2, gameBoard, dataBoard, pos
def changeObject(self, data):
try:
if 1 <= data.actionOption[0] <= 3: # if direction
if data.actionOption[1] == 0: # by othello
return self.actionObjectByOthello(data.gameBoard, data.pos, data.gameBoard[data.pos[0]][data.pos[1]])
else: # by size
return self.actionObjectBySize(data.gameBoard, data.pos, data.actionOption, data.gameBoard[data.pos[0]][data.pos[1]])
elif data.actionOption[0] == 4: # if go rule
return self.actionObjectByGo(data.gameBoard, data.pos, data.gameBoard[data.pos[0]][data.pos[1]])
else:
return SERVER_ERROR
except Exception as e:
return SERVER_ERROR
class GoRule:
def checkBoard(self, board, pos):
pi, pj = pos
me = board[pi][pj]
you = -me
dirs = [[-1, 0], [1, 0], [0, 1], [0, -1]]
visit = [[0 for i in range(len(board[0]))] for j in range(len(board))]
stack = [[pi, pj]]
while stack:
i, j = stack.pop()
visit[i][j] = 1
for d in dirs:
ni, nj = d[0] + i, d[1] + j
if board[ni][nj] == you or board[ni][nj] == me and visit[ni][nj]:
continue
elif board[ni][nj] == me and not visit[ni][nj]:
stack.append([ni, nj])
else:
return False
return True
def remove(self, board, pos, value):
pi, pj = pos
me = board[pi][pj]
you = -me
dirs = [[-1, 0], [1, 0], [0, 1], [0, -1]]
visit = [[0 for i in range(len(board[0]))] for j in range(len(board))]
stack = [[pi, pj]]
while stack:
i, j = stack.pop()
visit[i][j] = 1
board[i][j] = value
for d in dirs:
ni, nj = d[0] + i, d[1] + j
if board[ni][nj] == you or visit[ni][nj]:
continue
elif board[ni][nj] == me and not visit[ni][nj]:
stack.append([ni, nj])
else:
return False
return True
def findYou(self, board, pos):
pi, pj = pos
me = board[pi][pj]
you = -me
dirs = [[-1, 0], [1, 0], [0, 1], [0, -1]]
lst = list()
for d in dirs:
try:
ni, nj = d[0] + pi, d[1] + pj
if board[ni][nj] == you:
lst.append([ni, nj])
except Exception as e:
pass
return lst
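# Illustrative usage sketch added for clarity (not part of the original module); the
# helper name and board layout are hypothetical. It shows how actionObjectBySize
# flips the negative-valued stones around a placed stone using the '+' direction
# set (actionOption = [1, 1] means orthogonal directions with range 1).
def _example_remove_by_size():
    rule = ActionRule()
    board = [[0, -1, 0],
             [-1, 1, -1],
             [0, -1, 0]]
    rule.actionObjectBySize(board, (1, 1), [1, 1], 0)
    return board  # the four orthogonal -1 stones are now +1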
|
codeMarble/codeMarble_Web
|
backendCelery/codeMarble/actionRule.py
|
Python
|
gpl-3.0
| 6,512
|
[
"VisIt"
] |
e8cd5f88011a8fe780f9c552ae3a45da2ed8ebe9664a3d6977a039c7340cb45b
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests many modules to call pyscf functions."""
from __future__ import absolute_import
from openfermion.chem import MolecularData
from openfermionpyscf import run_pyscf
from openfermionpyscf import PyscfMolecularData
geometry = [('H', (0., 0., 0.)), ('H', (0., 0., 0.7414))]
basis = '6-31g'
multiplicity = 1
charge = 0
molecule = MolecularData(geometry,
basis,
multiplicity,
charge)
def test_run_pyscf():
new_mole = run_pyscf(molecule,
run_scf=True,
run_mp2=True,
run_cisd=True,
run_ccsd=True,
run_fci=True,
verbose=1)
assert isinstance(new_mole, PyscfMolecularData)
|
quantumlib/OpenFermion-PySCF
|
openfermionpyscf/tests/_run_pyscf_test.py
|
Python
|
apache-2.0
| 1,370
|
[
"PySCF"
] |
c04ce08980f11560c8aff0c345ebbc6b07eac9eca309b1cce9dc0b027bee9060
|
# -*- coding: utf-8 -*-
"""
This submodule provides routines to generate and analyse "Flux Surfaces" as
described in (Mumford et. al. 2014).
Flux Surfaces are created by the tracing of a closed loop of fieldlines,
from which a surface is reconstructed by creating polygons between the
pseudo-parallel streamlines.
"""
import numpy as np
from tvtk.api import tvtk
import tvtk.common as tvtk_common
__all__ = ['move_seeds', 'make_circle_seeds', 'create_flux_surface',
'update_flux_surface', 'make_poly_norms', 'norms_sanity_check',
'get_surface_vectors', 'interpolate_scalars', 'interpolate_vectors',
'update_interpolated_vectors', 'update_interpolated_scalars',
'get_surface_velocity_comp', 'get_the_line', 'update_the_line',
'get_surface_indexes', 'PolyDataWriter', 'write_step', 'write_flux',
'write_wave_flux', 'read_step', 'get_data']
def move_seeds(seeds, vfield, dt):
"""
Move a list of seeds based on a velocity field.
.. warning:: WARNING: THIS IS HARD CODED FOR GRID SIZE!
Parameters
----------
seeds: tvtk.PolyData
Old seed points
vfield: mayavi.sources.array_source.ArraySource object
The velocity field
dt: float
        The time step between the current and the previous step.
Returns
-------
seeds_arr: ndarray
New Seed points
"""
v_seed = tvtk.ProbeFilter()
tvtk_common.configure_input_data(v_seed, seeds)
tvtk_common.configure_source_data(v_seed, vfield)
v_seed.update()
int_vels = np.array(v_seed.output.point_data.vectors)[:,:2]/(15.625*1e3)
seed_arr = np.array(seeds.points)
seed_arr[:,:2] += int_vels * dt
#seeds.points = seed_arr
return seed_arr
def make_circle_seeds(n, r, **domain):
"""
Generate an array of n seeds evenly spaced in a circle at radius r.
Parameters
----------
n: integer
Number of Seeds to Create
r: float
Radius of the Circle in grid points
    **domain: Dict
        kwargs specifying the properties of the domain.
        Needs: xmax, ymax, zmax
    Returns
    -------
    surf_seeds: tvtk.PolyData
        vtkPolyData containing point data with the seed locations.
"""
xc = domain['xmax']/2
yc = domain['ymax']/2
ti = 0
surf_seeds = []
for theta in np.linspace(0, 2 * np.pi, n, endpoint=False):
surf_seeds.append([r * np.cos(theta + 0.5 * ti) + xc,
r * np.sin(theta + 0.5 * ti) + yc, domain['zmax']])
surf_seeds_arr = np.array(surf_seeds)
surf_seeds = tvtk.PolyData()
surf_seeds.points = surf_seeds_arr
return surf_seeds
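# Hedged example added for illustration (not part of the original module): generate 8
# seeds on a circle of radius 10 for a hypothetical 128x128x128 domain and check the
# geometry. Uses the numpy and tvtk imports already made at the top of this module.
def _example_circle_seeds():
    seeds = make_circle_seeds(8, 10.0, xmax=128, ymax=128, zmax=128)
    pts = np.array(seeds.points)
    radii = np.sqrt((pts[:, 0] - 64) ** 2 + (pts[:, 1] - 64) ** 2)
    # every seed sits at z = zmax and at the requested radius from the domain centre
    assert np.allclose(radii, 10.0) and np.allclose(pts[:, 2], 128)
    return seeds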
def create_flux_surface(bfield, surf_seeds):
"""
Create a flux surface from an array of seeds and a tvtk vector field.
Parameters
----------
bfield: tvtk.ImageData
        The vector field to use for streamline tracing
surf_seeds: numpy.ndarray
The array of seed points to start the fieldline tracing from
Returns
-------
surf_field_lines: tvtk.StreamTracer instance
The fieldline tracer with the fieldlines stored inside it.
surface: tvtk.RuledSurfaceFilter instance
The surface built from the StreamTracer instance
"""
#Make a streamline instance with the bfield
surf_field_lines = tvtk.StreamTracer()
# surf_field_lines.input_connection = bfield
tvtk_common.configure_input(surf_field_lines, bfield)
tvtk_common.configure_source_data(surf_field_lines, surf_seeds)
# surf_field_lines.source = surf_seeds
surf_field_lines.integrator = tvtk.RungeKutta4()
surf_field_lines.maximum_propagation = 1000
surf_field_lines.integration_direction = 'backward'
surf_field_lines.update()
#Create surface from 'parallel' lines
surface = tvtk.RuledSurfaceFilter()
tvtk_common.configure_connection(surface, surf_field_lines)
# surface.input = surf_field_lines.output
surface.close_surface = True
surface.pass_lines = True
surface.offset = 0
surface.distance_factor = 30
surface.ruled_mode = 'point_walk'
# surface.ruled_mode = 'resample'
# surface.resolution = (10,1)
surface.update()
return surf_field_lines, surface
def update_flux_surface(surf_seeds, surf_field_lines, surface):
"""
Update the flux surface streamlines and surface.
"""
surf_field_lines.update()
surface.update()
def make_poly_norms(poly_data):
"""
Extract the normal vectors from a PolyData instance (A surface).
Parameters
----------
poly_data: tvtk.PolyData instance
The poly data to extract normal vectors from
Returns
-------
poly_norms: tvtk.PolyDataNormals instance
The normal vectors
"""
poly_norms = tvtk.PolyDataNormals()
tvtk_common.configure_input_data(poly_norms, poly_data)
# poly_norms.input = poly_data
poly_norms.compute_point_normals = True
poly_norms.flip_normals = False
poly_norms.update()
return poly_norms
def norms_sanity_check(poly_norms):
"""
Check that the normals are pointing radially outwards.
    .. warning:: THIS IS HARD CODED to grid size and surface size
Parameters
----------
poly_norms: tvtk.PolyDataNormals instance
The normals to check
Returns
-------
poly_normals: tvtk.PolyDataNormals instance
The same normals but flipped if needed
"""
norm1 = poly_norms.output.point_data.normals[1000]
norm_sanity = np.dot(norm1,
np.array(poly_norms.input.points.get_point(1000))-
np.array([63,63,poly_norms.input.points.get_point(1000)[2]]))
if norm_sanity < 0:
poly_norms.flip_normals = not(poly_norms.flip_normals)
poly_norms.update()
passfail = False
else:
passfail = True
return passfail, poly_norms
def get_surface_vectors(poly_norms, surf_bfield):
""" Calculate the vector normal, vertically parallel and Azimuthally around
the surface cont"""
# Update the Normals
poly_norms.update()
passfail, poly_norms = norms_sanity_check(poly_norms)
# print "pass norm check?", passfail
normals = np.array(poly_norms.output.point_data.normals)
parallels = surf_bfield / np.sqrt(np.sum(surf_bfield**2,axis=1))[:, np.newaxis]
torsionals = np.cross(normals,parallels)
torsionals /= np.sqrt(np.sum(torsionals**2,axis=1))[:, np.newaxis]
return normals, torsionals, parallels
def interpolate_scalars(image_data, poly_data):
""" Interpolate a imagedata scalars to a set points in polydata"""
surface_probe_filter = tvtk.ProbeFilter()
tvtk_common.configure_source_data(surface_probe_filter, image_data)
tvtk_common.configure_input_data(surface_probe_filter, poly_data)
surface_probe_filter.update()
# Calculate Vperp, Vpar, Vphi
surface_scalars = np.array(surface_probe_filter.output.point_data.scalars)
return surface_probe_filter, surface_scalars
def interpolate_vectors(image_data, poly_data):
""" Interpolate a imagedata vectors to a set points in polydata"""
surface_probe_filter = tvtk.ProbeFilter()
tvtk_common.configure_source_data(surface_probe_filter, image_data)
tvtk_common.configure_input_data(surface_probe_filter, poly_data)
surface_probe_filter.update()
# Calculate Vperp, Vpar, Vphi
surface_vectors = np.array(surface_probe_filter.output.point_data.vectors)
return surface_probe_filter, surface_vectors
def update_interpolated_vectors(poly_data, surface_probe_filter):
if poly_data:
tvtk_common.configure_input_data(surface_probe_filter, poly_data)
#surface_probe_filter.input = poly_data
surface_probe_filter.update()
# Calculate Vperp, Vpar, Vphi
surface_vectors = np.array(surface_probe_filter.output.point_data.vectors)
return surface_vectors
def update_interpolated_scalars(poly_data, surface_probe_filter):
if poly_data:
tvtk_common.configure_input_data(surface_probe_filter, poly_data)
#surface_probe_filter.input = poly_data
surface_probe_filter.update()
# Calculate Vperp, Vpar, Vphi
surface_scalars = np.array(surface_probe_filter.output.point_data.scalars)
return surface_scalars
def get_surface_velocity_comp(surface_velocities, normals, torsionals, parallels):
vperp = np.zeros(len(surface_velocities))
vpar = np.zeros(len(surface_velocities))
vphi = np.zeros(len(surface_velocities))
for i in xrange(0,len(surface_velocities)):
vperp[i] = np.dot(normals[i],surface_velocities[i])
vpar[i] = np.dot(parallels[i],surface_velocities[i])
vphi[i] = np.dot(torsionals[i],surface_velocities[i])
return vperp, vpar, vphi
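# Hedged, numpy-only illustration (not part of the original module): decompose a single
# velocity vector into perpendicular/parallel/torsional components exactly as
# get_surface_velocity_comp does for every surface point. All values are made up.
def _example_velocity_decomposition():
    normal = np.array([1.0, 0.0, 0.0])
    parallel = np.array([0.0, 0.0, 1.0])
    torsional = np.cross(normal, parallel)  # [0, -1, 0]
    velocity = np.array([[3.0, 2.0, 1.0]])
    vperp, vpar, vphi = get_surface_velocity_comp(velocity, [normal],
                                                  [torsional], [parallel])
    return vperp[0], vpar[0], vphi[0]  # (3.0, 1.0, -2.0)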
def get_the_line(bfield, surf_seeds, n):
"""Generate the vertical line on the surface"""
the_line = tvtk.StreamTracer()
source=tvtk.PolyData(points=np.array([surf_seeds.points.get_point(n),[0,0,0]]))
tvtk_common.configure_input_data(the_line, bfield)
tvtk_common.configure_source_data(the_line, source)
the_line.integrator = tvtk.RungeKutta4()
the_line.maximum_propagation = 1000
the_line.integration_direction = 'backward'
the_line.update()
return the_line
def update_the_line(the_line, surf_seeds, seed, length):
""" Updates the TD line at each time step, while making sure the length is fixed"""
the_line.source.points = np.array([surf_seeds.get_point(seed), [0.0, 0.0, 0.0]])
the_line.update()
N = len(the_line.output.points)
if N < length:
print len(the_line.output.points)
for i in list(the_line.output.points)[N-1:]:
the_line.output.points.append(i)
if N > length:
print len(the_line.output.points)
the_line.output.points = list(the_line.output.points)[:length]
return the_line
def get_surface_indexes(surf_poly,the_line):
point_locator = tvtk.PointLocator(data_set=surf_poly)
surf_line_index = []
surf_line_points = []
for point in the_line.output.points:
surf_line_index.append(point_locator.find_closest_point(point))
surf_line_points.append(surf_poly.points.get_point(surf_line_index[-1]))
return surf_line_index, surf_line_points
class PolyDataWriter(object):
"""
This class allows you to write tvtk polydata objects to a file, with as
many or as few associated PointData arrays as you wish.
    Parameters
    ----------
    filename: str
        Path of the file to write the polydata to.
    polydata: tvtk.PolyData
        The polydata object to attach point data arrays to and write out.
    """
def __init__(self, filename, polydata):
self.poly_out = polydata
self.filename = filename
def add_point_data(self, vectors=None, scalars=None,
vector_name=None, scalar_name=None):
"""
        Add a vector component and/or an associated scalar.
"""
        # Error checking:
        if vectors is None and scalars is None:
            raise ValueError("Need to specify vectors or scalars")
if vectors is not None and vector_name is None:
raise ValueError("If vectors is specified a name must be specified")
if scalars is not None and scalar_name is None:
raise ValueError("If scalars is specified a name must be specified")
        pd_par = tvtk.PointData(scalars=scalars, vectors=vectors)
        if scalars is not None:
            pd_par.scalars.name = scalar_name
            self.poly_out.point_data.add_array(pd_par.scalars)
        if vectors is not None:
            pd_par.vectors.name = vector_name
            self.poly_out.point_data.add_array(pd_par.vectors)
def add_array(self, **kwargs):
"""
Add any number of arrays via keyword arguments.
Examples
--------
Add one scalar
>>> writer = PolyDataWriter(filename, polydata)
>>> writer.add_array(myscalar=myarray)
        >>> writer.write()
"""
for name, array in kwargs.items():
pd_par = tvtk.PointData(scalars=array)
pd_par.scalars.name = name
self.poly_out.point_data.add_array(pd_par.scalars)
def write(self):
w = tvtk.XMLPolyDataWriter(file_name=self.filename)
tvtk_common.configure_input(w, self.poly_out)
w.write()
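# Hedged usage sketch (not part of the original module): attach a single, hypothetical
# scalar array to an already-updated surface and write it out, mirroring what
# write_step below does for the full set of vectors and scalars.
def _example_write_scalar(surface, vperp, file_name='surface_example.vtp'):
    writer = PolyDataWriter(file_name, surface.output)
    writer.add_array(vperp=vperp)
    writer.write()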
def write_step(file_name, surface,
normals, parallels, torsionals, vperp, vpar, vphi):
"""
    Write out the surface vectors and velocity components.
"""
writer = PolyDataWriter(file_name, surface.output)
writer.add_point_data(scalars=vpar, vectors=parallels,
scalar_name="vpar", vector_name="par")
writer.add_point_data(scalars=vperp,vectors=normals,
scalar_name="vperp", vector_name="perp")
writer.add_point_data(scalars=vphi, vectors=torsionals,
scalar_name="vphi", vector_name="phi")
writer.write()
def write_flux(file_name, surface, surface_density, surface_va, surface_beta,
surface_cs, Fpar, Fperp, Fphi):
pd_density = tvtk.PointData(scalars=surface_density)
pd_density.scalars.name = "surface_density"
pd_va = tvtk.PointData(scalars=surface_va)
pd_va.scalars.name = "surface_va"
pd_beta = tvtk.PointData(scalars=surface_beta)
pd_beta.scalars.name = "surface_beta"
pd_cs = tvtk.PointData(scalars=surface_cs)
pd_cs.scalars.name = "surface_cs"
pd_Fpar = tvtk.PointData(scalars=Fpar)
pd_Fpar.scalars.name = "Fpar"
pd_Fperp = tvtk.PointData(scalars=Fperp)
pd_Fperp.scalars.name = "Fperp"
pd_Fphi = tvtk.PointData(scalars=Fphi)
pd_Fphi.scalars.name = "Fphi"
poly_out = surface
poly_out.point_data.add_array(pd_density.scalars)
poly_out.point_data.add_array(pd_va.scalars)
poly_out.point_data.add_array(pd_beta.scalars)
poly_out.point_data.add_array(pd_cs.scalars)
poly_out.point_data.add_array(pd_Fpar.scalars)
poly_out.point_data.add_array(pd_Fperp.scalars)
poly_out.point_data.add_array(pd_Fphi.scalars)
w = tvtk.XMLPolyDataWriter(file_name=file_name)
tvtk_common.configure_input(w, poly_out)
w.write()
def write_wave_flux(file_name, surface_poly, parallels, normals, torsionals,
Fwpar, Fwperp, Fwphi):
pd_Fwpar = tvtk.PointData(scalars=Fwpar, vectors=parallels)
pd_Fwpar.scalars.name = "Fwpar"
pd_Fwpar.vectors.name = "par"
pd_Fwperp = tvtk.PointData(scalars=Fwperp, vectors=normals)
pd_Fwperp.scalars.name = "Fwperp"
pd_Fwperp.vectors.name = "perp"
pd_Fwphi = tvtk.PointData(scalars=Fwphi, vectors=torsionals)
pd_Fwphi.scalars.name = "Fwphi"
pd_Fwphi.vectors.name = "phi"
poly_out = surface_poly
poly_out.point_data.add_array(pd_Fwpar.scalars)
poly_out.point_data.add_array(pd_Fwperp.scalars)
poly_out.point_data.add_array(pd_Fwphi.scalars)
w = tvtk.XMLPolyDataWriter(file_name=file_name)
tvtk_common.configure_input(w, poly_out)
w.write()
def read_step(filename):
""" Read back in a saved surface file"""
r = tvtk.XMLPolyDataReader(file_name=filename)
r.update()
return r.output
def get_data(poly_out,name):
names = {}
    # Extract variables from file
for i in xrange(0,poly_out.point_data.number_of_arrays):
names.update({poly_out.point_data.get_array_name(i):i})
data = np.array(poly_out.point_data.get_array(names[name]))
return data
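# Hedged round-trip sketch (not part of the original module): read a surface file
# written by write_flux above and pull out one of the named arrays it stores.
def _example_read_density(filename):
    poly = read_step(filename)
    return get_data(poly, "surface_density")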
|
Cadair/pysac
|
pysac/analysis/tube3D/tvtk_tube_functions.py
|
Python
|
bsd-2-clause
| 15,238
|
[
"Mayavi"
] |
bf56f5aacf9588c361314a42fd605095aa6f4d501742db4e3608f84fc4a8a24b
|
import unittest
from hamcrest import assert_that, contains, instance_of
from backdrop.core.upload.parse_excel import parse_excel, EXCEL_ERROR
from tests.support.test_helpers import fixture_path
class ParseExcelTestCase(unittest.TestCase):
def _parse_excel(self, file_name):
file_stream = open(fixture_path(file_name))
return parse_excel(file_stream)
def test_parse_an_xlsx_file(self):
assert_that(self._parse_excel("data.xlsx"), contains(contains(
["name", "age", "nationality"],
["Pawel", 27, "Polish"],
["Max", 35, "Italian"],
)))
def test_parse_xlsx_dates(self):
assert_that(self._parse_excel("dates.xlsx"), contains(contains(
["date"],
["2013-12-03T13:30:00+00:00"],
["2013-12-04T00:00:00+00:00"],
)))
def test_parse_xls_file(self):
assert_that(self._parse_excel("xlsfile.xls"), contains(contains(
["date", "name", "number"],
["2013-12-03T13:30:00+00:00", "test1", 12],
["2013-12-04T00:00:00+00:00", "test2", 34],
)))
def test_parse_xlsx_with_error(self):
assert_that(self._parse_excel("error.xlsx"), contains(contains(
["date", "name", "number", "error"],
["2013-12-03T13:30:00+00:00", "test1", 12, EXCEL_ERROR],
["2013-12-04T00:00:00+00:00", "test2", 34, EXCEL_ERROR],
)))
def test_parse_xlsx_with_multiple_sheets(self):
assert_that(self._parse_excel("multiple_sheets.xlsx"), contains(
contains(
["Sheet 1 content"],
["Nothing exciting"]
),
contains(
["Sheet 2 content", None],
["Sheet Name", "Sheet Index"],
["First", 0],
["Second", 1]
)))
def test_parse_xlsx_handle_empty_cells_and_lines(self):
assert_that(self._parse_excel("empty_cell_and_row.xlsx"), contains(
contains(
["Next cell is none", None, "Previous cell is none"],
[None, None, None],
["The above row", "is full", "of nones"]
)))
def test_that_numbers_are_converted_to_int_where_possible(self):
data = map(list, self._parse_excel("xlsfile.xls"))
assert_that(data[0][1][2], instance_of(int))
|
alphagov/backdrop
|
tests/core/upload/test_parse_excel.py
|
Python
|
mit
| 2,376
|
[
"exciting"
] |
4e35ab280af4679c275361006b299b4b9d7b3553e40665a615acc482612c6d5b
|
"""Utilities to manage processing flowcells and retrieving Galaxy stored info.
"""
import os
import glob
from six.moves import urllib, http_cookiejar
import json
def parse_dirname(fc_dir):
"""Parse the flow cell ID and date from a flow cell directory.
"""
(_, fc_dir) = os.path.split(fc_dir)
parts = fc_dir.split("_")
name = None
date = None
for p in parts:
if p.endswith(("XX", "xx", "XY", "X2", "X3")):
name = p
elif len(p) == 6:
try:
int(p)
date = p
except ValueError:
pass
if name is None or date is None:
raise ValueError("Did not find flowcell name: %s" % fc_dir)
return name, date
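# Hedged, self-contained example (not part of the original module): the directory name
# below is hypothetical but follows the "<date>_..._<flowcellXX>" layout that
# parse_dirname expects.
def _example_parse_dirname():
    name, date = parse_dirname("/data/110101_SN123_FC70BUKAAXX")
    assert (name, date) == ("FC70BUKAAXX", "110101")
    return name, date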
def get_qseq_dir(fc_dir):
"""Retrieve the qseq directory within Solexa flowcell output.
"""
machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls")
if os.path.exists(machine_bc):
return machine_bc
# otherwise assume we are in the qseq directory
# XXX What other cases can we end up with here?
else:
return fc_dir
def get_fastq_dir(fc_dir):
"""Retrieve the fastq directory within Solexa flowcell output.
"""
full_goat_bc = glob.glob(os.path.join(fc_dir, "Data", "*Firecrest*", "Bustard*"))
bustard_bc = glob.glob(os.path.join(fc_dir, "Data", "Intensities", "*Bustard*"))
machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls")
if os.path.exists(machine_bc):
return os.path.join(machine_bc, "fastq")
elif len(full_goat_bc) > 0:
return os.path.join(full_goat_bc[0], "fastq")
elif len(bustard_bc) > 0:
return os.path.join(bustard_bc[0], "fastq")
# otherwise assume we are in the fastq directory
# XXX What other cases can we end up with here?
else:
return fc_dir
class GalaxySqnLimsApi:
"""Manage talking with the Galaxy REST api for sequencing information.
"""
def __init__(self, base_url, user, passwd):
self._base_url = base_url
# build cookies so we keep track of being logged in
cj = http_cookiejar.LWPCookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
urllib.request.install_opener(opener)
login = dict(email=user, password=passwd, login_button='Login')
req = urllib.request.Request("%s/user/login" % self._base_url,
urllib.parse.urlencode(login))
response = urllib.request.urlopen(req)
def run_details(self, run):
"""Retrieve sequencing run details as a dictionary.
"""
run_data = dict(run=run)
req = urllib.request.Request("%s/nglims/api_run_details" % self._base_url,
urllib.parse.urlencode(run_data))
response = urllib.request.urlopen(req)
info = json.loads(response.read())
if "error" in info:
raise ValueError("Problem retrieving info: %s" % info["error"])
else:
return info["details"]
|
a113n/bcbio-nextgen
|
bcbio/illumina/flowcell.py
|
Python
|
mit
| 3,017
|
[
"Galaxy"
] |
da76dc5c4ef0b83ad92e1ab4a73097082829995e4aa1b29a26064cc8ab971bde
|
# /usr/bin/env python
# -*- coding: utf-8 -*-
# ##############################################################################
#
#
# Project: ECOOP, sponsored by The National Science Foundation
# Purpose: this code is part of the Cyberinfrastructure developed for the ECOOP project
# http://tw.rpi.edu/web/project/ECOOP
# from the TWC - Tetherless World Constellation
# at RPI - Rensselaer Polytechnic Institute
# founded by NSF
#
# Author: Massimo Di Stefano , distem@rpi.edu -
# http://tw.rpi.edu/web/person/MassimoDiStefano
#
###############################################################################
# Copyright (c) 2008-2014 Tetherless World Constellation at Rensselaer Polytechnic Institute
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
from __future__ import print_function
import os
import envoy
from datetime import datetime
import numpy as np
import scipy.stats as sts
import statsmodels.api as sm
from scipy.interpolate import interp1d
import pandas as pd
import matplotlib.pyplot as plt
from ecoop.ecooputil import shareUtil as EU
lowess = sm.nonparametric.lowess
try:
from IPython.core.display import display, Javascript
except:
print('you need to run this code from inside an IPython notebook in order to save provenance')
eu = EU()
#from bokeh import pyplot
class cfData():
def __init__(self):
self.x = ''
def nao_get(self,
url="https://climatedataguide.ucar.edu/sites/default/files/climate_index_files/nao_station_djfm.ascii",
save=None, csvout='nao.csv', prov=False, verbose=False):
"""
read NAO data from url and return a pandas dataframe
:param str url: url to data online default is set to :
        https://climatedataguide.ucar.edu/sites/default/files/climate_index_files/nao_station_djfm.ascii
:param str save: directory where to save raw data as csv
:return: naodata as pandas dataframe
:rtype: pandas dataframe
"""
#source_code_link = "http://epinux.com/shared/pyecoop_doc/ecoop.html#ecoop.cf.cfData.nao_get"
try:
naodata = pd.read_csv(url, sep=' ', header=0, skiprows=0, index_col=0, parse_dates=True, skip_footer=1)
if verbose:
print('dataset used: %s' % url)
if save:
eu.ensure_dir(save)
output = os.path.join(save, csvout)
naodata.to_csv(output, sep=',', header=True, index=True, index_label='Date')
if verbose:
print('nao data saved in : ' + output)
if prov:
jsonld = {
"@id": "ex:NAO_dataset",
"@type": ["prov:Entity", "ecoop:Dataset"],
"ecoop_ext:hasCode": {
"@id": "http://epinux.com/shared/pyecoop_doc/ecoop.html#ecoop.cf.cfData.nao_get",
"@type": "ecoop_ext:Code",
"ecoop_ext:hasFunction_src_code_link": url,
"ecoop_ext:hasParameter": [
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "csvout",
"ecoop_ext:parameter_value": csvout
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "save",
"ecoop_ext:parameter_value": save
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "url",
"ecoop_ext:parameter_value": url
}
]
}
}
#display('nao_get - metadata saved', metadata={'ecoop_prov': jsonld})
print(jsonld)
display(Javascript("IPython.notebook.metadata.ecoop_prov['nao_get'] = %s" % jsonld))
return naodata
except IOError:
print(
'unable to fetch the data, check if %s is a valid address and data is conform to NAO spec, for info about data spec. see [1]' % url)
# try cached version / history-linked-uri
def nin_get(self, url='http://www.cpc.ncep.noaa.gov/data/indices/sstoi.indices', save=None, csvout='nin.csv',
prov=False, verbose=False):
"""
read NIN data from url and return a pandas dataframe
:param str url: url to data online default is set to : http://www.cpc.ncep.noaa.gov/data/indices/sstoi.indices
:param str save: directory where to save raw data as csv
:return: nindata as pandas dataframe
:rtype: pandas dataframe
"""
try:
ts_raw = pd.read_table(url, sep=' ', header=0, skiprows=0, parse_dates=[['YR', 'MON']],
skipinitialspace=True,
                                   index_col=0, date_parser=self.parse)
if verbose:
print('dataset used: %s' % url)
ts_year_group = ts_raw.groupby(lambda x: x.year).apply(lambda sdf: sdf if len(sdf) > 11 else None)
ts_range = pd.date_range(ts_year_group.index[0][1], ts_year_group.index[-1][1] + pd.DateOffset(months=1),
freq="M")
ts = pd.DataFrame(ts_year_group.values, index=ts_range, columns=ts_year_group.keys())
ts_fullyears_group = ts.groupby(lambda x: x.year)
nin_anomalies = (ts_fullyears_group.mean()['ANOM.3'] - sts.nanmean(
ts_fullyears_group.mean()['ANOM.3'])) / sts.nanstd(ts_fullyears_group.mean()['ANOM.3'])
nin_anomalies = pd.DataFrame(nin_anomalies.values,
index=pd.to_datetime([str(x) for x in nin_anomalies.index]))
nin_anomalies = nin_anomalies.rename(columns={'0': 'nin'})
nin_anomalies.columns = ['nin']
if save:
eu.ensure_dir(save)
output = os.path.join(save, csvout)
nin_anomalies.to_csv(output, sep=',', header=True, index=True, index_label='Date')
if verbose:
print('data saved as %s ' % output)
if prov:
function = {}
function['name'] = 'nin_get'
function['parameters'] = {}
function['parameters']['url'] = url
function['parameters']['save'] = save
function['parameters']['csvout'] = csvout
display('cell-output metadata saved', metadata={'nin_get': function})
jsonld = {
"@id": "ex:NIN_dataset",
"@type": ["prov:Entity", "ecoop:Dataset"],
"ecoop_ext:hasCode": {
"@id": "http://epinux.com/shared/pyecoop_doc/ecoop.html#ecoop.cf.cfData.nin_get",
"@type": "ecoop_ext:Code",
"ecoop_ext:hasFunction_src_code_link": url,
"ecoop_ext:hasParameter": [
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "csvout",
"ecoop_ext:parameter_value": csvout
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "save",
"ecoop_ext:parameter_value": save
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "url",
"ecoop_ext:parameter_value": url
}
]
}
}
#display('nin_get - metadata saved', metadata={'ecoop_prov': jsonld})
#print(jsonld)
display(Javascript("IPython.notebook.metadata.ecoop_prov['nin_get'] = '%s'" % jsonld))
return nin_anomalies
except IOError:
print(
'unable to fetch the data, check if %s is a valid address and data is conform to AMO spec, for info about data spec. see [1]' % url)
# try cached version / history-linked-uri
def parse(self, yr, mon):
"""
        Convert year and month to a datetime object; the day is hardcoded to the 2nd day of each month
        :param yr: year date integer or string
        :param mon: month date integer or string
        :return: datetime object (time stamp)
        :rtype: datetime
"""
date = datetime(year=int(yr), day=2, month=int(mon))
return date
def amo_get(self, url='http://www.esrl.noaa.gov/psd/data/correlation/amon.us.long.data', save=None, csvout='amo.csv',
prov=False, verbose=False):
"""
read AMO data from url and return a pandas dataframe
:param str url: url to data online default is set to : http://www.cdc.noaa.gov/Correlation/amon.us.long.data
:param str save: directory where to save raw data as csv
:return: amodata as pandas dataframe
:rtype: pandas dataframe
"""
try:
ts_raw = pd.read_table(url, sep=' ', skiprows=1,
names=['year', 'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct',
'nov', 'dec'], skipinitialspace=True, parse_dates=True, skipfooter=4,
index_col=0)
if verbose:
print('dataset used: %s' % url)
ts_raw.replace(-9.99900000e+01, np.NAN, inplace=True)
amodata = ts_raw.mean(axis=1)
amodata.name = "amo"
amodata = pd.DataFrame(amodata)
if save:
eu.ensure_dir(save)
output = os.path.join(save, csvout)
amodata.to_csv(output, sep=',', header=True, index=True, index_label='Date')
if verbose:
print('data saved as %s ' % output)
if prov:
function = {}
function['name'] = 'amo_get'
function['parameters'] = {}
function['parameters']['url'] = url
function['parameters']['save'] = save
function['parameters']['csvout'] = csvout
jsonld = {
"@id": "ex:AMO_dataset",
"@type": ["prov:Entity", "ecoop:Dataset"],
"ecoop_ext:hasCode": {
"@id": "http://epinux.com/shared/pyecoop_doc/ecoop.html#ecoop.cf.cfData.amo_get",
"@type": "ecoop_ext:Code",
"ecoop_ext:hasFunction_src_code_link": "http://epinux.com/shared/pyecoop_doc/ecoop.html#ecoop.cf.cfData.amo_get",
"ecoop_ext:hasParameter": [
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "csvout",
"ecoop_ext:parameter_value": csvout
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "save",
"ecoop_ext:parameter_value": save
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "url",
"ecoop_ext:parameter_value": url
}
]
}
}
#display('amo_get - metadata saved', metadata={'ecoop_prov': jsonld})
#print(jsonld)
display(Javascript("IPython.notebook.metadata.ecoop_prov['amo_get'] = '%s'" % jsonld))
#display(Javascript("IPython.notebook.metadata.ecoop_prov.amo_get = {}".format(jsonld)))
return amodata
except:
print(
'unable to fetch the data, check if %s is a valid address and data is conform to AMO spec, for info about data spec. see [1]' % url)
# try cached version / history-linked-uri
class cfPlot():
def plot_index(self, data, name='Index', dataurl=False,
nb=True, datarange=None,
xticks=10, xticks_fontsize=10,
dateformat=False, fig_height=6, fig_width=4,
xmargin=True, ymargin=True,
legend=True, smoother=None,
output=None, dpi=300,
grid=True, xlabel='Year',
ylabel='', title='',
win_size=10, win_type='boxcar',
center=False, std=0.1,
beta=0.1, power=1, width=1,
min_periods=None, freq=None,
scategory=None, frac=1. / 3, it=3, figsave=None, prov=False, verbose=False):
"""
        Function to plot the Climate Forcing indicator for the ESR 2013. It follows the graphic guidelines from the past ESR,
        adding functionality such as several kinds of smoothing lines with different parameters.
:param data: pandas dataframe - input data
:param name: string - name used as dataframe index
        :param nb: boolean, if True the function is optimized to render the png inside a notebook
        :param datarange: list of 2 integers for the min and max year
:param xticks: integer xtick spacing default=10
:param xticks_fontsize: integer xticks fontsize default=10
:param dateformat: boolean if True set the xticks labels in date format
:param fig_height: int figure height default 8
:param fig_width: int figure width default 10
        :param xmargin: boolean default True
        :param ymargin: boolean default True
        :param legend: boolean default True
:param smoother: tuple (f,i)
:param output: directory where to save output default None
:param dpi: integer
        :param grid: boolean default True
:param xlabel: string default 'Year'
:param ylabel: string default ''
:param title: string default ''
:param win_size: integer default 10
:param win_type: string default 'boxcar'
        :param center: boolean default False
:param std: float default 0.1
:param beta: float default 0.1
:param power: integer default 1
:param width: integer default 1
:param min_periods: None
:param freq: None
:param str scategory: default 'rolling'
:param float frac: default 0.6666666666666666 Between 0 and 1. The fraction of the data used when estimating each y-value.,
:param int it: default 3 The number of residual-based reweightings to perform.
"""
try:
assert type(data) == pd.core.frame.DataFrame
#x = data.index.year
#y = data.values
if datarange:
#if datarange != None :
mind = np.datetime64(str(datarange[0]))
maxd = np.datetime64(str(datarange[1]))
newdata = data.ix[mind:maxd]
x = newdata.index.year
y = newdata.values
else:
x = data.index.year
y = data.values
x_p = x[np.where(y >= 0)[0]]
y_p = y[np.where(y >= 0)[0]]
x_n = x[np.where(y < 0)[0]]
y_n = y[np.where(y < 0)[0]]
fig = plt.figure(figsize=(fig_height, fig_width))
ax1 = fig.add_subplot(111)
ax1.bar(x_n, y_n, 0.8, facecolor='b', label=name + ' < 0')
ax1.bar(x_p, y_p, 0.8, facecolor='r', label=name + ' > 0')
ax1.grid(grid)
if ylabel != '':
ax1.set_ylabel(ylabel)
else:
ax1.set_ylabel(name)
            ax1.set_xlabel(xlabel)
if title == '':
ax1.set_title(name)
else:
ax1.set_title(title)
ax1.axhline(0, color='black', lw=1.5)
if xmargin:
ax1.set_xmargin(0.1)
if ymargin:
                ax1.set_ymargin(0.1)
if legend:
ax1.legend()
if not figsave:
figsave = name + '.png'
if scategory == 'rolling':
newy = self.rolling_smoother(data, stype=smoother, win_size=win_size, win_type=win_type, center=center,
std=std,
beta=beta, power=power, width=width)
ax1.plot(newy.index.year, newy.values, lw=3, color='g')
if scategory == 'expanding':
newy = self.expanding_smoother(data, stype=smoother, min_periods=min_periods, freq=freq)
ax1.plot(newy.index.year, newy.values, lw=3, color='g')
if scategory == 'lowess':
x = np.array(range(0, len(data.index.values))).T
newy = pd.Series(lowess(data.values.flatten(), x, frac=frac, it=it).T[1], index=data.index)
ax1.plot(newy.index.year, newy, lw=3, color='g')
## interp 1D attempt
xx = np.linspace(min(data.index.year), max(data.index.year), len(newy))
f = interp1d(xx, newy)
xnew = np.linspace(min(data.index.year), max(data.index.year), len(newy) * 4)
f2 = interp1d(xx, newy, kind='cubic')
#xnew = np.linspace(min(data.index.values), max(data.index.values), len(newy)*2)
ax1.plot(xx, newy, 'o', xnew, f(xnew), '-', xnew, f2(xnew), '--')
##
if scategory == 'ewma':
print('todo')
plt.xticks(data.index.year[::xticks].astype('int'), data.index.year[::xticks].astype('int'),
fontsize=xticks_fontsize)
plt.autoscale(enable=True, axis='both', tight=True)
if dateformat:
fig.autofmt_xdate(bottom=0.2, rotation=75, ha='right')
if output:
eu.ensure_dir(output)
ffigsave = os.path.join(output, figsave)
plt.savefig(ffigsave, dpi=dpi)
if verbose:
print('graph saved in: %s ' % ffigsave)
if scategory:
smoutput = name + '_' + scategory + '.csv'
if smoother:
smoutput = name + '_' + scategory + '_' + smoother + '.csv'
smoutput = os.path.join(output, smoutput)
if scategory == 'lowess':
newdataframe = data.copy(deep=True)
newdataframe['smooth'] = pd.Series(newy, index=data.index)
newdataframe.to_csv(smoutput, sep=',', header=True, index=True, index_label='Year')
else:
newy.to_csv(smoutput, sep=',', header=True, index=True, index_label='Year')
if verbose:
print(name + ' smoothed data saved in : %s ' % smoutput)
if nb:
fig.subplots_adjust(left=-1.0)
fig.subplots_adjust(right=1.0)
#plt.show()
if prov:
datalink=" no link to data provided"
if dataurl:
datalink=dataurl
jsonld = {
"@id": "ex:%s" % figsave,
"@type": ["prov:Entity", "ecoop:Figure"],
"ecoop_ext:hasData": "%s" % name,
"ecoop_ext:hasCode": {
"@type": "ecoop_ext:Code",
"ecoop_ext:hasFunction_src_code_link": "",
"ecoop_ext:hasParameter": [
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "beta",
"ecoop_ext:parameter_value": "%s" % beta
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "center",
"ecoop_ext:parameter_value": "%s" % center
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "data",
"ecoop_ext:parameter_value": "%s" % datalink
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "datarange",
"ecoop_ext:parameter_value": "%s" % datarange
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "dateformat",
"ecoop_ext:parameter_value": "%s" % dateformat
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "dpi",
"ecoop_ext:parameter_value": "%s" % dpi
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "figsave",
"ecoop_ext:parameter_value": "%s" % figsave
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "fig_height",
"ecoop_ext:parameter_value": "%s" % fig_height
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "fig_width",
"ecoop_ext:parameter_value": "%s" % fig_width
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "frac",
"ecoop_ext:parameter_value": "%s" % frac
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "freq",
"ecoop_ext:parameter_value": "%s" % freq
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "grid",
"ecoop_ext:parameter_value": "%s" % grid
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "it",
"ecoop_ext:parameter_value": "%s" % it
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "legend",
"ecoop_ext:parameter_value": "%s" % legend
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "min_periods",
"ecoop_ext:parameter_value": "%s" % min_periods
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "name",
"ecoop_ext:parameter_value": "%s" % name
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "nb",
"ecoop_ext:parameter_value": "%s" % nb
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "output",
"ecoop_ext:parameter_value": "%s" % output
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "power",
"ecoop_ext:parameter_value": "%s" % power
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "scategory",
"ecoop_ext:parameter_value": "%s" % scategory
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "smoother",
"ecoop_ext:parameter_value": "%s" % smoother
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "std",
"ecoop_ext:parameter_value": "%s" % std
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "title",
"ecoop_ext:parameter_value": "%s" % title
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "width",
"ecoop_ext:parameter_value": width
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "win_size",
"ecoop_ext:parameter_value": "%s" % win_size
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "win_type",
"ecoop_ext:parameter_value": "%s" % win_type
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "xlabel",
"ecoop_ext:parameter_value": "%s" % xlabel
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "xmargin",
"ecoop_ext:parameter_value": "%s" % xmargin
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "xticks",
"ecoop_ext:parameter_value": "%s" % xticks
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "xticks_fontsize",
"ecoop_ext:parameter_value": "%s" % xticks_fontsize
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "ylabel",
"ecoop_ext:parameter_value": "%s" % ylabel
},
{
"@type": "ecoop_ext:Parameter",
"ecoop_ext:parameter_name": "ymargin",
"ecoop_ext:parameter_value": "%s" % ymargin
}
]
},
"ecoop_ext:usedSoftware": [{"@id": "ex:ecoop_software"}, {"@id": "ex:ipython_software"}]
}
#display('cell-output metadata saved', metadata={'ecoop_prov': jsonld})
#print(jsonld)
provname=figsave.replace(".","_")
display(Javascript("IPython.notebook.metadata.ecoop_prov.%s = %s" % (provname,jsonld)))
#display(Javascript("IPython.notebook.metadata.ecoop_prov.plot_index = {}".format(jsonld)))
#pyplot.show_bokeh(plt.gcf(), filename="subplots.html")
plt.show()
except AssertionError:
if type(data) != pd.core.frame.DataFrame:
print('input data not compatible, it has to be of type : pandas.core.frame.DataFrame')
print('data not loaded correctly')
def rolling_smoother(self, data, stype='rolling_mean', win_size=10, win_type='boxcar', center=False, std=0.1,
beta=0.1,
power=1, width=1):
"""
        Perform a rolling smoothing on the data. For complete help refer to http://pandas.pydata.org/pandas-docs/dev/computation.html
:param data:
:param stype:
:param win_size:
:param win_type:
:param center:
:param std:
:param beta:
:param power:
:param width:
        smoothing types:
ROLLING :
rolling_count Number of non-null observations
rolling_sum Sum of values
rolling_mean Mean of values
rolling_median Arithmetic median of values
rolling_min Minimum
rolling_max Maximum
rolling_std Unbiased standard deviation
rolling_var Unbiased variance
rolling_skew Unbiased skewness (3rd moment)
rolling_kurt Unbiased kurtosis (4th moment)
rolling_window Moving window function
window types:
boxcar
triang
blackman
hamming
bartlett
parzen
bohman
blackmanharris
nuttall
barthann
kaiser (needs beta)
gaussian (needs std)
general_gaussian (needs power, width)
slepian (needs width)
"""
if stype == 'count':
newy = pd.rolling_count(data, win_size)
if stype == 'sum':
newy = pd.rolling_sum(data, win_size)
if stype == 'mean':
newy = pd.rolling_mean(data, win_size)
if stype == 'median':
newy = pd.rolling_median(data, win_size)
if stype == 'min':
newy = pd.rolling_min(data, win_size)
if stype == 'max':
newy = pd.rolling_max(data, win_size)
if stype == 'std':
newy = pd.rolling_std(data, win_size)
if stype == 'var':
newy = pd.rolling_var(data, win_size)
if stype == 'skew':
newy = pd.rolling_skew(data, win_size)
if stype == 'kurt':
newy = pd.rolling_kurt(data, win_size)
        if stype == 'window':
            if win_type == 'kaiser':
                newy = pd.rolling_window(data, win_size, win_type, center=center, beta=beta)
            elif win_type == 'gaussian':
                newy = pd.rolling_window(data, win_size, win_type, center=center, std=std)
            elif win_type == 'general_gaussian':
                newy = pd.rolling_window(data, win_size, win_type, center=center, power=power, width=width)
            else:
                newy = pd.rolling_window(data, win_size, win_type, center=center)
return newy
def expanding_smoother(self, data, stype='rolling_mean', min_periods=None, freq=None):
"""
        Perform an expanding smoothing on the data. For complete help refer to http://pandas.pydata.org/pandas-docs/dev/computation.html
:param data: pandas dataframe input data
        :param stype: smoothing type
        :param min_periods: periods
        :param freq: frequency
smoothing types:
expanding_count Number of non-null observations
expanding_sum Sum of values
expanding_mean Mean of values
expanding_median Arithmetic median of values
expanding_min Minimum
expanding_max Maximum
            expanding_std Unbiased standard deviation
expanding_var Unbiased variance
expanding_skew Unbiased skewness (3rd moment)
expanding_kurt Unbiased kurtosis (4th moment)
"""
if stype == 'count':
newy = pd.expanding_count(data, min_periods=min_periods, freq=freq)
if stype == 'sum':
newy = pd.expanding_sum(data, min_periods=min_periods, freq=freq)
if stype == 'mean':
newy = pd.expanding_mean(data, min_periods=min_periods, freq=freq)
if stype == 'median':
newy = pd.expanding_median(data, min_periods=min_periods, freq=freq)
if stype == 'min':
newy = pd.expanding_min(data, min_periods=min_periods, freq=freq)
if stype == 'max':
newy = pd.expanding_max(data, min_periods=min_periods, freq=freq)
if stype == 'std':
newy = pd.expanding_std(data, min_periods=min_periods, freq=freq)
if stype == 'var':
newy = pd.expanding_var(data, min_periods=min_periods, freq=freq)
if stype == 'skew':
newy = pd.expanding_skew(data, min_periods=min_periods, freq=freq)
if stype == 'kurt':
newy = pd.expanding_kurt(data, min_periods=min_periods, freq=freq)
return newy
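# Hedged usage sketch (not part of the original module): apply the rolling mean
# smoother to a small synthetic yearly series. The index, window size and column name
# are made up; this relies on the legacy pd.rolling_* API that this module targets.
def _example_rolling_smoother():
    idx = pd.date_range('1950-01-01', periods=20, freq='A')
    data = pd.DataFrame(np.random.randn(20), index=idx, columns=['index_value'])
    return cfPlot().rolling_smoother(data, stype='mean', win_size=5)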
|
epifanio/ecoop-1
|
pyecoop/lib/ecoop/cf.py
|
Python
|
apache-2.0
| 36,260
|
[
"Gaussian"
] |
15f015a19d1cc55c9f275b9a1adefe01ea617fcf1dac59dbbf9e28dfee4b02f4
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
import glob
import os
import StringIO
import unittest
from xml.etree import ElementTree
from docutils.core import publish_doctree
import stoq
class TestAPIDoc(unittest.TestCase):
def setUp(self):
self.errors = []
def test_apidoc(self):
for rst_name in glob.glob('docs/api/*.rst'):
self.check_filename(rst_name)
if self.errors:
self.fail('ERROR: ' + '\n'.join(sorted(self.errors)))
def _list_modules(self, rst_filename):
path = os.path.basename(rst_filename)[:-4].replace('.', '/')
dir_name = os.path.abspath(
os.path.join(
os.path.dirname(stoq.__file__), '..', path))
py_files = glob.glob('%s/*.py' % (dir_name, ))
modules = [os.path.basename(f)[:-3] for f in py_files]
        # stoqlib.domain is special-cased as we merged the payment module
# into one documentation file.
if path == 'stoqlib/domain':
modules.extend('payment.%s' % (os.path.basename(f)[:-3], )
for f in glob.glob('stoqlib/domain/payment/*.py'))
modules.remove('payment.__init__')
try:
modules.remove('__init__')
except ValueError:
pass
return modules
def check_filename(self, rst_filename):
# Skip l10n modules that needs to be cleaned up
if os.path.basename(rst_filename) in [
'stoqlib.l10n.generic.rst',
'stoqlib.l10n.br.rst',
'stoqlib.l10n.sv.rst']:
return
# List all modules
modules = self._list_modules(rst_filename)
# Parse RST
rst_data = open(rst_filename).read()
doc = publish_doctree(
rst_data,
settings_overrides={
'input_encoding': 'utf-8',
'warning_stream': StringIO.StringIO()})
# Convert to an XML string
xml = doc.asdom().toxml()
# Parse with ElementTree
doctree = ElementTree.fromstring(xml)
for section in doctree.findall('section'):
name = section.attrib.get('names')
if not name.startswith(':mod:'):
continue
if name.endswith(' package'):
continue
if name == 'subpackage':
continue
# Check for removed python modules
module_name = name[5:].split('`')[1]
try:
modules.remove(module_name)
except ValueError:
self.errors.append('%s: %s module does not exist' % (
rst_filename, module_name))
# Check for missing python modules
for module in modules:
# Skip test modules
if module.startswith('test_'):
continue
# Skip a couple of external modules
if module in [
'generictreemodel',
'gicompat']:
continue
self.errors.append('%s: %s module is missing' % (
rst_filename, module))
if __name__ == '__main__':
unittest.main()
|
andrebellafronte/stoq
|
tests/test_apidocs.py
|
Python
|
gpl-2.0
| 4,031
|
[
"VisIt"
] |
1f6be9fd4c755f28066001e5d8f276401ed18d684ff4f56253d08f1b304588dc
|
import numpy as np
from numpy.testing import assert_array_equal
from nose.tools import assert_true
from sklearn.datasets import make_blobs
from pystruct.models import BinaryClf
from pystruct.learners import (NSlackSSVM, SubgradientSSVM,
OneSlackSSVM)
def test_model_1d():
# 10 1d datapoints between -1 and 1
np.random.seed(0)
X = np.random.uniform(size=(10, 1))
# linearly separable labels
Y = 1 - 2 * (X.ravel() < .5)
pbl = BinaryClf(n_features=2)
# we have to add a constant 1 feature by hand :-/
X = np.hstack([X, np.ones((X.shape[0], 1))])
w = [1, -.5]
Y_pred = np.hstack([pbl.inference(x, w) for x in X])
assert_array_equal(Y, Y_pred)
# check that sign of joint_feature and inference agree
for x, y in zip(X, Y):
assert_true(np.dot(w, pbl.joint_feature(x, y)) > np.dot(w, pbl.joint_feature(x, -y)))
# check that sign of joint_feature and the sign of y correspond
for x, y in zip(X, Y):
assert_true(np.dot(w, pbl.joint_feature(x, y)) == -np.dot(w, pbl.joint_feature(x, -y)))
def test_simple_1d_dataset_cutting_plane():
    # 30 1d datapoints between 0 and 1
X = np.random.uniform(size=(30, 1))
# linearly separable labels
Y = 1 - 2 * (X.ravel() < .5)
# we have to add a constant 1 feature by hand :-/
X = np.hstack([X, np.ones((X.shape[0], 1))])
pbl = BinaryClf(n_features=2)
svm = NSlackSSVM(pbl, check_constraints=True, C=1000)
svm.fit(X, Y)
assert_array_equal(Y, np.hstack(svm.predict(X)))
def test_blobs_2d_cutting_plane():
# make two gaussian blobs
X, Y = make_blobs(n_samples=80, centers=2, random_state=1)
Y = 2 * Y - 1
# we have to add a constant 1 feature by hand :-/
X = np.hstack([X, np.ones((X.shape[0], 1))])
X_train, X_test, Y_train, Y_test = X[:40], X[40:], Y[:40], Y[40:]
pbl = BinaryClf(n_features=3)
svm = NSlackSSVM(pbl, check_constraints=True, C=1000)
svm.fit(X_train, Y_train)
assert_array_equal(Y_test, np.hstack(svm.predict(X_test)))
def test_blobs_2d_subgradient():
# make two gaussian blobs
X, Y = make_blobs(n_samples=80, centers=2, random_state=1)
Y = 2 * Y - 1
# we have to add a constant 1 feature by hand :-/
X = np.hstack([X, np.ones((X.shape[0], 1))])
X_train, X_test, Y_train, Y_test = X[:40], X[40:], Y[:40], Y[40:]
pbl = BinaryClf(n_features=3)
svm = SubgradientSSVM(pbl, C=1000)
svm.fit(X_train, Y_train)
assert_array_equal(Y_test, np.hstack(svm.predict(X_test)))
def test_blobs_2d_one_slack():
# make two gaussian blobs
X, Y = make_blobs(n_samples=80, centers=2, random_state=1)
Y = 2 * Y - 1
# we have to add a constant 1 feature by hand :-/
X = np.hstack([X, np.ones((X.shape[0], 1))])
X_train, X_test, Y_train, Y_test = X[:40], X[40:], Y[:40], Y[40:]
pbl = BinaryClf(n_features=3)
svm = OneSlackSSVM(pbl, C=1000)
svm.fit(X_train, Y_train)
assert_array_equal(Y_test, np.hstack(svm.predict(X_test)))
def test_blobs_batch():
# make two gaussian blobs
X, Y = make_blobs(n_samples=80, centers=2, random_state=1)
Y = 2 * Y - 1
pbl = BinaryClf(n_features=2)
# test joint_feature
joint_feature_mean = pbl.batch_joint_feature(X, Y)
joint_feature_mean2 = np.sum([pbl.joint_feature(x, y) for x, y in zip(X, Y)], axis=0)
assert_array_equal(joint_feature_mean, joint_feature_mean2)
# test inference
w = np.random.uniform(-1, 1, size=pbl.size_joint_feature)
Y_hat = pbl.batch_inference(X, w)
for i, (x, y_hat) in enumerate(zip(X, Y_hat)):
assert_array_equal(Y_hat[i], pbl.inference(x, w))
# test inference
Y_hat = pbl.batch_loss_augmented_inference(X, Y, w)
for i, (x, y, y_hat) in enumerate(zip(X, Y, Y_hat)):
assert_array_equal(Y_hat[i], pbl.loss_augmented_inference(x, y, w))
def test_break_ties():
pbl = BinaryClf(n_features=2)
X = np.array([[-1., -1.], [-1., 1.], [1., 1.]])
w = np.array([1., 1.])
assert_array_equal(pbl.batch_inference(X, w), np.array([-1, 1, 1]))
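# Hedged illustration (not part of the original test suite): the ":-/" comments above
# refer to appending a constant bias feature by hand because BinaryClf has no built-in
# intercept. This helper, whose name is ours, shows that trick in isolation.
def _with_bias(X):
    return np.hstack([X, np.ones((X.shape[0], 1))])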
|
d-mittal/pystruct
|
pystruct/tests/test_learners/test_binary_svm.py
|
Python
|
bsd-2-clause
| 4,085
|
[
"Gaussian"
] |
7754a301dc65ea8e35353a10cfe78a1863fc2539c85c732b526dd942dc61adbf
|
import logging
from cis.data_io.Coord import CoordList
from cis.data_io.products import AProduct
from cis.data_io.ungridded_data import UngriddedCoordinates, UngriddedData
from abc import ABCMeta, abstractmethod
class CCI(object):
"""
Abstract class for the various possible data products. This just defines the interface which
the subclasses must implement.
"""
__metaclass__ = ABCMeta
def _create_coord_list(self, filenames):
from cis.data_io.netcdf import read_many_files_individually, get_metadata
from cis.data_io.Coord import Coord
from cis.exceptions import InvalidVariableError
try:
variables = ["lon", "lat", "time"]
data = read_many_files_individually(filenames, variables)
except InvalidVariableError:
variables = ["longitude", "latitude", "time"]
data = read_many_files_individually(filenames, variables)
logging.info("Listing coordinates: " + str(variables))
coords = CoordList()
coords.append(Coord(data[variables[0]], get_metadata(data[variables[0]][0]), "X"))
coords.append(Coord(data[variables[1]], get_metadata(data[variables[1]][0]), "Y"))
coords.append(self._fix_time(Coord(data[variables[2]], get_metadata(data[variables[2]][0]), "T")))
return coords
@abstractmethod
def _fix_time(self, coords):
pass
def create_coords(self, filenames, variable=None):
return UngriddedCoordinates(self._create_coord_list(filenames))
def create_data_object(self, filenames, variable):
from cis.data_io.netcdf import get_metadata, read_many_files_individually
coords = self._create_coord_list(filenames)
var = read_many_files_individually(filenames, [variable])
metadata = get_metadata(var[variable][0])
return UngriddedData(var[variable], metadata, coords)
class Cloud_CCI(CCI, AProduct):
def get_file_signature(self):
return [r'..*ESACCI.*CLOUD.*']
def _fix_time(self, coord):
coord.convert_julian_to_std_time()
return coord
def get_file_format(self, filenames):
return "NetCDF/Cloud_CCI"
class Aerosol_CCI(CCI, AProduct):
valid_dimensions = ["pixel_number"]
def get_file_signature(self):
return [r'.*ESACCI.*AEROSOL.*']
def _fix_time(self, coord):
import datetime
coord.convert_TAI_time_to_std_time(datetime.datetime(1970, 1, 1))
return coord
def get_file_format(self, filename):
return "NetCDF/Aerosol_CCI"
|
zak-k/cis
|
cis/data_io/products/CCI.py
|
Python
|
gpl-3.0
| 2,569
|
[
"NetCDF"
] |
e172891e152e42ad9c85944c5a764c210d1cb5b581f63c8f73d79dc6aa11290f
|
# This file is part of Androguard.
#
# Copyright (c) 2012 Geoffroy Gueguen <geoffroy.gueguen@gmail.com>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from androguard.decompiler.dad.util import get_type, ACCESS_FLAGS_METHODS
from androguard.decompiler.dad.opcode_ins import Op
from androguard.decompiler.dad.instruction import (Constant, ThisParam,
BinaryExpression,
BinaryCompExpression)
logger = logging.getLogger('dad.writer')
class Writer(object):
def __init__(self, graph, method):
self.graph = graph
self.method = method
self.visited_nodes = set()
self.ind = 4
self.buffer = []
self.loop_follow = [None]
self.if_follow = [None]
self.switch_follow = [None]
self.latch_node = [None]
self.try_follow = [None]
self.next_case = None
self.skip = False
self.need_break = True
def __str__(self):
return ''.join(self.buffer)
def inc_ind(self, i=1):
self.ind += (4 * i)
def dec_ind(self, i=1):
self.ind -= (4 * i)
def space(self):
if self.skip:
self.skip = False
return ''
return ' ' * self.ind
def write_ind(self):
if self.skip:
self.skip = False
else:
self.write(self.space())
def write(self, s):
self.buffer.append(s)
def end_ins(self):
self.write(';\n')
def write_ind_visit_end(self, lhs, s, rhs=None):
self.write_ind()
lhs.visit(self)
self.write(s)
if rhs is not None:
rhs.visit(self)
self.end_ins()
def write_inplace_if_possible(self, lhs, rhs):
if isinstance(rhs, BinaryExpression) and lhs == rhs.var_map[rhs.arg1]:
exp_rhs = rhs.var_map[rhs.arg2]
if rhs.op in '+-' and isinstance(exp_rhs, Constant) and\
exp_rhs.get_int_value() == 1:
return self.write_ind_visit_end(lhs, rhs.op * 2)
return self.write_ind_visit_end(lhs, ' %s= ' % rhs.op, exp_rhs)
return self.write_ind_visit_end(lhs, ' = ', rhs)
def visit_ins(self, ins):
ins.visit(self)
def write_method(self):
acc = []
access = self.method.access
self.constructor = False
for modifier in access:
if modifier == 'constructor':
self.constructor = True
continue
acc.append(modifier)
if self.constructor:
name = get_type(self.method.cls_name).split('.')[-1]
proto = '%s %s' % (' '.join(acc), name)
else:
name = self.method.name
proto = '%s %s %s' % (
' '.join(acc), get_type(self.method.type), name)
self.write('\n%s%s' % (self.space(), proto))
params = self.method.lparams
if 'static' not in access:
params = params[1:]
proto = ''
if self.method.params_type:
proto = ', '.join(['%s p%s' % (get_type(p_type), param) for
p_type, param in zip(self.method.params_type, params)])
self.write('(%s)' % proto)
if self.graph is None:
return self.write(';\n')
self.write('\n%s{\n' % self.space())
self.inc_ind()
self.visit_node(self.graph.entry)
self.dec_ind()
self.write('%s}\n' % self.space())
def visit_node(self, node):
if node in (self.if_follow[-1], self.switch_follow[-1],
self.loop_follow[-1], self.latch_node[-1],
self.try_follow[-1]):
return
if not node.type.is_return and node in self.visited_nodes:
return
self.visited_nodes.add(node)
for var in node.var_to_declare:
var.visit_decl(self)
var.declared = True
node.visit(self)
def visit_loop_node(self, loop):
follow = loop.follow['loop']
if follow is None and not loop.looptype.is_endless:
logger.error('Loop has no follow !')
if loop.looptype.is_pretest:
if loop.true is follow:
loop.neg()
loop.true, loop.false = loop.false, loop.true
self.write('%swhile (' % self.space())
loop.visit_cond(self)
self.write(') {\n')
elif loop.looptype.is_posttest:
self.write('%sdo {\n' % self.space())
self.latch_node.append(loop.latch)
elif loop.looptype.is_endless:
self.write('%swhile(true) {\n' % self.space())
self.inc_ind()
self.loop_follow.append(follow)
if loop.looptype.is_pretest:
self.visit_node(loop.true)
else:
self.visit_node(loop.cond)
self.loop_follow.pop()
self.dec_ind()
if loop.looptype.is_pretest:
self.write('%s}\n' % self.space())
elif loop.looptype.is_posttest:
self.latch_node.pop()
self.write('%s} while(' % self.space())
loop.latch.visit_cond(self)
self.write(');\n')
else:
self.inc_ind()
self.visit_node(loop.latch)
self.dec_ind()
self.write('%s}\n' % self.space())
if follow is not None:
self.visit_node(follow)
def visit_cond_node(self, cond):
follow = cond.follow['if']
if cond.false is cond.true:
            self.write('%s// Both branches of the condition point to the same'
                       ' code.\n' % self.space())
self.write('%s// if (' % self.space())
cond.visit_cond(self)
self.write(') {\n')
self.inc_ind()
self.visit_node(cond.true)
self.dec_ind()
self.write('%s// }\n' % self.space())
return
if cond.false is self.loop_follow[-1]:
cond.neg()
cond.true, cond.false = cond.false, cond.true
if self.loop_follow[-1] in (cond.true, cond.false):
self.write('%sif (' % self.space())
cond.visit_cond(self)
self.write(') {\n')
self.inc_ind()
self.write('%sbreak;\n' % self.space())
self.dec_ind()
self.write('%s}\n' % self.space())
self.visit_node(cond.false)
elif follow is not None:
if cond.true in (follow, self.next_case) or\
cond.num > cond.true.num:
# or cond.true.num > cond.false.num:
cond.neg()
cond.true, cond.false = cond.false, cond.true
self.if_follow.append(follow)
            if cond.true not in self.visited_nodes:
self.write('%sif (' % self.space())
cond.visit_cond(self)
self.write(') {\n')
self.inc_ind()
self.visit_node(cond.true)
self.dec_ind()
is_else = not (follow in (cond.true, cond.false))
            if is_else and cond.false not in self.visited_nodes:
self.write('%s} else {\n' % self.space())
self.inc_ind()
self.visit_node(cond.false)
self.dec_ind()
self.if_follow.pop()
self.write('%s}\n' % self.space())
self.visit_node(follow)
else:
self.write('%sif (' % self.space())
cond.visit_cond(self)
self.write(') {\n')
self.inc_ind()
self.visit_node(cond.true)
self.dec_ind()
self.write('%s} else {\n' % self.space())
self.inc_ind()
self.visit_node(cond.false)
self.dec_ind()
self.write('%s}\n' % self.space())
def visit_short_circuit_condition(self, nnot, aand, cond1, cond2):
if nnot:
cond1.neg()
self.write('(')
cond1.visit_cond(self)
self.write(') %s (' % ['||', '&&'][aand])
cond2.visit_cond(self)
self.write(')')
def visit_switch_node(self, switch):
lins = switch.get_ins()
for ins in lins[:-1]:
self.visit_ins(ins)
switch_ins = switch.get_ins()[-1]
self.write('%sswitch (' % self.space())
self.visit_ins(switch_ins)
self.write(') {\n')
follow = switch.follow['switch']
cases = switch.cases
self.switch_follow.append(follow)
default = switch.default
for i, node in enumerate(cases):
if node in self.visited_nodes:
continue
self.inc_ind()
for case in switch.node_to_case[node]:
self.write('%scase %d:\n' % (self.space(), case))
if i + 1 < len(cases):
self.next_case = cases[i + 1]
else:
self.next_case = None
if node is default:
self.write('%sdefault:\n' % self.space())
default = None
self.inc_ind()
self.visit_node(node)
if self.need_break:
self.write('%sbreak;\n' % self.space())
else:
self.need_break = True
self.dec_ind(2)
if default not in (None, follow):
self.inc_ind()
self.write('%sdefault:\n' % self.space())
self.inc_ind()
self.visit_node(default)
self.dec_ind(2)
self.write('%s}\n' % self.space())
self.switch_follow.pop()
self.visit_node(follow)
def visit_statement_node(self, stmt):
sucs = self.graph.sucs(stmt)
for ins in stmt.get_ins():
self.visit_ins(ins)
if len(sucs) == 1:
if sucs[0] is self.loop_follow[-1]:
self.write('%sbreak;\n' % self.space())
elif sucs[0] is self.next_case:
self.need_break = False
else:
self.visit_node(sucs[0])
def visit_try_node(self, try_node):
self.write('%stry {\n' % self.space())
self.inc_ind()
self.try_follow.append(try_node.follow)
self.visit_node(try_node.try_start)
self.dec_ind()
self.write('%s}' % self.space())
for catch in try_node.catch:
self.visit_node(catch)
self.write('\n')
self.visit_node(self.try_follow.pop())
def visit_catch_node(self, catch_node):
self.write(' catch (')
catch_node.visit_exception(self)
self.write(') {\n')
self.inc_ind()
self.visit_node(catch_node.catch_start)
self.dec_ind()
self.write('%s}' % self.space())
def visit_return_node(self, ret):
self.need_break = False
for ins in ret.get_ins():
self.visit_ins(ins)
def visit_throw_node(self, throw):
for ins in throw.get_ins():
self.visit_ins(ins)
def visit_decl(self, var):
if not var.declared:
var_type = var.get_type() or 'unknownType'
self.write('%s%s v%s' % (
self.space(), get_type(var_type), var.value()))
self.end_ins()
def visit_constant(self, cst):
        if isinstance(cst, (str, unicode)):
return self.write(string(cst))
self.write('%r' % cst)
def visit_base_class(self, cls):
self.write(cls)
def visit_variable(self, var):
if not var.declared:
var_type = var.get_type() or 'unknownType'
self.write('%s ' % get_type(var_type))
var.declared = True
self.write('v%s' % var.value())
def visit_param(self, param):
self.write('p%s' % param)
def visit_this(self):
self.write('this')
def visit_assign(self, lhs, rhs):
if lhs is not None:
return self.write_inplace_if_possible(lhs, rhs)
self.write_ind()
rhs.visit(self)
if not self.skip:
self.end_ins()
def visit_move_result(self, lhs, rhs):
self.write_ind_visit_end(lhs, ' = ', rhs)
def visit_move(self, lhs, rhs):
if lhs is not rhs:
self.write_inplace_if_possible(lhs, rhs)
def visit_astore(self, array, index, rhs):
self.write_ind()
array.visit(self)
self.write('[')
if isinstance(index, Constant):
index.visit(self, 'I')
else:
index.visit(self)
self.write('] = ')
rhs.visit(self)
self.end_ins()
def visit_put_static(self, cls, name, rhs):
self.write_ind()
self.write('%s.%s = ' % (cls, name))
rhs.visit(self)
self.end_ins()
def visit_put_instance(self, lhs, name, rhs):
self.write_ind_visit_end(lhs, '.%s = ' % name, rhs)
def visit_new(self, atype):
self.write('new %s' % get_type(atype))
def visit_invoke(self, name, base, ptype, rtype, args):
if isinstance(base, ThisParam):
if name == '<init>' and self.constructor and len(args) == 0:
self.skip = True
return
base.visit(self)
if name != '<init>':
self.write('.%s' % name)
self.write('(')
comma = False
for arg in args:
if comma:
self.write(', ')
comma = True
arg.visit(self)
self.write(')')
def visit_return_void(self):
self.write_ind()
self.write('return')
self.end_ins()
def visit_return(self, arg):
self.write_ind()
self.write('return ')
arg.visit(self)
self.end_ins()
def visit_nop(self):
pass
def visit_switch(self, arg):
arg.visit(self)
def visit_check_cast(self, arg, atype):
self.write('(checkcast)(')
arg.visit(self)
self.write(', %s)' % atype)
def visit_aload(self, array, index):
array.visit(self)
self.write('[')
index.visit(self)
self.write(']')
def visit_alength(self, array):
array.visit(self)
self.write('.length')
def visit_new_array(self, atype, size):
self.write('new %s[' % get_type(atype[1:]))
size.visit(self)
self.write(']')
def visit_filled_new_array(self, atype, size, args):
self.write('new %s {' % get_type(atype))
for idx, arg in enumerate(args):
arg.visit(self)
if idx + 1 < len(args):
self.write(', ')
self.write('})')
def visit_fill_array(self, array, value):
self.write_ind()
array.visit(self)
self.write(' = {')
data = value.get_data()
self.write(', '.join(['%d' % ord(c) for c in data[:-1]]))
self.write('}')
self.end_ins()
def visit_move_exception(self, var):
var.declared = True
var_type = var.get_type() or 'unknownType'
self.write('%s v%s' % (get_type(var_type), var.value()))
def visit_monitor_enter(self, ref):
self.write_ind()
self.write('synchronized(')
ref.visit(self)
self.write(') {\n')
self.inc_ind()
def visit_monitor_exit(self, ref):
self.dec_ind()
self.write_ind()
self.write('}\n')
def visit_throw(self, ref):
self.write_ind()
self.write('throw ')
ref.visit(self)
self.end_ins()
def visit_binary_expression(self, op, arg1, arg2):
self.write('(')
arg1.visit(self)
self.write(' %s ' % op)
arg2.visit(self)
self.write(')')
def visit_unary_expression(self, op, arg):
self.write('(%s ' % op)
arg.visit(self)
self.write(')')
def visit_cast(self, op, arg):
self.write('(%s ' % op)
arg.visit(self)
self.write(')')
def visit_cond_expression(self, op, arg1, arg2):
arg1.visit(self)
self.write(' %s ' % op)
arg2.visit(self)
def visit_condz_expression(self, op, arg):
if isinstance(arg, BinaryCompExpression):
arg.op = op
return arg.visit(self)
atype = arg.get_type()
if atype == 'Z':
if op is Op.EQUAL:
self.write('!')
arg.visit(self)
else:
arg.visit(self)
if atype in 'VBSCIJFD':
self.write(' %s 0' % op)
else:
self.write(' %s null' % op)
def visit_get_instance(self, arg, name):
arg.visit(self)
self.write('.%s' % name)
def visit_get_static(self, cls, name):
self.write('%s.%s' % (cls, name))
def string(s):
ret = ['"']
for c in s:
if c >= ' ' and c < '\x7f':
if c == "'" or c == '"' or c == '\\':
ret.append('\\')
ret.append(c)
continue
elif c <= '\x7f':
if c in ('\r', '\n', '\t'):
ret.append(c.encode('unicode-escape'))
continue
i = ord(c)
ret.append('\\u')
ret.append('%x' % (i >> 12))
ret.append('%x' % ((i >> 8) & 0x0f))
ret.append('%x' % ((i >> 4) & 0x0f))
ret.append('%x' % (i & 0x0f))
ret.append('"')
return ''.join(ret)
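# Hedged usage sketch (not part of the original module): based only on the
# interface above, a control-flow graph and method object produced by the dad
# decompiler elsewhere in androguard would be rendered to Java-like source as:
#
#     writer = Writer(graph, method)
#     writer.write_method()
#     java_source = str(writer)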
|
andymg/androguard
|
androguard/decompiler/dad/writer.py
|
Python
|
apache-2.0
| 17,957
|
[
"VisIt"
] |
1a87d3068918119cf407b49093d8a470953e00fd64ddcd701921e3d85de0f094
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RAgimicrorna(RPackage):
"""Processing and Differential Expression Analysis of Agilent microRNA
chips."""
homepage = "https://bioconductor.org/packages/AgiMicroRna"
git = "https://git.bioconductor.org/packages/AgiMicroRna.git"
version('2.34.0', commit='aaa8cdd70ed2696c313f6240ffbfa044f0d97a7a')
version('2.32.0', commit='681ae17d07e8e533f798a607b761b71a31f407d8')
version('2.30.0', commit='99b5a8284cfe3e93c3ae85a2436e87101b9599dd')
version('2.28.0', commit='62c4a12f1168c7aa1ab46d2c97090ef71478328e')
version('2.26.0', commit='6dd74bae47986f2a23d03e3f1f9f78f701dd8053')
depends_on('r@2.10:', type=('build', 'run'))
depends_on('r-biobase', type=('build', 'run'))
depends_on('r-limma', type=('build', 'run'))
depends_on('r-affy@1.22:', type=('build', 'run'))
depends_on('r-preprocesscore', type=('build', 'run'))
depends_on('r-affycoretools', type=('build', 'run'))
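# Hedged usage note (not part of the original recipe): once this package file
# is picked up by a Spack installation, a specific version would typically be
# installed from the command line, e.g. `spack install r-agimicrorna@2.34.0`.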
|
iulian787/spack
|
var/spack/repos/builtin/packages/r-agimicrorna/package.py
|
Python
|
lgpl-2.1
| 1,162
|
[
"Bioconductor"
] |
3639fc7b15ddf6808019b3547c3d509d6daa60600009646fc31c40bdf4c8b44a
|
# -*- coding: utf-8 -*-
#
# Mayavi documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 12 23:25:24 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
# Adding the current directory to the path, so that sphinx finds the
# extensions.
import sys, os
sys.path.append(os.path.abspath('sphinxext'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'traitsdoc']
## An autodocumentation processor, to insert title of functions before
## the auto-documented functions:
#def add_title(app, what, name, obj, options, signature, return_annotation):
# """ Add a section title with the name of the function before the
# docstring.
# """
#    if what != 'function':
# return
# short_name = name.split('.')[-1]
# extra_lines = """
#
#%s
#...........................................
#
# """ % short_name
# return extra_lines + signature, return_annotation
#
#
#def setup(app):
# """ Register our docstring processor.
# """
# app.connect('autodoc-process-signature', add_title)
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'mayavi'
copyright = u'2008-2015, Enthought Inc.'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
d = {}
fname = os.path.join('..', '..', '..', 'mayavi', '__init__.py')
exec(compile(open(fname).read(), fname, 'exec'), d)
version = release = d['__version__']
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mayavidoc'
# A logo displayed in the html sidebar.
html_logo = 'mayavi-logo.png'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [('index', 'mayavi_user_guide.tex', 'Mayavi User Guide',
'Prabhu Ramachandran, Gael Varoquaux', 'manual')]
# Additional stuff for the LaTeX preamble.
latex_preamble = """
\definecolor{VerbatimColor}{rgb}{0.95,1,0.833}
\definecolor{VerbatimBorderColor}{rgb}{0.6,0.6,0.6}
"""
# A logo displayed on the cover page.
latex_logo = 'm2_about.jpg'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
################################################################################
# A hack:
import shutil
shutil.copyfile('../../CHANGES.txt', './auto/changes.rst')
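# Hedged usage note (not part of the original configuration): with this conf.py
# in place, the HTML documentation is normally built by running Sphinx against
# this directory (docs/source/mayavi), e.g. `sphinx-build -b html . <build_dir>`,
# or through whatever Makefile the project provides.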
|
dmsurti/mayavi
|
docs/source/mayavi/conf.py
|
Python
|
bsd-3-clause
| 5,322
|
[
"Mayavi"
] |
253465e1a22c7f94bd3686e5c6fe438049e929a0c2cfaf148b4e01d706ff0b9a
|
"""Translation of the BioNetGen example "Simple" from the BNG wiki.
http://bionetgen.org/index.php/Simple
"""
from pysb import *
Model()
# Physical and geometric constants
Parameter('NA', 6.0e23) # Avogadro's num
Parameter('f', 0.01) # scaling factor
Expression('Vo', f * 1e-10) # L
Expression('V', f * 3e-12) # L
# Initial concentrations
Parameter('EGF_conc', 2e-9) # nM
Expression('EGF0', EGF_conc * NA * Vo)  # number of molecules
Expression('EGFR0', f * 1.8e5) # copy per cell
# Rate constants
Expression('kp1', 9.0e7 / (NA * Vo)) # input /M/sec
Parameter('km1', 0.06) # /sec
Monomer('EGF', ['R'])
Monomer('EGFR', ['L', 'CR1', 'Y1068'], {'Y1068': ['U', 'P']})
Initial(EGF(R=None), EGF0)
Initial(EGFR(L=None, CR1=None, Y1068='U'), EGFR0)
Rule('egf_binds_egfr', EGF(R=None) + EGFR(L=None) <> EGF(R=1) % EGFR(L=1), kp1, km1)
# Species LR EGF(R!1).EGFR(L!1)
Observable('Lbound', EGF(R=ANY)) # Molecules
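# Hedged simulation sketch (not part of the original example): with a recent
# PySB installation the model defined above can be integrated numerically,
# e.g. (assuming pysb.simulator.ScipyOdeSimulator is available):
#
#     import numpy as np
#     from pysb.simulator import ScipyOdeSimulator
#     tspan = np.linspace(0, 100, 101)
#     result = ScipyOdeSimulator(model, tspan).run()
#     lbound = result.observables['Lbound']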
if __name__ == '__main__':
print __doc__, "\n", model
print "\nNOTE: This model code is designed to be imported and programatically " \
"manipulated,\nnot executed directly. The above output is merely a " \
"diagnostic aid."
|
neurord/pysb
|
pysb/examples/bngwiki_simple.py
|
Python
|
bsd-2-clause
| 1,206
|
[
"Avogadro"
] |
b6b898324e4a6d5dc1271fbc1b311a69e089de2441daf034c9e45e84b1c0c970
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
import sys
import os
import os.path as op
import shutil
import numpy as np
from scipy import linalg
from .fixes import partial
from .utils import (verbose, logger, run_subprocess, deprecated,
get_subjects_dir)
from .io.constants import FIFF
from .io.write import (start_file, start_block, write_float, write_int,
write_float_matrix, write_int_matrix, end_block,
end_file)
from .io.tag import find_tag
from .io.tree import dir_tree_find
from .io.open import fiff_open
from .externals.six import string_types
# ############################################################################
# Compute BEM solution
# define VEC_DIFF(from,to,diff) {\
# (diff)[X] = (to)[X] - (from)[X];\
# The following approach is based on:
#
# de Munck JC: "A linear discretization of the volume conductor boundary
# integral equation using analytically integrated elements",
# IEEE Trans Biomed Eng. 1992 39(9) : 986 - 990
#
def _calc_beta(rk, rk_norm, rk1, rk1_norm):
"""These coefficients are used to calculate the magic vector omega"""
rkk1 = rk1[0] - rk[0]
size = np.sqrt(np.dot(rkk1, rkk1))
rkk1 /= size
num = rk_norm + np.dot(rk, rkk1)
den = rk1_norm + np.dot(rk1, rkk1)
res = np.log(num / den) / size
return res
def _lin_pot_coeff(fros, tri_rr, tri_nn, tri_area):
"""The linear potential matrix element computations"""
from .source_space import _fast_cross_nd_sum
omega = np.zeros((len(fros), 3))
# we replicate a little bit of the _get_solids code here for speed
v1 = tri_rr[np.newaxis, 0, :] - fros
v2 = tri_rr[np.newaxis, 1, :] - fros
v3 = tri_rr[np.newaxis, 2, :] - fros
triples = _fast_cross_nd_sum(v1, v2, v3)
l1 = np.sqrt(np.sum(v1 * v1, axis=1))
l2 = np.sqrt(np.sum(v2 * v2, axis=1))
l3 = np.sqrt(np.sum(v3 * v3, axis=1))
ss = (l1 * l2 * l3 +
np.sum(v1 * v2, axis=1) * l3 +
np.sum(v1 * v3, axis=1) * l2 +
np.sum(v2 * v3, axis=1) * l1)
solids = np.arctan2(triples, ss)
# We *could* subselect the good points from v1, v2, v3, triples, solids,
# l1, l2, and l3, but there are *very* few bad points. So instead we do
# some unnecessary calculations, and then omit them from the final
# solution. These three lines ensure we don't get invalid values in
# _calc_beta.
bad_mask = np.abs(solids) < np.pi / 1e6
l1[bad_mask] = 1.
l2[bad_mask] = 1.
l3[bad_mask] = 1.
# Calculate the magic vector vec_omega
beta = [_calc_beta(v1, l1, v2, l2)[:, np.newaxis],
_calc_beta(v2, l2, v3, l3)[:, np.newaxis],
_calc_beta(v3, l3, v1, l1)[:, np.newaxis]]
vec_omega = (beta[2] - beta[0]) * v1
vec_omega += (beta[0] - beta[1]) * v2
vec_omega += (beta[1] - beta[2]) * v3
area2 = 2.0 * tri_area
n2 = 1.0 / (area2 * area2)
# leave omega = 0 otherwise
# Put it all together...
yys = [v1, v2, v3]
idx = [0, 1, 2, 0, 2]
for k in range(3):
diff = yys[idx[k - 1]] - yys[idx[k + 1]]
zdots = _fast_cross_nd_sum(yys[idx[k + 1]], yys[idx[k - 1]], tri_nn)
omega[:, k] = -n2 * (area2 * zdots * 2. * solids -
triples * (diff * vec_omega).sum(axis=-1))
# omit the bad points from the solution
omega[bad_mask] = 0.
return omega
def _correct_auto_elements(surf, mat):
"""Improve auto-element approximation..."""
pi2 = 2.0 * np.pi
tris_flat = surf['tris'].ravel()
misses = pi2 - mat.sum(axis=1)
for j, miss in enumerate(misses):
# How much is missing?
n_memb = len(surf['neighbor_tri'][j])
# The node itself receives one half
mat[j, j] = miss / 2.0
# The rest is divided evenly among the member nodes...
miss /= (4.0 * n_memb)
members = np.where(j == tris_flat)[0]
mods = members % 3
offsets = np.array([[1, 2], [-1, 1], [-1, -2]])
tri_1 = members + offsets[mods, 0]
tri_2 = members + offsets[mods, 1]
for t1, t2 in zip(tri_1, tri_2):
mat[j, tris_flat[t1]] += miss
mat[j, tris_flat[t2]] += miss
return
def _fwd_bem_lin_pot_coeff(surfs):
"""Calculate the coefficients for linear collocation approach"""
# taken from fwd_bem_linear_collocation.c
nps = [surf['np'] for surf in surfs]
np_tot = sum(nps)
coeff = np.zeros((np_tot, np_tot))
offsets = np.cumsum(np.concatenate(([0], nps)))
for si_1, surf1 in enumerate(surfs):
rr_ord = np.arange(nps[si_1])
for si_2, surf2 in enumerate(surfs):
logger.info(" %s (%d) -> %s (%d) ..." %
(_bem_explain_surface(surf1['id']), nps[si_1],
_bem_explain_surface(surf2['id']), nps[si_2]))
tri_rr = surf2['rr'][surf2['tris']]
tri_nn = surf2['tri_nn']
tri_area = surf2['tri_area']
submat = coeff[offsets[si_1]:offsets[si_1 + 1],
offsets[si_2]:offsets[si_2 + 1]] # view
for k in range(surf2['ntri']):
tri = surf2['tris'][k]
if si_1 == si_2:
skip_idx = ((rr_ord == tri[0]) |
(rr_ord == tri[1]) |
(rr_ord == tri[2]))
else:
skip_idx = list()
# No contribution from a triangle that
# this vertex belongs to
# if sidx1 == sidx2 and (tri == j).any():
# continue
# Otherwise do the hard job
coeffs = _lin_pot_coeff(surf1['rr'], tri_rr[k], tri_nn[k],
tri_area[k])
coeffs[skip_idx] = 0.
submat[:, tri] -= coeffs
if si_1 == si_2:
_correct_auto_elements(surf1, submat)
return coeff
def _fwd_bem_multi_solution(solids, gamma, nps):
"""Do multi surface solution
* Invert I - solids/(2*M_PI)
* Take deflation into account
* The matrix is destroyed after inversion
* This is the general multilayer case
"""
pi2 = 1.0 / (2 * np.pi)
n_tot = np.sum(nps)
assert solids.shape == (n_tot, n_tot)
nsurf = len(nps)
defl = 1.0 / n_tot
# Modify the matrix
offsets = np.cumsum(np.concatenate(([0], nps)))
for si_1 in range(nsurf):
for si_2 in range(nsurf):
mult = pi2 if gamma is None else pi2 * gamma[si_1, si_2]
slice_j = slice(offsets[si_1], offsets[si_1 + 1])
slice_k = slice(offsets[si_2], offsets[si_2 + 1])
solids[slice_j, slice_k] = defl - solids[slice_j, slice_k] * mult
solids += np.eye(n_tot)
return linalg.inv(solids, overwrite_a=True)
def _fwd_bem_homog_solution(solids, nps):
"""Helper to make a homogeneous solution"""
return _fwd_bem_multi_solution(solids, None, nps)
def _fwd_bem_ip_modify_solution(solution, ip_solution, ip_mult, n_tri):
"""Modify the solution according to the IP approach"""
n_last = n_tri[-1]
mult = (1.0 + ip_mult) / ip_mult
logger.info(' Combining...')
offsets = np.cumsum(np.concatenate(([0], n_tri)))
for si in range(len(n_tri)):
# Pick the correct submatrix (right column) and multiply
sub = solution[offsets[si]:offsets[si + 1], np.sum(n_tri[:-1]):]
# Multiply
sub -= 2 * np.dot(sub, ip_solution)
# The lower right corner is a special case
sub[-n_last:, -n_last:] += mult * ip_solution
# Final scaling
logger.info(' Scaling...')
solution *= ip_mult
return
def _fwd_bem_linear_collocation_solution(m):
"""Compute the linear collocation potential solution"""
# first, add surface geometries
from .surface import _complete_surface_info
for surf in m['surfs']:
_complete_surface_info(surf, verbose=False)
logger.info('Computing the linear collocation solution...')
logger.info(' Matrix coefficients...')
coeff = _fwd_bem_lin_pot_coeff(m['surfs'])
m['nsol'] = len(coeff)
logger.info(" Inverting the coefficient matrix...")
nps = [surf['np'] for surf in m['surfs']]
m['solution'] = _fwd_bem_multi_solution(coeff, m['gamma'], nps)
if len(m['surfs']) == 3:
ip_mult = m['sigma'][1] / m['sigma'][2]
if ip_mult <= FIFF.FWD_BEM_IP_APPROACH_LIMIT:
logger.info('IP approach required...')
logger.info(' Matrix coefficients (homog)...')
coeff = _fwd_bem_lin_pot_coeff([m['surfs'][-1]])
logger.info(' Inverting the coefficient matrix (homog)...')
ip_solution = _fwd_bem_homog_solution(coeff,
[m['surfs'][-1]['np']])
logger.info(' Modify the original solution to incorporate '
'IP approach...')
_fwd_bem_ip_modify_solution(m['solution'], ip_solution, ip_mult,
nps)
m['bem_method'] = FIFF.FWD_BEM_LINEAR_COLL
logger.info("Solution ready.")
@verbose
def make_bem_solution(surfs, verbose=None):
"""Create a BEM solution using the linear collocation approach
Parameters
----------
surfs : list of dict
The BEM surfaces to use.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
bem : dict
The BEM solution.
Notes
-----
.. versionadded:: 0.10.0
See Also
--------
make_bem_model
read_bem_surfaces
write_bem_surfaces
read_bem_solution
write_bem_solution
"""
logger.info('Approximation method : Linear collocation\n')
if isinstance(surfs, string_types):
# Load the surfaces
logger.info('Loading surfaces...')
surfs = read_bem_surfaces(surfs)
bem = dict(surfs=surfs)
_add_gamma_multipliers(bem)
if len(bem['surfs']) == 3:
logger.info('Three-layer model surfaces loaded.')
elif len(bem['surfs']) == 1:
logger.info('Homogeneous model surface loaded.')
else:
raise RuntimeError('Only 1- or 3-layer BEM computations supported')
_fwd_bem_linear_collocation_solution(bem)
logger.info('BEM geometry computations complete.')
return bem
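# Hedged usage sketch (not part of the original module): the surfaces can be
# passed either as a list of dicts or as a filename; 'sample-bem.fif' below is
# hypothetical.
#
#     surfs = read_bem_surfaces('sample-bem.fif')
#     bem = make_bem_solution(surfs)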
# ############################################################################
# Make BEM model
def _ico_downsample(surf, dest_grade):
"""Downsample the surface if isomorphic to a subdivided icosahedron"""
from .surface import _get_ico_surface
n_tri = surf['ntri']
found = -1
bad_msg = ("A surface with %d triangles cannot be isomorphic with a "
"subdivided icosahedron." % surf['ntri'])
if n_tri % 20 != 0:
raise RuntimeError(bad_msg)
n_tri = n_tri // 20
found = int(round(np.log(n_tri) / np.log(4)))
if n_tri != 4 ** found:
raise RuntimeError(bad_msg)
del n_tri
if dest_grade > found:
raise RuntimeError('For this surface, decimation grade should be %d '
'or less, not %s.' % (found, dest_grade))
source = _get_ico_surface(found)
dest = _get_ico_surface(dest_grade, patch_stats=True)
del dest['tri_cent']
del dest['tri_nn']
del dest['neighbor_tri']
del dest['tri_area']
if not np.array_equal(source['tris'], surf['tris']):
raise RuntimeError('The source surface has a matching number of '
'triangles but ordering is wrong')
logger.info('Going from %dth to %dth subdivision of an icosahedron '
'(n_tri: %d -> %d)' % (found, dest_grade, surf['ntri'],
dest['ntri']))
# Find the mapping
dest['rr'] = surf['rr'][_get_ico_map(source, dest)]
return dest
def _get_ico_map(fro, to):
"""Helper to get a mapping between ico surfaces"""
from .surface import _compute_nearest
nearest, dists = _compute_nearest(fro['rr'], to['rr'], return_dists=True)
n_bads = (dists > 5e-3).sum()
if n_bads > 0:
raise RuntimeError('No matching vertex for %d destination vertices'
% (n_bads))
return nearest
def _order_surfaces(surfs):
"""Reorder the surfaces"""
if len(surfs) != 3:
return surfs
# we have three surfaces
surf_order = [FIFF.FIFFV_BEM_SURF_ID_HEAD,
FIFF.FIFFV_BEM_SURF_ID_SKULL,
FIFF.FIFFV_BEM_SURF_ID_BRAIN]
ids = np.array([surf['id'] for surf in surfs])
if set(ids) != set(surf_order):
raise RuntimeError('bad surface ids: %s' % ids)
order = [np.where(ids == id_)[0][0] for id_ in surf_order]
surfs = [surfs[idx] for idx in order]
return surfs
def _assert_complete_surface(surf):
"""Check the sum of solid angles as seen from inside"""
# from surface_checks.c
from .source_space import _get_solids
tot_angle = 0.
# Center of mass....
cm = surf['rr'].mean(axis=0)
logger.info('%s CM is %6.2f %6.2f %6.2f mm' %
(_surf_name[surf['id']],
1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
tot_angle = _get_solids(surf['rr'][surf['tris']], cm[np.newaxis, :])[0]
if np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5:
raise RuntimeError('Surface %s is not complete (sum of solid angles '
'= %g * 4*PI instead).' %
(_surf_name[surf['id']], tot_angle))
_surf_name = {
FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ',
FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull',
FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner skull',
FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown ',
}
def _assert_inside(fro, to):
"""Helper to check one set of points is inside a surface"""
# this is "is_inside" in surface_checks.c
from .source_space import _get_solids
tot_angle = _get_solids(to['rr'][to['tris']], fro['rr'])
if (np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5).any():
raise RuntimeError('Surface %s is not completely inside surface %s'
% (_surf_name[fro['id']], _surf_name[to['id']]))
def _check_surfaces(surfs):
"""Check that the surfaces are complete and non-intersecting"""
for surf in surfs:
_assert_complete_surface(surf)
# Then check the topology
for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):
logger.info('Checking that %s surface is inside %s surface...' %
(_surf_name[surf_2['id']], _surf_name[surf_1['id']]))
_assert_inside(surf_2, surf_1)
def _check_surface_size(surf):
"""Check that the coordinate limits are reasonable"""
sizes = surf['rr'].max(axis=0) - surf['rr'].min(axis=0)
if (sizes < 0.05).any():
raise RuntimeError('Dimensions of the surface %s seem too small '
                           '(%9.5f mm). Maybe the unit of measure is '
'meters instead of mm' %
(_surf_name[surf['id']], 1000 * sizes.min()))
def _check_thicknesses(surfs):
"""How close are we?"""
from .surface import _compute_nearest
for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):
min_dist = _compute_nearest(surf_1['rr'], surf_2['rr'],
return_dists=True)[0]
min_dist = min_dist.min()
logger.info('Checking distance between %s and %s surfaces...' %
(_surf_name[surf_1['id']], _surf_name[surf_2['id']]))
logger.info('Minimum distance between the %s and %s surfaces is '
'approximately %6.1f mm' %
(_surf_name[surf_1['id']], _surf_name[surf_2['id']],
1000 * min_dist))
def _surfaces_to_bem(fname_surfs, ids, sigmas, ico=None):
"""Convert surfaces to a BEM
"""
from .surface import _read_surface_geom
# equivalent of mne_surf2bem
surfs = list()
assert len(fname_surfs) in (1, 3)
for fname in fname_surfs:
surfs.append(_read_surface_geom(fname, patch_stats=False,
verbose=False))
surfs[-1]['rr'] /= 1000.
# Downsampling if the surface is isomorphic with a subdivided icosahedron
if ico is not None:
for si, surf in enumerate(surfs):
surfs[si] = _ico_downsample(surf, ico)
for surf, id_ in zip(surfs, ids):
surf['id'] = id_
# Shifting surfaces is not implemented here
# Order the surfaces for the benefit of the topology checks
for surf, sigma in zip(surfs, sigmas):
surf['sigma'] = sigma
surfs = _order_surfaces(surfs)
# Check topology as best we can
_check_surfaces(surfs)
for surf in surfs:
_check_surface_size(surf)
_check_thicknesses(surfs)
logger.info('Surfaces passed the basic topology checks.')
return surfs
@verbose
def make_bem_model(subject, ico=4, conductivity=(0.3, 0.006, 0.3),
subjects_dir=None, verbose=None):
"""Create a BEM model for a subject
Parameters
----------
subject : str
The subject.
ico : int | None
The surface ico downsampling to use, e.g. 5=20484, 4=5120, 3=1280.
    conductivity : array of float, shape (3,) or (1,)
The conductivities to use for each shell. Should be a single element
for a one-layer model, or three elements for a three-layer model.
Defaults to ``[0.3, 0.006, 0.3]``. The MNE-C default for a
single-layer model would be ``[0.3]``.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
surfaces : list of dict
The BEM surfaces.
Notes
-----
.. versionadded:: 0.10.0
See Also
--------
make_bem_solution
make_sphere_model
read_bem_surfaces
write_bem_surfaces
"""
conductivity = np.array(conductivity, float)
if conductivity.ndim != 1 or conductivity.size not in (1, 3):
raise ValueError('conductivity must be 1D array-like with 1 or 3 '
'elements')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
subject_dir = op.join(subjects_dir, subject)
bem_dir = op.join(subject_dir, 'bem')
inner_skull = op.join(bem_dir, 'inner_skull.surf')
outer_skull = op.join(bem_dir, 'outer_skull.surf')
outer_skin = op.join(bem_dir, 'outer_skin.surf')
surfaces = [inner_skull, outer_skull, outer_skin]
ids = [FIFF.FIFFV_BEM_SURF_ID_BRAIN,
FIFF.FIFFV_BEM_SURF_ID_SKULL,
FIFF.FIFFV_BEM_SURF_ID_HEAD]
logger.info('Creating the BEM geometry...')
if len(conductivity) == 1:
surfaces = surfaces[:1]
ids = ids[:1]
surfaces = _surfaces_to_bem(surfaces, ids, conductivity, ico)
logger.info('Complete.\n')
return surfaces
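# Hedged usage sketch (not part of the original module): assuming the
# FreeSurfer surfaces exist under <subjects_dir>/<subject>/bem, a three-layer
# model is built and then turned into a solution; the subject name is
# hypothetical.
#
#     surfaces = make_bem_model('sample', ico=4,
#                               conductivity=(0.3, 0.006, 0.3))
#     bem = make_bem_solution(surfaces)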
# ############################################################################
# Compute EEG sphere model
def _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms):
"""Get the model depended weighting factor for n"""
nlayer = len(m['layers'])
if nlayer in (0, 1):
return 1.
# Initialize the arrays
c1 = np.zeros(nlayer - 1)
c2 = np.zeros(nlayer - 1)
cr = np.zeros(nlayer - 1)
cr_mult = np.zeros(nlayer - 1)
for k in range(nlayer - 1):
c1[k] = m['layers'][k]['sigma'] / m['layers'][k + 1]['sigma']
c2[k] = c1[k] - 1.0
cr_mult[k] = m['layers'][k]['rel_rad']
cr[k] = cr_mult[k]
cr_mult[k] *= cr_mult[k]
coeffs = np.zeros(n_terms - 1)
for n in range(1, n_terms):
# Increment the radius coefficients
for k in range(nlayer - 1):
cr[k] *= cr_mult[k]
# Multiply the matrices
M = np.eye(2)
n1 = n + 1.0
for k in range(nlayer - 2, -1, -1):
M = np.dot([[n + n1 * c1[k], n1 * c2[k] / cr[k]],
[n * c2[k] * cr[k], n1 + n * c1[k]]], M)
num = n * (2.0 * n + 1.0) ** (nlayer - 1)
coeffs[n - 1] = num / (n * M[1, 1] + n1 * M[1, 0])
return coeffs
def _compose_linear_fitting_data(mu, u):
# y is the data to be fitted (nterms-1 x 1)
# M is the model matrix (nterms-1 x nfit-1)
for k in range(u['nterms'] - 1):
k1 = k + 1
mu1n = np.power(mu[0], k1)
u['y'][k] = u['w'][k] * (u['fn'][k1] - mu1n * u['fn'][0])
for p in range(u['nfit'] - 1):
u['M'][k][p] = u['w'][k] * (np.power(mu[p + 1], k1) - mu1n)
def _compute_linear_parameters(mu, u):
"""Compute the best-fitting linear parameters"""
_compose_linear_fitting_data(mu, u)
uu, sing, vv = linalg.svd(u['M'], full_matrices=False)
# Compute the residuals
u['resi'] = u['y'].copy()
vec = np.empty(u['nfit'] - 1)
for p in range(u['nfit'] - 1):
vec[p] = np.dot(uu[:, p], u['y'])
for k in range(u['nterms'] - 1):
u['resi'][k] -= uu[k, p] * vec[p]
vec[p] = vec[p] / sing[p]
lambda_ = np.zeros(u['nfit'])
for p in range(u['nfit'] - 1):
sum_ = 0.
for q in range(u['nfit'] - 1):
sum_ += vv[q, p] * vec[q]
lambda_[p + 1] = sum_
lambda_[0] = u['fn'][0] - np.sum(lambda_[1:])
rv = np.dot(u['resi'], u['resi']) / np.dot(u['y'], u['y'])
return rv, lambda_
def _one_step(mu, u):
"""Evaluate the residual sum of squares fit for one set of mu values"""
if np.abs(mu).max() > 1.0:
return 1.0
# Compose the data for the linear fitting, compute SVD, then residuals
_compose_linear_fitting_data(mu, u)
u['uu'], u['sing'], u['vv'] = linalg.svd(u['M'])
u['resi'][:] = u['y'][:]
for p in range(u['nfit'] - 1):
dot = np.dot(u['uu'][p], u['y'])
for k in range(u['nterms'] - 1):
u['resi'][k] = u['resi'][k] - u['uu'][p, k] * dot
# Return their sum of squares
return np.dot(u['resi'], u['resi'])
def _fwd_eeg_fit_berg_scherg(m, nterms, nfit):
"""Fit the Berg-Scherg equivalent spherical model dipole parameters"""
from scipy.optimize import fmin_cobyla
assert nfit >= 2
u = dict(y=np.zeros(nterms - 1), resi=np.zeros(nterms - 1),
nfit=nfit, nterms=nterms, M=np.zeros((nterms - 1, nfit - 1)))
# (1) Calculate the coefficients of the true expansion
u['fn'] = _fwd_eeg_get_multi_sphere_model_coeffs(m, nterms + 1)
# (2) Calculate the weighting
f = (min([layer['rad'] for layer in m['layers']]) /
max([layer['rad'] for layer in m['layers']]))
# correct weighting
k = np.arange(1, nterms + 1)
u['w'] = np.sqrt((2.0 * k + 1) * (3.0 * k + 1.0) /
k) * np.power(f, (k - 1.0))
u['w'][-1] = 0
# Do the nonlinear minimization, constraining mu to the interval [-1, +1]
mu_0 = np.random.RandomState(0).rand(nfit) * f
fun = partial(_one_step, u=u)
    max_ = 1. - 2e-4  # adjust for the fmin_cobyla "catol" option, which not all scipy versions have
cons = [(lambda x: max_ - np.abs(x[ii])) for ii in range(nfit)]
mu = fmin_cobyla(fun, mu_0, cons, rhobeg=0.5, rhoend=5e-3, disp=0)
# (6) Do the final step: calculation of the linear parameters
rv, lambda_ = _compute_linear_parameters(mu, u)
order = np.argsort(mu)[::-1]
mu, lambda_ = mu[order], lambda_[order] # sort: largest mu first
m['mu'] = mu
# This division takes into account the actual conductivities
m['lambda'] = lambda_ / m['layers'][-1]['sigma']
m['nfit'] = nfit
return rv
@verbose
def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None,
relative_radii=(0.90, 0.92, 0.97, 1.0),
sigmas=(0.33, 1.0, 0.004, 0.33), verbose=None):
"""Create a spherical model for forward solution calculation
Parameters
----------
r0 : array-like | str
Head center to use (in head coordinates). If 'auto', the head
center will be calculated from the digitization points in info.
head_radius : float | str | None
If float, compute spherical shells for EEG using the given radius.
        If 'auto', estimate an appropriate radius from the dig points in Info.
If None, exclude shells.
info : instance of mne.io.meas_info.Info | None
Measurement info. Only needed if ``r0`` or ``head_radius`` are
``'auto'``.
relative_radii : array-like
Relative radii for the spherical shells.
sigmas : array-like
Sigma values for the spherical shells.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
sphere : dict
A spherical BEM.
Notes
-----
.. versionadded:: 0.9.0
See Also
--------
make_bem_model
make_bem_solution
"""
for name in ('r0', 'head_radius'):
param = locals()[name]
if isinstance(param, string_types):
if param != 'auto':
raise ValueError('%s, if str, must be "auto" not "%s"'
% (name, param))
if (isinstance(r0, string_types) and r0 == 'auto') or \
(isinstance(head_radius, string_types) and head_radius == 'auto'):
if info is None:
raise ValueError('Info must not be None for auto mode')
head_radius_fit, r0_fit = fit_sphere_to_headshape(info)[:2]
if isinstance(r0, string_types):
r0 = r0_fit / 1000.
if isinstance(head_radius, string_types):
head_radius = head_radius_fit / 1000.
sphere = dict(r0=np.array(r0), is_sphere=True,
coord_frame=FIFF.FIFFV_COORD_HEAD)
sphere['layers'] = list()
if head_radius is not None:
# Eventually these could be configurable...
relative_radii = np.array(relative_radii, float)
sigmas = np.array(sigmas, float)
order = np.argsort(relative_radii)
relative_radii = relative_radii[order]
sigmas = sigmas[order]
layers = sphere['layers']
for rel_rad, sig in zip(relative_radii, sigmas):
# sort layers by (relative) radius, and scale radii
layer = dict(rad=rel_rad, sigma=sig)
layer['rel_rad'] = layer['rad'] = rel_rad
layers.append(layer)
# scale the radii
R = layers[-1]['rad']
rR = layers[-1]['rel_rad']
for layer in layers:
layer['rad'] /= R
layer['rel_rad'] /= rR
#
# Setup the EEG sphere model calculations
#
# Scale the relative radii
for k in range(len(relative_radii)):
layers[k]['rad'] = (head_radius * layers[k]['rel_rad'])
rv = _fwd_eeg_fit_berg_scherg(sphere, 200, 3)
logger.info('\nEquiv. model fitting -> RV = %g %%' % (100 * rv))
for k in range(3):
logger.info('mu%d = %g lambda%d = %g'
% (k + 1, sphere['mu'][k], k + 1,
layers[-1]['sigma'] * sphere['lambda'][k]))
logger.info('Set up EEG sphere model with scalp radius %7.1f mm\n'
% (1000 * head_radius,))
return sphere
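# Hedged usage sketch (not part of the original module): with explicit values
# no Info object is needed; passing r0='auto' or head_radius='auto' instead
# requires a measurement info with digitization points.
#
#     sphere = make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09)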
# #############################################################################
# Helpers
@verbose
def fit_sphere_to_headshape(info, dig_kinds=(FIFF.FIFFV_POINT_EXTRA,),
verbose=None):
"""Fit a sphere to the headshape points to determine head center
Parameters
----------
info : instance of mne.io.meas_info.Info
Measurement info.
dig_kinds : tuple of int
Kind of digitization points to use in the fitting. These can be
any kind defined in io.constants.FIFF:
FIFFV_POINT_CARDINAL
FIFFV_POINT_HPI
FIFFV_POINT_EEG
FIFFV_POINT_ECG
FIFFV_POINT_EXTRA
Defaults to (FIFFV_POINT_EXTRA,).
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
radius : float
Sphere radius in mm.
origin_head: ndarray, shape (3,)
Head center in head coordinates (mm).
origin_device: ndarray, shape (3,)
Head center in device coordinates (mm).
"""
    # get head digitization points of the specified kind
hsp = [p['r'] for p in info['dig'] if p['kind'] in dig_kinds]
# exclude some frontal points (nose etc.)
hsp = [p for p in hsp if not (p[2] < 0 and p[1] > 0)]
if len(hsp) == 0:
raise ValueError('No head digitization points of the specified '
'kinds (%s) found.' % dig_kinds)
hsp = 1e3 * np.array(hsp)
radius, origin_head = _fit_sphere(hsp, disp=False)
# compute origin in device coordinates
trans = info['dev_head_t']
if trans['from'] != FIFF.FIFFV_COORD_DEVICE \
or trans['to'] != FIFF.FIFFV_COORD_HEAD:
raise RuntimeError('device to head transform not found')
head_to_dev = linalg.inv(trans['trans'])
origin_device = 1e3 * np.dot(head_to_dev,
np.r_[1e-3 * origin_head, 1.0])[:3]
logger.info('Fitted sphere radius:'.ljust(30) + '%0.1f mm' % radius)
logger.info('Origin head coordinates:'.ljust(30) +
'%0.1f %0.1f %0.1f mm' % tuple(origin_head))
logger.info('Origin device coordinates:'.ljust(30) +
'%0.1f %0.1f %0.1f mm' % tuple(origin_device))
return radius, origin_head, origin_device
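# Hedged usage sketch (not part of the original module): given a measurement
# info structure with digitization points (loaded elsewhere), the head center
# is estimated as follows; the returned values are in mm.
#
#     radius, origin_head, origin_device = fit_sphere_to_headshape(info)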
def _fit_sphere(points, disp='auto'):
"""Aux function to fit points to a sphere"""
from scipy.optimize import fmin_powell
if isinstance(disp, string_types) and disp == 'auto':
disp = True if logger.level <= 20 else False
# initial guess for center and radius
xradius = (np.max(points[:, 0]) - np.min(points[:, 0])) / 2.
yradius = (np.max(points[:, 1]) - np.min(points[:, 1])) / 2.
radius_init = (xradius + yradius) / 2.
center_init = np.array([0.0, 0.0, np.max(points[:, 2]) - radius_init])
# optimization
x0 = np.r_[center_init, radius_init]
def cost_fun(x, points):
return np.sum((np.sqrt(np.sum((points - x[:3]) ** 2, axis=1)) -
x[3]) ** 2)
x_opt = fmin_powell(cost_fun, x0, args=(points,), disp=disp)
origin = x_opt[:3]
radius = x_opt[3]
return radius, origin
# ############################################################################
# Create BEM surfaces
@verbose
def make_watershed_bem(subject, subjects_dir=None, overwrite=False,
volume='T1', atlas=False, gcaatlas=False, preflood=None,
verbose=None):
"""
Create BEM surfaces using the watershed algorithm included with FreeSurfer
Parameters
----------
subject : str
Subject name (required)
subjects_dir : str
Directory containing subjects data. If None use
the Freesurfer SUBJECTS_DIR environment variable.
overwrite : bool
Write over existing files
volume : str
Defaults to T1
atlas : bool
Specify the --atlas option for mri_watershed
gcaatlas : bool
Use the subcortical atlas
preflood : int
Change the preflood height
verbose : bool, str or None
If not None, override default verbose level
.. versionadded:: 0.10
"""
from .surface import read_surface
env = os.environ.copy()
if not os.environ.get('FREESURFER_HOME'):
raise RuntimeError('FREESURFER_HOME environment variable not set')
env['SUBJECT'] = subject
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
env['SUBJECTS_DIR'] = subjects_dir
subject_dir = op.join(subjects_dir, subject)
mri_dir = op.join(subject_dir, 'mri')
T1_dir = op.join(mri_dir, volume)
T1_mgz = op.join(mri_dir, volume + '.mgz')
bem_dir = op.join(subject_dir, 'bem')
ws_dir = op.join(subject_dir, 'bem', 'watershed')
if not op.isdir(subject_dir):
raise RuntimeError('Could not find the MRI data directory "%s"'
% subject_dir)
if not op.isdir(bem_dir):
os.makedirs(bem_dir)
if not op.isdir(T1_dir) and not op.isfile(T1_mgz):
raise RuntimeError('Could not find the MRI data')
if op.isdir(ws_dir):
if not overwrite:
raise RuntimeError('%s already exists. Use the --overwrite option'
                               ' to recreate it.' % ws_dir)
else:
shutil.rmtree(ws_dir)
# put together the command
cmd = ['mri_watershed']
if preflood:
cmd += ["-h", "%s" % int(preflood)]
if gcaatlas:
cmd += ['-atlas', '-T1', '-brain_atlas', env['FREESURFER_HOME'] +
'/average/RB_all_withskull_2007-08-08.gca',
subject_dir + '/mri/transforms/talairach_with_skull.lta']
elif atlas:
cmd += ['-atlas']
if op.exists(T1_mgz):
cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_mgz,
op.join(ws_dir, 'ws')]
else:
cmd += ['-useSRAS', '-surf', op.join(ws_dir, subject), T1_dir,
op.join(ws_dir, 'ws')]
# report and run
logger.info('\nRunning mri_watershed for BEM segmentation with the '
'following parameters:\n\n'
'SUBJECTS_DIR = %s\n'
'SUBJECT = %s\n'
'Results dir = %s\n' % (subjects_dir, subject, ws_dir))
os.makedirs(op.join(ws_dir, 'ws'))
run_subprocess(cmd, env=env, stdout=sys.stdout)
#
os.chdir(ws_dir)
if op.isfile(T1_mgz):
# XXX : do this with python code
surfaces = [subject + '_brain_surface', subject +
'_inner_skull_surface', subject + '_outer_skull_surface',
subject + '_outer_skin_surface']
for s in surfaces:
cmd = ['mne_convert_surface', '--surf', s, '--mghmri', T1_mgz,
'--surfout', s, "--replacegeom"]
run_subprocess(cmd, env=env, stdout=sys.stdout)
os.chdir(bem_dir)
if op.isfile(subject + '-head.fif'):
os.remove(subject + '-head.fif')
# run the equivalent of mne_surf2bem
points, tris = read_surface(op.join(ws_dir,
subject + '_outer_skin_surface'))
points *= 1e-3
surf = dict(coord_frame=5, id=4, nn=None, np=len(points),
ntri=len(tris), rr=points, sigma=1, tris=tris)
write_bem_surfaces(subject + '-head.fif', surf)
logger.info('Created %s/%s-head.fif\n\nComplete.' % (bem_dir, subject))
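# Hedged usage sketch (not part of the original module): this function shells
# out to FreeSurfer, so FREESURFER_HOME must be set and the subject's MRI must
# already exist under SUBJECTS_DIR; the subject name is hypothetical.
#
#     make_watershed_bem('sample', overwrite=True)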
# ############################################################################
# Read
@verbose
def read_bem_surfaces(fname, patch_stats=False, s_id=None, verbose=None):
"""Read the BEM surfaces from a FIF file
Parameters
----------
fname : string
The name of the file containing the surfaces.
patch_stats : bool, optional (default False)
Calculate and add cortical patch statistics to the surfaces.
s_id : int | None
If int, only read and return the surface with the given s_id.
An error will be raised if it doesn't exist. If None, all
surfaces are read and returned.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
surf: list | dict
A list of dictionaries that each contain a surface. If s_id
is not None, only the requested surface will be returned.
"""
from .surface import _complete_surface_info
# Default coordinate frame
coord_frame = FIFF.FIFFV_COORD_MRI
# Open the file, create directory
f, tree, _ = fiff_open(fname)
with f as fid:
# Find BEM
bem = dir_tree_find(tree, FIFF.FIFFB_BEM)
if bem is None or len(bem) == 0:
raise ValueError('BEM data not found')
bem = bem[0]
# Locate all surfaces
bemsurf = dir_tree_find(bem, FIFF.FIFFB_BEM_SURF)
if bemsurf is None:
raise ValueError('BEM surface data not found')
logger.info(' %d BEM surfaces found' % len(bemsurf))
# Coordinate frame possibly at the top level
tag = find_tag(fid, bem, FIFF.FIFF_BEM_COORD_FRAME)
if tag is not None:
coord_frame = tag.data
# Read all surfaces
if s_id is not None:
surf = [_read_bem_surface(fid, bsurf, coord_frame, s_id)
for bsurf in bemsurf]
surf = [s for s in surf if s is not None]
if not len(surf) == 1:
raise ValueError('surface with id %d not found' % s_id)
else:
surf = list()
for bsurf in bemsurf:
logger.info(' Reading a surface...')
this = _read_bem_surface(fid, bsurf, coord_frame)
surf.append(this)
logger.info('[done]')
logger.info(' %d BEM surfaces read' % len(surf))
if patch_stats:
for this in surf:
_complete_surface_info(this)
return surf[0] if s_id is not None else surf
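# Hedged usage sketch (not part of the original module): 'sample-bem.fif' is a
# hypothetical filename; patch_stats=True additionally completes the
# per-surface geometry information on load.
#
#     surfs = read_bem_surfaces('sample-bem.fif', patch_stats=True)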
def _read_bem_surface(fid, this, def_coord_frame, s_id=None):
"""Read one bem surface
"""
# fid should be open as a context manager here
res = dict()
# Read all the interesting stuff
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_ID)
if tag is None:
res['id'] = FIFF.FIFFV_BEM_SURF_ID_UNKNOWN
else:
res['id'] = int(tag.data)
if s_id is not None and res['id'] != s_id:
return None
tag = find_tag(fid, this, FIFF.FIFF_BEM_SIGMA)
res['sigma'] = 1.0 if tag is None else float(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NNODE)
if tag is None:
raise ValueError('Number of vertices not found')
res['np'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NTRI)
if tag is None:
raise ValueError('Number of triangles not found')
res['ntri'] = int(tag.data)
tag = find_tag(fid, this, FIFF.FIFF_MNE_COORD_FRAME)
if tag is None:
tag = find_tag(fid, this, FIFF.FIFF_BEM_COORD_FRAME)
if tag is None:
res['coord_frame'] = def_coord_frame
else:
res['coord_frame'] = tag.data
else:
res['coord_frame'] = tag.data
# Vertices, normals, and triangles
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NODES)
if tag is None:
raise ValueError('Vertex data not found')
res['rr'] = tag.data.astype(np.float) # XXX : double because of mayavi bug
if res['rr'].shape[0] != res['np']:
raise ValueError('Vertex information is incorrect')
tag = find_tag(fid, this, FIFF.FIFF_MNE_SOURCE_SPACE_NORMALS)
if tag is None:
        tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_NORMALS)
if tag is None:
res['nn'] = list()
else:
res['nn'] = tag.data
if res['nn'].shape[0] != res['np']:
raise ValueError('Vertex normal information is incorrect')
tag = find_tag(fid, this, FIFF.FIFF_BEM_SURF_TRIANGLES)
if tag is None:
raise ValueError('Triangulation not found')
res['tris'] = tag.data - 1 # index start at 0 in Python
if res['tris'].shape[0] != res['ntri']:
raise ValueError('Triangulation information is incorrect')
return res
@verbose
def read_bem_solution(fname, verbose=None):
"""Read the BEM solution from a file
Parameters
----------
fname : string
The file containing the BEM solution.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
bem : dict
The BEM solution.
"""
# mirrors fwd_bem_load_surfaces from fwd_bem_model.c
logger.info('Loading surfaces...')
bem_surfs = read_bem_surfaces(fname, patch_stats=True, verbose=False)
if len(bem_surfs) == 3:
logger.info('Three-layer model surfaces loaded.')
needed = np.array([FIFF.FIFFV_BEM_SURF_ID_HEAD,
FIFF.FIFFV_BEM_SURF_ID_SKULL,
FIFF.FIFFV_BEM_SURF_ID_BRAIN])
if not all(x['id'] in needed for x in bem_surfs):
raise RuntimeError('Could not find necessary BEM surfaces')
# reorder surfaces as necessary (shouldn't need to?)
reorder = [None] * 3
for x in bem_surfs:
reorder[np.where(x['id'] == needed)[0][0]] = x
bem_surfs = reorder
elif len(bem_surfs) == 1:
if not bem_surfs[0]['id'] == FIFF.FIFFV_BEM_SURF_ID_BRAIN:
raise RuntimeError('BEM Surfaces not found')
logger.info('Homogeneous model surface loaded.')
# convert from surfaces to solution
bem = dict(surfs=bem_surfs)
logger.info('\nLoading the solution matrix...\n')
f, tree, _ = fiff_open(fname)
with f as fid:
# Find the BEM data
nodes = dir_tree_find(tree, FIFF.FIFFB_BEM)
if len(nodes) == 0:
raise RuntimeError('No BEM data in %s' % fname)
bem_node = nodes[0]
# Approximation method
tag = find_tag(f, bem_node, FIFF.FIFF_BEM_APPROX)
if tag is None:
raise RuntimeError('No BEM solution found in %s' % fname)
method = tag.data[0]
if method not in (FIFF.FIFFV_BEM_APPROX_CONST,
FIFF.FIFFV_BEM_APPROX_LINEAR):
raise RuntimeError('Cannot handle BEM approximation method : %d'
% method)
tag = find_tag(fid, bem_node, FIFF.FIFF_BEM_POT_SOLUTION)
dims = tag.data.shape
if len(dims) != 2:
raise RuntimeError('Expected a two-dimensional solution matrix '
                               'instead of a %d-dimensional one' % len(dims))
dim = 0
for surf in bem['surfs']:
if method == FIFF.FIFFV_BEM_APPROX_LINEAR:
dim += surf['np']
else: # method == FIFF.FIFFV_BEM_APPROX_CONST
dim += surf['ntri']
if dims[0] != dim or dims[1] != dim:
raise RuntimeError('Expected a %d x %d solution matrix instead of '
'a %d x %d one' % (dim, dim, dims[1], dims[0]))
sol = tag.data
nsol = dims[0]
bem['solution'] = sol
bem['nsol'] = nsol
bem['bem_method'] = method
# Gamma factors and multipliers
_add_gamma_multipliers(bem)
kind = {
FIFF.FIFFV_BEM_APPROX_CONST: 'constant collocation',
        FIFF.FIFFV_BEM_APPROX_LINEAR: 'linear collocation',
}[bem['bem_method']]
logger.info('Loaded %s BEM solution from %s', kind, fname)
return bem
def _add_gamma_multipliers(bem):
"""Helper to add gamma and multipliers in-place"""
bem['sigma'] = np.array([surf['sigma'] for surf in bem['surfs']])
# Dirty trick for the zero conductivity outside
sigma = np.r_[0.0, bem['sigma']]
bem['source_mult'] = 2.0 / (sigma[1:] + sigma[:-1])
bem['field_mult'] = sigma[1:] - sigma[:-1]
# make sure subsequent "zip"s work correctly
assert len(bem['surfs']) == len(bem['field_mult'])
bem['gamma'] = ((sigma[1:] - sigma[:-1])[np.newaxis, :] /
(sigma[1:] + sigma[:-1])[:, np.newaxis])
bem['is_sphere'] = False
_surf_dict = {'inner_skull': FIFF.FIFFV_BEM_SURF_ID_BRAIN,
'outer_skull': FIFF.FIFFV_BEM_SURF_ID_SKULL,
'head': FIFF.FIFFV_BEM_SURF_ID_HEAD}
def _bem_find_surface(bem, id_):
"""Find surface from already-loaded BEM"""
if isinstance(id_, string_types):
name = id_
id_ = _surf_dict[id_]
else:
name = _bem_explain_surface(id_)
idx = np.where(np.array([s['id'] for s in bem['surfs']]) == id_)[0]
if len(idx) != 1:
raise RuntimeError('BEM model does not have the %s triangulation'
% name.replace('_', ' '))
return bem['surfs'][idx[0]]
def _bem_explain_surface(id_):
"""Return a string corresponding to the given surface ID"""
_rev_dict = dict((val, key) for key, val in _surf_dict.items())
return _rev_dict[id_]
# ############################################################################
# Write
@deprecated('write_bem_surface is deprecated and will be removed in 0.11, '
'use write_bem_surfaces instead')
def write_bem_surface(fname, surf):
"""Write one bem surface
Parameters
----------
fname : string
File to write
surf : dict
A surface structured as obtained with read_bem_surfaces
"""
write_bem_surfaces(fname, surf)
def write_bem_surfaces(fname, surfs):
"""Write BEM surfaces to a fiff file
Parameters
----------
fname : str
Filename to write.
surfs : dict | list of dict
The surfaces, or a single surface.
"""
if isinstance(surfs, dict):
surfs = [surfs]
with start_file(fname) as fid:
start_block(fid, FIFF.FIFFB_BEM)
write_int(fid, FIFF.FIFF_BEM_COORD_FRAME, surfs[0]['coord_frame'])
_write_bem_surfaces_block(fid, surfs)
end_block(fid, FIFF.FIFFB_BEM)
end_file(fid)
def _write_bem_surfaces_block(fid, surfs):
"""Helper to actually write bem surfaces"""
for surf in surfs:
start_block(fid, FIFF.FIFFB_BEM_SURF)
write_float(fid, FIFF.FIFF_BEM_SIGMA, surf['sigma'])
write_int(fid, FIFF.FIFF_BEM_SURF_ID, surf['id'])
write_int(fid, FIFF.FIFF_MNE_COORD_FRAME, surf['coord_frame'])
write_int(fid, FIFF.FIFF_BEM_SURF_NNODE, surf['np'])
write_int(fid, FIFF.FIFF_BEM_SURF_NTRI, surf['ntri'])
write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NODES, surf['rr'])
# index start at 0 in Python
write_int_matrix(fid, FIFF.FIFF_BEM_SURF_TRIANGLES,
surf['tris'] + 1)
if 'nn' in surf and surf['nn'] is not None and len(surf['nn']) > 0:
write_float_matrix(fid, FIFF.FIFF_BEM_SURF_NORMALS, surf['nn'])
end_block(fid, FIFF.FIFFB_BEM_SURF)
def write_bem_solution(fname, bem):
"""Write a BEM model with solution
Parameters
----------
fname : str
The filename to use.
bem : dict
The BEM model with solution to save.
"""
with start_file(fname) as fid:
start_block(fid, FIFF.FIFFB_BEM)
# Coordinate frame (mainly for backward compatibility)
write_int(fid, FIFF.FIFF_BEM_COORD_FRAME,
bem['surfs'][0]['coord_frame'])
# Surfaces
_write_bem_surfaces_block(fid, bem['surfs'])
# The potential solution
if 'solution' in bem:
if bem['bem_method'] != FIFF.FWD_BEM_LINEAR_COLL:
raise RuntimeError('Only linear collocation supported')
write_int(fid, FIFF.FIFF_BEM_APPROX, FIFF.FIFFV_BEM_APPROX_LINEAR)
write_float_matrix(fid, FIFF.FIFF_BEM_POT_SOLUTION,
bem['solution'])
end_block(fid, FIFF.FIFFB_BEM)
end_file(fid)
|
matthew-tucker/mne-python
|
mne/bem.py
|
Python
|
bsd-3-clause
| 47,266
|
[
"Mayavi"
] |
254192d339eba333eeb4e767fd419c434eec92959571851a453748f9e8233cf7
|
"""
Base implementation of the Page Object pattern.
See https://code.google.com/p/selenium/wiki/PageObjects
"""
from abc import ABCMeta, abstractmethod, abstractproperty
from collections import defaultdict
from functools import wraps
from contextlib import contextmanager
import logging
import os
import socket
import urlparse
import re
from textwrap import dedent
from lazy import lazy
from selenium.common.exceptions import WebDriverException
from .query import BrowserQuery
from .promise import Promise, EmptyPromise, BrokenPromise
from .a11y import AxeCoreAudit, AxsAudit
# String that can be used to test for XSS vulnerabilities.
# Taken from https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#XSS_Locator.
XSS_INJECTION = "'';!--\"<XSS>=&{()}"
# When the injected string appears within an attribute (for instance, value of an input tag,
# or alt of an img tag), if it is properly escaped this is the format we will see from
# document.documentElement.innerHTML. To avoid false positives, we need to allow this
# specific string, which hopefully is unique/odd enough that it would never appear accidentally.
EXPECTED_ATTRIBUTE_FORMAT = re.compile(r'\'\';!--"<xss>=&{\(\)}')
XSS_HTML = "<xss"
class WrongPageError(Exception):
"""
The page object reports that we're on the wrong page!
"""
pass
class PageLoadError(Exception):
"""
An error occurred while loading the page.
"""
pass
class XSSExposureError(Exception):
"""
An XSS issue has been found on the current page.
"""
pass
def unguarded(method):
"""
Mark a PageObject method as unguarded.
Unguarded methods don't verify that the PageObject is
on the current browser page before they execute
Args:
method (callable): The method to decorate.
Returns:
Decorated method
"""
method._unguarded = True # pylint: disable=protected-access
return method
def pre_verify(method):
"""
Decorator that calls self._verify_page() before executing the decorated method
Args:
method (callable): The method to decorate.
Returns:
Decorated method
"""
@wraps(method)
def wrapper(self, *args, **kwargs): # pylint: disable=missing-docstring
self._verify_page() # pylint: disable=protected-access
return method(self, *args, **kwargs)
return wrapper
class _PageObjectMetaclass(ABCMeta):
"""
Decorates any callable attributes of the class
so that they call self._verify_page() before executing.
Excludes any methods marked as unguarded with the @unguarded
decorator, any methods starting with _, or in the list ALWAYS_UNGUARDED.
"""
ALWAYS_UNGUARDED = ['url', 'is_browser_on_page']
def __new__(mcs, cls_name, cls_bases, cls_attrs):
for name, attr in cls_attrs.items():
# Skip methods marked as unguarded
if getattr(attr, '_unguarded', False) or name in mcs.ALWAYS_UNGUARDED:
continue
# Skip private methods
if name.startswith('_'):
continue
# Skip class attributes that are classes themselves
if isinstance(attr, type):
continue
is_property = isinstance(attr, property)
# Skip non-callable attributes
if not (callable(attr) or is_property):
continue
if is_property:
# For properties, wrap each of the sub-methods separately
property_methods = defaultdict(None)
for fn_name in ('fdel', 'fset', 'fget'):
prop_fn = getattr(cls_attrs[name], fn_name, None)
if prop_fn is not None:
# Check for unguarded properties
if getattr(prop_fn, '_unguarded', False):
property_methods[fn_name] = prop_fn
else:
property_methods[fn_name] = pre_verify(prop_fn)
cls_attrs[name] = property(**property_methods)
else:
cls_attrs[name] = pre_verify(attr)
return super(_PageObjectMetaclass, mcs).__new__(mcs, cls_name, cls_bases, cls_attrs)
class PageObject(object):
"""
Encapsulates user interactions with a specific part
of a web application.
The most important thing is this:
Page objects encapsulate Selenium.
If you find yourself writing CSS selectors in tests,
manipulating forms, or otherwise interacting directly
with the web UI, stop!
Instead, put these in a :class:`PageObject` subclass :)
PageObjects do their best to verify that they are only
used when the browser is on a page containing the object.
To do this, they will call :meth:`is_browser_on_page` before executing
any of their methods, and raise a :class:`WrongPageError` if the
browser isn't on the correct page.
Generally, this is the right behavior. However, at times it
will be useful to not verify the page before executing a method.
In those cases, the method can be marked with the :func:`unguarded`
decorator. Additionally, private methods (those beginning with `_`)
are always unguarded.
Class or instance properties are never guarded. However, methods
    marked with the :func:`property` decorator are candidates for being guarded.
To make them unguarded, you must mark the getter, setter, and deleter
as :func:`unguarded` separately, and those decorators must be applied before
the :func:`property` decorator.
Correct::
@property
@unguarded
def foo(self):
return self._foo
Incorrect::
@unguarded
@property
def foo(self):
return self._foo
"""
__metaclass__ = _PageObjectMetaclass
def __init__(self, browser, *args, **kwargs):
"""
Initialize the page object to use the specified browser instance.
Args:
browser (selenium.webdriver): The Selenium-controlled browser.
Returns:
PageObject
"""
super(PageObject, self).__init__(*args, **kwargs)
self.browser = browser
a11y_flag = os.environ.get('VERIFY_ACCESSIBILITY', 'False')
self.verify_accessibility = a11y_flag.lower() == 'true'
xss_flag = os.environ.get('VERIFY_XSS', 'False')
self.verify_xss = xss_flag.lower() == 'true'
@lazy
def a11y_audit(self):
"""
Initializes the a11y_audit attribute.
"""
rulesets = {
"axe_core": AxeCoreAudit,
"google_axs": AxsAudit,
}
ruleset = rulesets[
os.environ.get("BOKCHOY_A11Y_RULESET", 'axe_core')]
return ruleset(self.browser, self.url)
@abstractmethod
def is_browser_on_page(self):
"""
Check that we are on the right page in the browser.
The specific check will vary from page to page,
but usually this amounts to checking the:
1) browser URL
2) page title
3) page headings
Returns:
A `bool` indicating whether the browser is on the correct page.
"""
return False
@abstractproperty
def url(self):
"""
Return the URL of the page. This may be dynamic,
determined by configuration options passed to the
page object's constructor.
Some pages may not be directly accessible:
perhaps the page object represents a "navigation"
component that occurs on multiple pages.
If this is the case, subclasses can return `None`
to indicate that you can't directly visit the page object.
"""
return None
@unguarded
def warning(self, msg):
"""
Subclasses call this to indicate that something unexpected
occurred while interacting with the page.
Page objects themselves should never make assertions or
raise exceptions, but they can issue warnings to make
tests easier to debug.
Args:
msg (str): The message to log as a warning.
Returns:
None
"""
log = logging.getLogger(self.__class__.__name__)
log.warning(msg)
@unguarded
def visit(self):
"""
Open the page containing this page object in the browser.
Some page objects may not provide a URL, in which case
a `NotImplementedError` will be raised.
Raises:
PageLoadError: The page did not load successfully.
NotImplementedError: The page object does not provide a URL to visit.
Returns:
PageObject
"""
if self.url is None:
raise NotImplementedError("Page {} does not provide a URL to visit.".format(self))
# Validate the URL
if not self.validate_url(self.url):
raise PageLoadError("Invalid URL: '{}'".format(self.url))
# Visit the URL
try:
self.browser.get(self.url)
except (WebDriverException, socket.gaierror):
raise PageLoadError("Could not load page '{!r}' at URL '{}'".format(
self, self.url
))
# Give the browser enough time to get to the page, then return the page object
# so that the caller can chain the call with an action:
# Example: FooPage.visit().do_something()
#
# A BrokenPromise will be raised if the page object's is_browser_on_page method
# does not return True before timing out.
try:
return self.wait_for_page()
except BrokenPromise:
raise PageLoadError("Timed out waiting to load page '{!r}' at URL '{}'".format(
self, self.url
))
@classmethod
@unguarded
def validate_url(cls, url):
"""
Return a boolean indicating whether the URL has a protocol and hostname.
If a port is specified, ensure it is an integer.
Arguments:
url (str): The URL to check.
Returns:
Boolean indicating whether the URL has a protocol and hostname.
"""
result = urlparse.urlsplit(url)
# Check that we have a protocol and hostname
if not result.scheme or not result.netloc:
return False
# Check that the port is an integer
try:
if result.port is not None:
int(result.port)
elif result.netloc.endswith(':'):
# Valid URLs do not end with colons.
return False
except ValueError:
return False
else:
return True
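    # Editor's examples (hypothetical URLs, not part of bok-choy):
    #     PageObject.validate_url('http://example.com/foo')  # True
    #     PageObject.validate_url('example.com/foo')         # False, no scheme
    #     PageObject.validate_url('http://example.com:')     # False, dangling colon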
def _verify_page(self):
"""
Ask the page object if we're on the right page;
if not, raise a `WrongPageError`.
"""
if not self.is_browser_on_page():
msg = "Not on the correct page to use '{!r}' at URL '{}'".format(
self, self.url
)
raise WrongPageError(msg)
def _verify_xss_exposure(self):
"""
Verify that there are no obvious XSS exposures on the page (based on test authors
including XSS_INJECTION in content rendered on the page).
If an xss issue is found, raise a 'XSSExposureError'.
"""
# Use innerHTML to get dynamically injected HTML as well as server-side HTML.
html_source = self.browser.execute_script(
"return document.documentElement.innerHTML.toLowerCase()"
)
# Check taken from https://www.owasp.org/index.php/XSS_Filter_Evasion_Cheat_Sheet#XSS_Locator.
all_hits_count = html_source.count(XSS_HTML)
if all_hits_count > 0:
safe_hits_count = len(EXPECTED_ATTRIBUTE_FORMAT.findall(html_source))
if all_hits_count > safe_hits_count:
potential_hits = re.findall('<[^<]+<xss', html_source)
raise XSSExposureError(
"{} XSS issue(s) found on page. Potential places are {}".format(
all_hits_count - safe_hits_count, potential_hits
)
)
@unguarded
def wait_for_page(self, timeout=30):
"""
Block until the page loads, then returns the page.
Useful for ensuring that we navigate successfully to a particular page.
Keyword Args:
timeout (int): The number of seconds to wait for the page before timing out with an exception.
Raises:
BrokenPromise: The timeout is exceeded without the page loading successfully.
"""
def _is_document_ready():
"""
Check the loading state of the document to ensure the document and all sub-resources
have finished loading (the document load event has been fired.)
"""
return self.browser.execute_script(
"return document.readyState=='complete'")
EmptyPromise(
_is_document_ready,
"The document and all sub-resources have finished loading.",
timeout=timeout
).fulfill()
result = Promise(
lambda: (self.is_browser_on_page(), self), "loaded page {!r}".format(self),
timeout=timeout
).fulfill()
if self.verify_accessibility:
self.a11y_audit.check_for_accessibility_errors() # pylint: disable=no-member
return result
@unguarded
def q(self, **kwargs): # pylint: disable=invalid-name
"""
Construct a query on the browser.
Example usages:
.. code:: python
self.q(css="div.foo").first.click()
self.q(xpath="/foo/bar").text
Keyword Args:
css: A CSS selector.
xpath: An XPath selector.
Returns:
BrowserQuery
"""
if self.verify_xss:
self._verify_xss_exposure()
return BrowserQuery(self.browser, **kwargs)
@contextmanager
def handle_alert(self, confirm=True):
"""
Context manager that ensures alerts are dismissed.
Example usage:
.. code:: python
with self.handle_alert():
self.q(css='input.submit-button').first.click()
Keyword Args:
confirm (bool): Whether to confirm or cancel the alert.
Returns:
None
"""
# Before executing the `with` block, stub the confirm/alert functions
script = dedent("""
window.confirm = function() {{ return {0}; }};
window.alert = function() {{ return; }};
""".format("true" if confirm else "false")).strip()
self.browser.execute_script(script)
# Execute the `with` block
yield
@unguarded
def wait_for_ajax(self, timeout=30):
"""
Wait for jQuery to be loaded and for all ajax requests to finish. Note
that we have to wait for jQuery to load first because it is used to
check that ajax requests are complete.
Important: If you have an ajax requests that results in a page reload,
you will need to use wait_for_page or some other method to confirm that
the page has finished reloading after wait_for_ajax has returned.
Example usage:
.. code:: python
self.q(css='input#email').fill("foo")
self.wait_for_ajax()
Keyword Args:
timeout (int): The number of seconds to wait before timing out with
a BrokenPromise exception.
Returns:
None
Raises:
BrokenPromise: The timeout is exceeded before (1) jQuery is defined
and (2) all ajax requests are completed.
"""
def _is_ajax_finished():
"""
Check if all the ajax calls on the current page have completed.
"""
# Wait for jQuery to be defined first, so that jQuery.active
# doesn't raise an error that 'jQuery is not defined'. We have
# seen this as a flaky pattern possibly related to pages reloading
# while wait_for_ajax is being called.
return self.browser.execute_script(
"return typeof(jQuery)!='undefined' && jQuery.active==0")
EmptyPromise(
_is_ajax_finished,
"Finished waiting for ajax requests.",
timeout=timeout
).fulfill()
@unguarded
def wait_for(self, promise_check_func, description, result=False, timeout=60):
"""
Calls the method provided as an argument until the Promise satisfied or BrokenPromise
Arguments:
promise_check_func (callable):
* If `result` is False Then
Function that accepts no arguments and returns a boolean indicating whether the promise is fulfilled
* If `result` is True Then
Function that accepts no arguments and returns a `(is_satisfied, result)` tuple,
where `is_satisfied` is a boolean indicating whether the promise was satisfied, and `result`
is a value to return from the fulfilled `Promise`
description (str): Description of the Promise, used in log messages
result (bool): Indicates whether we need result
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
Raises:
BrokenPromise: the `Promise` was not satisfied
"""
if result:
return Promise(promise_check_func, description, timeout=timeout).fulfill()
else:
return EmptyPromise(promise_check_func, description, timeout=timeout).fulfill()
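    # Editor's illustration (hypothetical selector, not part of bok-choy): with
    # result=True the callable returns an (is_satisfied, value) tuple and the
    # value of the fulfilled Promise is handed back to the caller, e.g.
    #     count = page.wait_for(
    #         lambda: (len(page.q(css='li').results) > 0,
    #                  len(page.q(css='li').results)),
    #         'at least one list item rendered', result=True)
    # With the default result=False the callable simply returns a boolean.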
@unguarded
def wait_for_element_presence(self, element_selector, description, timeout=60):
"""
Waits for element specified by `element_selector` to be present in DOM.
Example usage:
.. code:: python
self.wait_for_element_presence('.submit', 'Submit Button is Present')
Arguments:
element_selector (str): css selector of the element.
description (str): Description of the Promise, used in log messages.
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
"""
self.wait_for(lambda: self.q(css=element_selector).present, description=description, timeout=timeout)
@unguarded
def wait_for_element_absence(self, element_selector, description, timeout=60):
"""
Waits for element specified by `element_selector` until it disappears from DOM.
Example usage:
.. code:: python
self.wait_for_element_absence('.submit', 'Submit Button is not Present')
Arguments:
element_selector (str): css selector of the element.
description (str): Description of the Promise, used in log messages.
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
"""
self.wait_for(lambda: not self.q(css=element_selector).present, description=description, timeout=timeout)
@unguarded
def wait_for_element_visibility(self, element_selector, description, timeout=60):
"""
Waits for element specified by `element_selector` until it is displayed on web page.
Example usage:
.. code:: python
self.wait_for_element_visibility('.submit', 'Submit Button is Visible')
Arguments:
element_selector (str): css selector of the element.
description (str): Description of the Promise, used in log messages.
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
"""
self.wait_for(lambda: self.q(css=element_selector).visible, description=description, timeout=timeout)
@unguarded
def wait_for_element_invisibility(self, element_selector, description, timeout=60):
"""
Waits for element specified by `element_selector` until it disappears from the web page.
Example usage:
.. code:: python
self.wait_for_element_invisibility('.submit', 'Submit Button Disappeared')
Arguments:
element_selector (str): css selector of the element.
description (str): Description of the Promise, used in log messages.
timeout (float): Maximum number of seconds to wait for the Promise to be satisfied before timing out
"""
self.wait_for(lambda: self.q(css=element_selector).invisible, description=description, timeout=timeout)
@unguarded
def scroll_to_element(self, element_selector, timeout=60):
"""
Scrolls the browser such that the element specified appears at the top. Before scrolling, waits for
the element to be present.
Example usage:
.. code:: python
            self.scroll_to_element('.far-down')
Arguments:
element_selector (str): css selector of the element.
timeout (float): Maximum number of seconds to wait for the element to be present on the
page before timing out.
Raises: BrokenPromise if the element does not exist (and therefore scrolling to it is not possible)
"""
# Ensure element exists
msg = "Element '{element}' is present".format(element=element_selector)
self.wait_for(lambda: self.q(css=element_selector).present, msg, timeout=timeout)
# Obtain coordinates and use those for JavaScript call
loc = self.q(css=element_selector).first.results[0].location
self.browser.execute_script("window.scrollTo({x},{y})".format(x=loc['x'], y=loc['y']))
|
devs1991/test_edx_docmode
|
venv/lib/python2.7/site-packages/bok_choy/page_object.py
|
Python
|
agpl-3.0
| 21,953
|
[
"VisIt"
] |
79eb59b7d7d5849a8c56a0aa3c68bf958a74b1deb0398c7d152d68ed3b30c65a
|
from apiclient.discovery import build
from oauth2client.client import OAuth2WebServerFlow
from oauth2client.file import Storage
from oauth2client.tools import run_flow
import httplib2
import gflags
from datetime import datetime
from datetime import timedelta
from pytz import timezone
import pytz
import dateutil.parser
import sys
import getopt
import os
import json
import pyScreen
import pygame
import time
import signal
def handler(signum, frame):
print "Told to die ..."
sys.exit()
signal.signal(signal.SIGTERM, handler)
print "Current PID ", os.getpid()
import calendar_config
FLAGS = gflags.FLAGS
# Set up a Flow object to be used if we need to authenticate. This
# sample uses OAuth 2.0, and we set up the OAuth2WebServerFlow with
# the information it needs to authenticate. Note that it is called
# the Web Server Flow, but it can also handle the flow for native
# applications
# The client_id and client_secret can be found in Google Developers Console
FLOW = OAuth2WebServerFlow(
client_id=calendar_config.CLIENT_ID,
client_secret=calendar_config.CLIENT_SECRET,
scope=calendar_config.SCOPE,
user_agent=calendar_config.USER_AGENT)
# If the Credentials don't exist or are invalid, run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('calendar.dat')
credentials = storage.get()
if credentials is None or credentials.invalid == True:
credentials = run_flow(FLOW, storage)
# Create an httplib2.Http object to handle our HTTP requests and authorize it
# with our good Credentials.
http = httplib2.Http(timeout=30)
http = credentials.authorize(http)
# Build a service object for interacting with the API. Visit
# the Google Developers Console
# to get a developerKey for your own application.
foundServer=False
while not foundServer:
try:
service = build(serviceName='calendar', version='v3', http=http,developerKey=calendar_config.DEVELOPER_KEY)
foundServer=True
print 'service built'
except Exception as e:
print "could not build, retrying ..."
print 'exception - ',e
time.sleep(5)
foundServer=False
la = pytz.timezone(calendar_config.TIMEZONE)
def create_time_string(dt):
if not dt:
return None
hours, remainder = divmod(dt.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
minutes=minutes+1
h = 'hours'
m = 'minutes'
if hours == 1:
h = 'hour'
if minutes == 1:
m = 'minute'
if hours == 0:
return '%s %s' % (minutes, m)
else:
return '%s %s and %s %s' % (hours, h, minutes, m)
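# Editor's example (not in the original script): because one minute is always
# added, a remaining timedelta of 1 hour, 30 minutes and 10 seconds comes back
# as '1 hour and 31 minutes':
#     create_time_string(timedelta(hours=1, minutes=30, seconds=10))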
def get_events(room_name):
items = []
#now = datetime.utcnow()
#la_offset = la.utcoffset(datetime.utcnow())
#now = now + la_offset
now=datetime.now(la)
#now=now.replace(tzinfo=la)
start_time = datetime(year=now.year, month=now.month, day=now.day, tzinfo=la)
end_time = start_time + timedelta(days=1)
print "Running at", now.strftime("%A %d %B %Y, %I:%M%p"),
print " - ", room_name
if not os.path.isfile('calendars.json'):
# this is duplicated from the calendars() method
calendars = {}
calendar_list = service.calendarList().list().execute()
for calendar_list_entry in calendar_list['items']:
#if calendar_list_entry['id'] not in calendar_config.EXCLUSIONS:
calendars[calendar_list_entry['id']] = calendar_list_entry['summary']
# store this to a local file
with open('calendars.json', mode='w') as calendar_file:
json.dump({value: key for key, value in calendars.items()}, calendar_file)
with open('calendars.json', 'r') as f:
calendars = json.load(f)
if room_name not in calendars:
return {
'roomState' : pyScreen.roomState.notFound,
'events' : {},
'status' : '!Error!',
'now': now.strftime("%A %d %B %Y, %I:%M%p")
}
room_id = calendars[room_name]
print ' execute',room_id,start_time.isoformat(),end_time.isoformat(),
events = service.events().list(
calendarId=room_id,
orderBy='startTime',
singleEvents=True,
timeMin=start_time.isoformat(),
timeMax=end_time.isoformat()
).execute()
print 'out'
currentEventId=None
next_start = None
next_end = None
status = "FREE"
roomCurrentState = pyScreen.roomState.free
for event in events['items']:
#DEBUG
#print event
#print '----------------------------------------'
# if this is an all day event it has a 'date' for start and end, not a 'dateTime'
if not 'dateTime' in event['start']:
start = dateutil.parser.parse(event['start']['date']).astimezone(la)#.replace(tzinfo=None)
else:
start = dateutil.parser.parse(event['start']['dateTime']).astimezone(la)#.replace(tzinfo=None)
if not 'dateTime' in event['end']:
end = dateutil.parser.parse(event['end']['date']).astimezone(la)#.replace(tzinfo=la)
else:
end = dateutil.parser.parse(event['end']['dateTime']).astimezone(la)#.replace(tzinfo=la)
if not 'displayName' in event['creator']:
event['creator']['displayName']=event['creator']['email']
if not 'summary' in event:
event['summary']="(no title)"
# if we are before it ends, add it to the list of upcoming
if now <= end:
items.append({'name': event['summary'],
'creator': event['creator']['displayName'],
'start': start.astimezone(la).strftime("%I:%M%p"),
'end': end.astimezone(la).strftime("%I:%M%p"),
'eventid' : event['id']
})
# if it's currently running ...
if start < now and end > now:
if 'hangoutLink' in event:
status = "OnAir"
roomCurrentState = pyScreen.roomState.busyHangout
else:
status = "BUSY"
roomCurrentState = pyScreen.roomState.busy
currentEventId=event['id']
next_end = (end - now)
if start > now and not next_start:
next_start = (start - now)
print now, start, end
print start.astimezone(la), end.astimezone(la)
print '--------------------------------------------------'
next_start_str = create_time_string(next_start)
next_end_str = create_time_string(next_end)
if roomCurrentState == pyScreen.roomState.free and next_start and next_start < timedelta(minutes=15):
roomCurrentState = pyScreen.roomState.soonBusy
status = "SOON"
print 'status ', status, 'start in ', next_start_str, 'end in ',next_end_str
print '============================================='
return {'room': events['summary'],
'serviceEngine' : service,
'roomState' : roomCurrentState,
'currentEventId':currentEventId,
'status': status,
'now': now.strftime("%A %d %B %Y, %I:%M%p"),
'calendarid' : room_id,
'events': items,
'next_start_str': next_start_str,
'next_end_str': next_end_str}
# build a screen
cs=pyScreen.calenderScreen()
rn=calendar_config.CALENDAR
exceptionsCaught=0
while True:
try:
print 'get_events',
events=get_events(rn)
print 'out'
print 'Consume',
cs.Consume(rn,events)
print 'out'
# reset exceptions caught
exceptionsCaught=0
except Exception as e:
print 'exception - ',e
exceptionsCaught=exceptionsCaught+1
if exceptionsCaught>10:
os.system('sudo shutdown -r')
break
print 'ui',
cs.UserInteraction(45)
print 'out'
|
barneyman/gcalMeetingRoom
|
local.py
|
Python
|
apache-2.0
| 7,334
|
[
"VisIt"
] |
0fd058f41dbb4c4f6654af5edf9ff39c4e0f95ec3872ea4d52554f77e808cbf9
|
from matchmaking import *
from earth_distance import *
from datetime import date
from math import exp
def get_score(User1, User2):
#initialize score
matchrank = 0.0
#strict rule on gender preference
if gender_preference_rank(User1.Gender, User2.Gender, User1.Gender_Pref, User2.Gender_Pref) == -1 :
return -1;
# distance scoring
distance = haversine(User1.Longitude, User1.Latitude, User2.Longitude, User2.Latitude)
matchrank += 20.0/(20.0 + distance) # Falls down to 0.5 at 20 km
# age scoring
usr1_dob = str(User1.DOB).split('-')
usr2_dob = str(User2.DOB).split('-')
today = date.today()
usr1_age = today.year - int(usr1_dob[2])
    usr2_age = today.year - int(usr2_dob[2])  # index 2 assumes a 'DD-MM-YYYY' style string; otherwise this can raise an out-of-range error
matchrank += exp(-((usr1_age - usr2_age)**2)/(10.0)) # add a Gaussian factor
""" Ignore for now """
#matchrank = location_rank(User1.languages, User2.languages, matchrank)
#if matchrank == 0:
# print('Nogo criteria found. No match!')
# return -1
#by age
return matchrank
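# Worked example (editor's sketch, not in the original file): two users of the
# same age living 20 km apart with compatible gender preferences score
#     20.0 / (20.0 + 20.0) + exp(0) = 0.5 + 1.0 = 1.5
# since the distance term halves at 20 km and the Gaussian age term peaks at
# 1.0 for identical ages.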
|
RefugeeMatchmaking/HackZurich
|
GAE_Playground/get_score.py
|
Python
|
mit
| 1,003
|
[
"Gaussian"
] |
73496ddf9a3104d499bfb44b181a1d917df9916a0b6819d5ae16bd21800da46d
|
"""Simulates a Feed for a given list of parameters and number of iterations.
Authors:
Adam Rains
"""
import numpy as np
import optics
import optics_tools
import utils
import pylab as pl
import time
import multiprocessing as mp
import feed
import csv
import time
import datetime
output = mp.Queue()
def simulate_for_x_iterations(dz, offset, seeing, alpha, use_piaa, use_tip_tilt,
stromlo_variables, iterations, output):
"""For a given set of parameters, simulate for a certain number of
iterations and average the resulting eta.
Run iterations in multiples of 16 to make optimal use of how the turbulence
patches are selected (from a larger grid of 4x4)
TODO: Do not compute turbulence in perfect seeing (0.0)
Parameters
----------
dz: float
Free parameter, determines the magnification of the microlens array
(changed to optimise coupling)
offset: int
The x/y distance that each of the outer fibres is off-centre in a
radially outwards direction
seeing: float
The seeing for the telescope in arcseconds.
alpha: float
Exponent constant from the Gaussian distribution achieved by the PIAA
optics
use_piaa: boolean
Whether to apply the PIAA optics or not
use_tip_tilt: boolean
Whether to apply Tip/Tilt correction to the incident wavefront
stromlo_variables: boolean
Whether to use the Stromlo or Subaru set of fibre and wavelength
parameters
iterations: int
The number of times to run each simulation in order to average out the
effects of turbulence
output: queue
The queue to store the simulation results in for the purposes of
multiprocessing
Returns
-------
eta_avg: [float, float, float]
The average eta values for 1, 5 and 9 fibres.
"""
#Initialise results [1 fibre, 5 fibres, 9 fibres]
eta_totals = [0, 0, 0]
# Use larger array of turbulence and select subcomponents
npix = 2048
n = 4
# Initialise count
count = 0
# Initialise Feed
r_feed = feed.Feed(stromlo_variables, use_piaa)
    # Random element necessitates multiple iterations
if seeing > 0.0:
while iterations > 0:
# Generate large turbulence patch
turbulence = optics_tools.kmf(npix * n)
for x in xrange(0,n):
for y in xrange(0,n):
# Run for a single patch of turbulence
t_patch = turbulence[x*npix:((x+1)*npix),
y*npix:((y+1)*npix)]
c, a, eta, ef, t = r_feed.propagate_to_fibre(dz, offset,
t_patch,
seeing, alpha,
use_tip_tilt)
# Add
eta_totals[0] += eta[0]
eta_totals[1] += eta[1]
eta_totals[2] += eta[2]
# Decrement iterations
iterations -= 1
count += 1
print count, " - ", dz, offset, seeing, alpha, use_piaa,
print use_tip_tilt, stromlo_variables,
print str(datetime.datetime.now().time())
# All finished, compute average eta
eta_avg = [x / count for x in eta_totals]
# In perfect seeing, no need to iterate as there is no random element
else:
c, a, eta, ef, t = r_feed.propagate_to_fibre(dz, offset, 1.0, seeing,
alpha, use_tip_tilt)
eta_avg = eta
print dz, offset, seeing, alpha, use_piaa, use_tip_tilt,
print stromlo_variables, str(datetime.datetime.now().time())
output.put([eta_avg[0], 1, dz, seeing, alpha, float(use_piaa),
float(use_tip_tilt), float(stromlo_variables), offset, count])
output.put([eta_avg[1], 5, dz, seeing, alpha, float(use_piaa),
float(use_tip_tilt), float(stromlo_variables), offset, count])
output.put([eta_avg[2], 9, dz, seeing, alpha, float(use_piaa),
float(use_tip_tilt), float(stromlo_variables), offset, count])
return eta_avg
def simulate(results_save_path, offset, iterations, max_running_processes,
dz_values, seeing_values=np.arange(0,3.25,0.25), use_piaa=True,
use_tip_tilt=True, alpha_values=[2.0], stromlo_variables=True):
"""Simulates fibre propagation for sets of dz, seeing and alpha parameters,
averaging the resulting eta over a supplied number of iterations.
Parameters
----------
results_save_path: string
The path to save the .csv to.
offset: int
The x/y distance that each of the outer fibres is off-centre by in an
outwards direction
iterations: int
The number of times to run each simulation in order to average out the
effects of turbulence
dz_values: [float]
The list of dz values to simulate.
seeing_values: [float]
The list of seeing values to simulate.
use_piaa: boolean
Whether to apply the PIAA optics or not
use_tip_tilt: boolean
Whether to apply Tip/Tilt correction to the incident wavefront
alpha_values: [float]
The list of alpha values to simulate.
stromlo_variables: boolean
Whether to use the Stromlo or Subaru set of fibre and wavelength
parameters
Returns
-------
simulation_results: 2D list
The result and parameters for each simulation:
['eta', '# Fibres','dz','seeing','alpha','use_piaa','use_tip_tilt',
'stromlo_variables', 'offset', 'iterations']
"""
# Initialise list to store results
simulation_results = [['eta', '# Fibres','dz','seeing','alpha','use_piaa',
'use_tip_tilt', 'stromlo_variables', 'offset',
'iterations']]
processes = []
output = mp.Queue()
# For each set of dz-alpha-seeing, run for the given number of iterations
for dz in dz_values:
for alpha in alpha_values:
for seeing in seeing_values:
# Construct processes
processes.append(mp.Process(target=simulate_for_x_iterations,
args=(dz, offset, seeing, alpha, use_piaa,
use_tip_tilt, stromlo_variables,
iterations, output)))
# Limit the number of active processes at any one time
total_processes = len(processes)
finished_processes = 0
while len(processes) > 0:
# Start a given number of processes
running_processes = []
for i in xrange(0, max_running_processes):
if len(processes) > 0:
running_processes.append(processes.pop())
# Run processes
for p in running_processes:
p.start()
# Exit the completed processes
for p in running_processes:
p.join()
finished_processes += 1
# Get the results
for i in xrange(0, total_processes*3):
simulation_results.append(output.get())
try:
# All results obtained, save as file
with open(results_save_path, "wb") as f:
writer = csv.writer(f)
writer.writerows(simulation_results)
except:
print "Error when writing to .csv"
else:
return simulation_results
return simulation_results
def plot_simulation_results(csv_path, image_path, independent_var, legend_var,
graph_var, find_max=False):
"""Loads simulation results from a csv file and plots corresponding graphs,
pulling relevant information regarding the simulations from the file.
Parameters
----------
csv_path: string
The path to the saved csv of results
image_path: string
The save path for the plots.
"""
# Load the csv
# [eta, # Fibres, dz, seeing, alpha, use_piaa, use_tip_tilt,
# stromlo_variables, offset, iterations]
results = np.loadtxt(open(csv_path, "rb"), delimiter=",",skiprows=1)
# Reconstruct the simulation information
number_of_fibres = [int(i) for i in np.sort(np.unique(results[:,1]))]
dz_values = np.sort(np.unique(results[:,2]))
seeing_values = np.sort(np.unique(results[:,3]))
alpha_values = np.sort(np.unique(results[:,4]))
use_piaa = bool(results[0,5])
use_tip_tilt = bool(results[0,6])
stromlo_variables = bool(results[0,7])
offset = results[0,8]
iterations = results[0,9]
# Use dictionary to map variables to strings for customisation
parameters = {}
parameters["fibres"] = [number_of_fibres, 1]
parameters["dz"] = [dz_values, 2]
parameters["seeing"] = [seeing_values, 3]
parameters["alpha"] = [alpha_values, 4]
x_vals = parameters[independent_var]
L_vals = parameters[legend_var]
g_vals = parameters[graph_var]
    # Construct a graph for each graph_var value, a line for each legend_var
    # value and a point for each eta-independent_var pair
for g in g_vals[0]:
pl.clf()
for L in L_vals[0]:
x = []
eta = []
            # Find the entries in results that match the given legend (L) and
            # graph (g) values
for simulation in xrange(0, len(results)):
if (results[simulation, L_vals[1]] == L) and \
(results[simulation, g_vals[1]] == g):
x.append(results[simulation, x_vals[1]])
eta.append(results[simulation, 0])
# All matching simulations found, plot line
e_sorted = [e for (xx,e) in sorted(zip(x,eta))]
pl.plot(x_vals[0], e_sorted, label=(legend_var + " = " + str(L)))
# Used to optimise over a given variable (namely dz) to find where
# eta is maximised
if find_max:
max_eta = max(e_sorted)
i = e_sorted.index(max_eta)
print L, "-", x_vals[0][i], "(" + str(int(g)) + ")"
# All alphas plotted, apply labels/legend and save
title = "Simulation of Eta vs " + independent_var + ", " + \
str(int(iterations)) + " Iterations "
details = "(" + graph_var + " = " + str(g) + ", offset = " + \
str(offset) + ", PIAA = " + str(use_piaa) + ", Tip-Tilt = " + \
str(use_tip_tilt) + ", Stromlo Feed = " + \
str(stromlo_variables) + ")"
pl.title(title + "\n" + details, fontsize=12)
pl.xlabel(independent_var, fontsize=12)
pl.ylabel(r'$\eta$')
pl.legend(prop={'size':10}, loc='upper right')
pl.grid()
#pl.ylim([0.0,1.0])
pl.savefig( (image_path + title + details + ".png" ))
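# Editor's usage sketch (hypothetical paths and values, not part of the module):
#     results = simulate('results.csv', offset=0, iterations=16,
#                        max_running_processes=4, dz_values=[0.15, 0.20])
#     plot_simulation_results('results.csv', './plots/', independent_var='dz',
#                             legend_var='alpha', graph_var='seeing')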
|
mikeireland/astro-optics
|
simulate.py
|
Python
|
mit
| 11,306
|
[
"Gaussian"
] |
49e5ab4587813994d4a0ddb76f478ab6331a9b84dc3781ba12eb3e9c726c693a
|
#
# Copyright 2014, 2020-2021 Lars Pastewka (U. Freiburg)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2014) James Kermode, King's College London
# Lars Pastewka, Karlsruhe Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
import unittest
import numpy as np
import ase.io as io
from ase.lattice.cubic import Diamond
import matscipytest
from matscipy.neighbours import mic, neighbour_list
from matscipy.atomic_strain import (get_delta_plus_epsilon_dgesv,
get_delta_plus_epsilon,
get_D_square_min)
###
class TestAtomicStrain(matscipytest.MatSciPyTestCase):
def test_dsygv_dgelsd(self):
a = Diamond('C', size=[4,4,4])
b = a.copy()
b.positions += (np.random.random(b.positions.shape)-0.5)*0.1
i, j = neighbour_list("ij", b, 1.85)
dr_now = mic(b.positions[i] - b.positions[j], b.cell)
dr_old = mic(a.positions[i] - a.positions[j], a.cell)
dgrad1 = get_delta_plus_epsilon_dgesv(len(b), i, dr_now, dr_old)
dgrad2 = get_delta_plus_epsilon(len(b), i, dr_now, dr_old)
self.assertArrayAlmostEqual(dgrad1, dgrad2)
###
if __name__ == '__main__':
unittest.main()
|
libAtoms/matscipy
|
tests/test_atomic_strain.py
|
Python
|
lgpl-2.1
| 2,827
|
[
"ASE",
"Matscipy"
] |
8d0d370edef53a3ec08679d86f098cc316397eb292e25982025f2261c686f817
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RCodex(RPackage):
"""A Normalization and Copy Number Variation Detection Method for Whole
Exome Sequencing
A normalization and copy number variation calling procedure for whole
exome DNA sequencing data. CODEX relies on the availability of multiple
samples processed using the same sequencing pipeline for normalization, and
does not require matched controls. The normalization model in CODEX
includes terms that specifically remove biases due to GC content, exon
length and targeting and amplification efficiency, and latent systemic
artifacts. CODEX also includes a Poisson likelihood-based recursive
segmentation procedure that explicitly models the count-based exome
sequencing data."""
homepage = "https://www.bioconductor.org/packages/release/bioc/html/CODEX.html"
git = "https://git.bioconductor.org/packages/CODEX"
version('1.22.0', commit='aa0ee4278111a46e0c790312b0526ba07aab22eb')
version('1.18.0', commit='9a95cccc7ff3fe587636317e21e39a07dddf80bc')
depends_on('r@3.2.3:', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-bsgenome-hsapiens-ucsc-hg19', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
|
LLNL/spack
|
var/spack/repos/builtin/packages/r-codex/package.py
|
Python
|
lgpl-2.1
| 1,644
|
[
"Bioconductor"
] |
8e4b7b443833a00e27bafb4a6cd00c483d1212b6fda0b6774cfc9ff1b5fa0ddd
|
# -*- coding: utf-8 -*-
from coordinate import *
from coards import *
from cf import *
from conventions import *
from generic import *
CONVENTION_CLASSES = [
CF,
Coards,
Coordinate
]
"""
Classes that implement a netcdf convention.
"""
def select_convention(
dataset,
filter_out_nd_coordinates,
favor_class=None):
"""
Return a Convention specialization that implements the conventions used in
*dataset*.
filter_out_nd_coordinates
Most coordinate variables are one-dimensional. If this argument is
True, data variables depending on more-dimensional coordinate
variables are filtered out. A reason for this may be that your
application doesn't support such variables.
favor_class
In case *dataset* adheres to multiple supported conventions and
*favor_class* is one of them, then it is used. Otherwise
:py:class:`Conventions` is used.
In case *dataset* doesn't adhere to a supported convention,
:py:class:`Generic` is used. Supported conventions are listed in
:py:data:`CONVENTION_CLASSES`.
"""
assert favor_class is None or favor_class in CONVENTION_CLASSES, favor_class
conventions = []
for convention_class in CONVENTION_CLASSES:
if convention_class.conforms(dataset):
conventions.append(convention_class(dataset,
filter_out_nd_coordinates))
result = None
if len(conventions) == 0:
# Dataset doesn't adhere to one of the supported conventions.
result = Generic(dataset, filter_out_nd_coordinates)
elif len(conventions) == 1:
# Dataset adheres to exactly one supported convention.
result = conventions[0]
else:
# Dataset adheres to more than one supported convention.
if favor_class is not None and favor_class in [type(convention) for
convention in conventions]:
# Select the favored convention.
result = favor_class(dataset, filter_out_nd_coordinates)
else:
# Use all conventions.
result = Conventions(dataset, filter_out_nd_coordinates,
conventions)
return result
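# Editor's usage sketch (hypothetical dataset handle, not part of the module):
#     convention = select_convention(dataset, filter_out_nd_coordinates=False,
#                                    favor_class=CF)
# If the dataset conforms to several supported conventions, favoring CF
# returns a CF instance; otherwise a combined Conventions wrapper (or Generic)
# is returned as described in the docstring above.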
|
jfrygeo/solutions-geoprocessing-toolbox
|
suitability/toolboxes/scripts/MultidimensionSupplementalTools/MultidimensionSupplementalTools/Scripts/mds/netcdf/convention/__init__.py
|
Python
|
apache-2.0
| 2,206
|
[
"NetCDF"
] |
e6c20a5e717577334cf6c2e433842ab5ac1fe385fe6fcfcf464dfbd27c0aa4de
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 University of Liège
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sph.helpers import *
if __name__=="__main__":
boxL = 2.
Lfloor = 0.7
Lwater = 0.5
sep = 0.05
kernel = Kernel('cubic', True) # 'cubic', 'quadratic' or 'quintic'
law = EqState('liquid') # 'gas' or 'liquid'
# parameters
model = Model()
model.kernel = kernel
model.law = law
model.h_0 = 0.06 # initial smoothing length [m]
model.c_0 = 35.0 # initial speed of sound [m/s]
model.rho_0 = 1000.0 # initial density [kg/m^3]
model.dom_dim = boxL # domain size (cube)
model.alpha = 0.5 # artificial viscosity factor 1
model.beta = 0.0 # artificial viscosity factor 2
model.maxTime = 1.0 # simulation time
model.saveInt = 0.01 # save interval
# mobile particles
cube = Cube( o=(((boxL-Lwater)/2),((boxL-Lwater)/2), ((boxL)/2)+0.5), L=(Lwater,Lwater,Lwater), rho=model.rho_0, s=sep)
model.addMobile(cube.generate())
# fixed particles
plane = Cube( o=(((boxL-Lfloor)/2),((boxL-Lfloor)/2), (boxL/2)), L=(Lfloor,Lfloor,sep), rho=model.rho_0, s=sep)
model.addFixed(plane.generate())
plane = Cube( o=(0,0,0), L=(boxL,boxL,sep), rho=model.rho_0, s=sep)
model.addFixed(plane.generate())
# run SPH model
print(model)
model.run()
# convert to VTK
import sph.gui as gui
gui.ToParaview(verb=False).convertall()
|
rboman/progs
|
classes/sph0/louis/tests/waterdrop.py
|
Python
|
apache-2.0
| 2,097
|
[
"VTK"
] |
167607f6a799e6fd1bb49d2b9deae865e0b743a7babf74a057d4317e7b17f643
|
""" I wish who wrote this would have put some doc...
IIUC this is a wrapper around the JobState object. It basically tries to cache
everything locally instead of going to the DB.
"""
import copy
import time
from DIRAC.Core.Utilities import Time, DEncode
from DIRAC import S_OK, S_ERROR, gLogger
from DIRAC.WorkloadManagementSystem.Client.JobState.JobState import JobState
from DIRAC.WorkloadManagementSystem.Client.JobState.JobManifest import JobManifest
class CachedJobState(object):
log = gLogger.getSubLogger("CachedJobState")
def __init__(self, jid, skipInitState=False):
self.dOnlyCache = False
self.__jid = jid
self.__jobState = JobState(jid)
self.cleanState(skipInitState=skipInitState)
def cleanState(self, skipInitState=False):
self.__cache = {}
self.__jobLog = []
self.__insertIntoTQ = False
self.__dirtyKeys = set()
self.__manifest = False
self.__initState = None
self.__lastValidState = time.time()
if not skipInitState:
result = self.getAttributes(["Status", "MinorStatus", "LastUpdateTime"])
if result['OK']:
self.__initState = result['Value']
else:
self.__initState = None
def recheckValidity(self, graceTime=600):
now = time.time()
if graceTime <= 0 or now - self.__lastValidState > graceTime:
self.__lastValidState = now
result = self.__jobState.getAttributes(["Status", "MinorStatus", "LastUpdateTime"])
if not result['OK']:
return result
currentState = result['Value']
if not currentState == self.__initState:
return S_OK(False)
return S_OK(True)
return S_OK(self.valid)
@property
def valid(self):
return self.__initState is not None
@property
def jid(self):
return self.__jid
def getDirtyKeys(self):
return set(self.__dirtyKeys)
def commitChanges(self):
if self.__initState is None:
return S_ERROR("CachedJobState( %d ) is not valid" % self.__jid)
changes = {}
for k in self.__dirtyKeys:
changes[k] = self.__cache[k]
result = self.__jobState.commitCache(self.__initState, changes, self.__jobLog)
try:
result.pop('rpcStub')
except KeyError:
pass
if not result['OK']:
self.cleanState()
return result
if not result['Value']:
self.cleanState()
return S_ERROR("Initial state was different")
newState = result['Value']
self.__jobLog = []
self.__dirtyKeys.clear()
# Save manifest
if self.__manifest and self.__manifest.isDirty():
result = self.__jobState.setManifest(self.__manifest)
if not result['OK']:
self.cleanState()
for _ in range(5):
if self.__jobState.rescheduleJob()['OK']:
break
return result
self.__manifest.clearDirty()
# Insert into TQ
if self.__insertIntoTQ:
result = self.__jobState.insertIntoTQ()
if not result['OK']:
self.cleanState()
for _ in range(5):
if self.__jobState.rescheduleJob()['OK']:
break
return result
self.__insertIntoTQ = False
self.__initState = newState
self.__lastValidState = time.time()
return S_OK()
def serialize(self):
if self.__manifest:
manifest = (self.__manifest.dumpAsCFG(), self.__manifest.isDirty())
else:
manifest = None
return DEncode.encode((self.__jid, self.__cache, self.__jobLog, manifest,
self.__initState, self.__insertIntoTQ, tuple(self.__dirtyKeys)))
@staticmethod
def deserialize(stub):
dataTuple, _slen = DEncode.decode(stub)
if len(dataTuple) != 7:
return S_ERROR("Invalid stub")
# jid
if not isinstance(dataTuple[0], (int, long)):
return S_ERROR("Invalid stub 0")
# cache
if not isinstance(dataTuple[1], dict):
return S_ERROR("Invalid stub 1")
# trace
if not isinstance(dataTuple[2], list):
return S_ERROR("Invalid stub 2")
# manifest
    if dataTuple[3] is not None and not (isinstance(dataTuple[3], tuple) and len(dataTuple[3]) == 2):
return S_ERROR("Invalid stub 3")
# initstate
if not isinstance(dataTuple[4], dict):
return S_ERROR("Invalid stub 4")
# Insert into TQ
if not isinstance(dataTuple[5], bool):
return S_ERROR("Invalid stub 5")
# Dirty Keys
if not isinstance(dataTuple[6], tuple):
return S_ERROR("Invalid stub 6")
cjs = CachedJobState(dataTuple[0], skipInitState=True)
cjs.__cache = dataTuple[1]
cjs.__jobLog = dataTuple[2]
dt3 = dataTuple[3]
if dataTuple[3]:
manifest = JobManifest()
result = manifest.loadCFG(dt3[0])
if not result['OK']:
return result
if dt3[1]:
manifest.setDirty()
else:
manifest.clearDirty()
cjs.__manifest = manifest
cjs.__initState = dataTuple[4]
cjs.__insertIntoTQ = dataTuple[5]
cjs.__dirtyKeys = set(dataTuple[6])
return S_OK(cjs)
def __cacheAdd(self, key, value):
self.__cache[key] = value
self.__dirtyKeys.add(key)
def __cacheExists(self, keyList):
if isinstance(keyList, basestring):
keyList = [keyList]
for key in keyList:
if key not in self.__cache:
return False
return True
def __cacheResult(self, cKey, functor, fArgs=None):
# If it's a string
if isinstance(cKey, basestring):
if cKey not in self.__cache:
if self.dOnlyCache:
return S_ERROR("%s is not cached")
if not fArgs:
fArgs = tuple()
result = functor(*fArgs)
if not result['OK']:
return result
data = result['Value']
self.__cache[cKey] = data
return S_OK(self.__cache[cKey])
# Tuple/List
elif isinstance(cKey, (list, tuple)):
if not self.__cacheExists(cKey):
if self.dOnlyCache:
return S_ERROR("%s is not cached")
if not fArgs:
fArgs = tuple()
result = functor(*fArgs)
if not result['OK']:
return result
data = result['Value']
if len(cKey) != len(data):
gLogger.warn(
"CachedJobState.__memorize( %s, %s = %s ) doesn't receive the same amount of values as keys" %
(cKey, functor, data))
return data
for i, val in enumerate(cKey):
self.__cache[val] = data[i]
# Prepare result
return S_OK(tuple([self.__cache[cK] for cK in cKey]))
else:
raise RuntimeError("Cache key %s does not have a valid type" % cKey)
def __cacheDict(self, prefix, functor, keyList=None):
if not keyList or not self.__cacheExists(["%s.%s" % (prefix, key) for key in keyList]):
result = functor(keyList)
if not result['OK']:
return result
data = result['Value']
for key in data:
cKey = "%s.%s" % (prefix, key)
# If the key is already in the cache. DO NOT TOUCH. User may have already modified it.
# We update the coming data with the cached data
if cKey in self.__cache:
data[key] = self.__cache[cKey]
else:
self.__cache[cKey] = data[key]
return S_OK(data)
return S_OK(dict([(key, self.__cache["%s.%s" % (prefix, key)]) for key in keyList]))
def _inspectCache(self):
return copy.deepcopy(self.__cache)
def _clearCache(self):
self.__cache = {}
@property
def _internals(self):
if self.__manifest:
manifest = (self.__manifest.dumpAsCFG(), self.__manifest.isDirty())
else:
manifest = None
return (self.__jid, self.dOnlyCache, dict(self.__cache),
list(self.__jobLog), manifest, dict(self.__initState), list(self.__dirtyKeys))
#
# Manifest
#
def getManifest(self):
if not self.__manifest:
result = self.__jobState.getManifest()
if not result['OK']:
return result
self.__manifest = result['Value']
return S_OK(self.__manifest)
def setManifest(self, manifest):
if not isinstance(manifest, JobManifest):
jobManifest = JobManifest()
result = jobManifest.load(str(manifest))
if not result['OK']:
return result
manifest = jobManifest
manifest.setDirty()
self.__manifest = manifest
# self.__manifest.clearDirty()
return S_OK()
# Attributes
#
def __addLogRecord(self, majorStatus=None, minorStatus=None, appStatus=None, source=None):
record = {}
if majorStatus:
record['status'] = majorStatus
if minorStatus:
record['minor'] = minorStatus
if appStatus:
record['application'] = appStatus
if not record:
return
if not source:
source = "Unknown"
self.__jobLog.append((record, Time.dateTime(), source))
def setStatus(self, majorStatus, minorStatus=None, appStatus=None, source=None):
self.__cacheAdd('att.Status', majorStatus)
if minorStatus:
self.__cacheAdd('att.MinorStatus', minorStatus)
if appStatus:
self.__cacheAdd('att.ApplicationStatus', appStatus)
self.__addLogRecord(majorStatus, minorStatus, appStatus, source)
return S_OK()
def setMinorStatus(self, minorStatus, source=None):
self.__cacheAdd('att.MinorStatus', minorStatus)
self.__addLogRecord(minorStatus=minorStatus, source=source)
return S_OK()
def getStatus(self):
return self.__cacheResult(('att.Status', 'att.MinorStatus'), self.__jobState.getStatus)
def setAppStatus(self, appStatus, source=None):
self.__cacheAdd('att.ApplicationStatus', appStatus)
self.__addLogRecord(appStatus=appStatus, source=source)
return S_OK()
def getAppStatus(self):
return self.__cacheResult('att.ApplicationStatus', self.__jobState.getAppStatus)
#
# Attribs
#
def setAttribute(self, name, value):
if not isinstance(name, basestring):
return S_ERROR("Attribute name has to be a string")
self.__cacheAdd("att.%s" % name, value)
return S_OK()
def setAttributes(self, attDict):
if not isinstance(attDict, dict):
return S_ERROR("Attributes has to be a dictionary and it's %s" % str(type(attDict)))
for key in attDict:
self.__cacheAdd("att.%s" % key, attDict[key])
return S_OK()
def getAttribute(self, name):
return self.__cacheResult('att.%s' % name, self.__jobState.getAttribute, (name, ))
def getAttributes(self, nameList=None):
return self.__cacheDict('att', self.__jobState.getAttributes, nameList)
# JobParameters --- REMOVED
# Optimizer params
def setOptParameter(self, name, value):
if not isinstance(name, basestring):
return S_ERROR("Optimizer parameter name has to be a string")
self.__cacheAdd('optp.%s' % name, value)
return S_OK()
def setOptParameters(self, pDict):
if not isinstance(pDict, dict):
return S_ERROR("Optimizer parameters has to be a dictionary")
for key in pDict:
self.__cacheAdd('optp.%s' % key, pDict[key])
return S_OK()
def getOptParameter(self, name):
return self.__cacheResult("optp.%s" % name, self.__jobState.getOptParameter, (name, ))
def getOptParameters(self, nameList=None):
return self.__cacheDict('optp', self.__jobState.getOptParameters, nameList)
# Other
def resetJob(self, source=""):
""" Reset the job!
"""
return self.__jobState.resetJob(source=source)
def getInputData(self):
return self.__cacheResult("inputData", self.__jobState.getInputData)
def insertIntoTQ(self):
if self.valid:
self.__insertIntoTQ = True
return S_OK()
return S_ERROR("Cached state is invalid")
|
fstagni/DIRAC
|
WorkloadManagementSystem/Client/JobState/CachedJobState.py
|
Python
|
gpl-3.0
| 11,494
|
[
"DIRAC"
] |
12d0d6351733e140aae0f3393d18ea431f52c26aee5bec8b37d686baf28855a6
|
tests=[
("python","UnitTestDraw.py",{}),
]
longTests=[
]
if __name__=='__main__':
import sys
from rdkit import TestRunner
failed,tests = TestRunner.RunScript('test_list.py',0,1)
sys.exit(len(failed))
|
rdkit/rdkit-orig
|
rdkit/Chem/Draw/test_list.py
|
Python
|
bsd-3-clause
| 219
|
[
"RDKit"
] |
5595c5e428b699c8b01591aea6e574ae821b4914add9b60090ef2bf0ea602cc3
|
# Copyright (C) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in project root for information.
import sys
if sys.version >= '3':
basestring = str
import pyspark
from pyspark.ml.common import inherit_doc
from pyspark.sql.types import *
from pyspark.sql.types import Row, _create_row
import numpy as np
from mmlspark._ImageTransformer import _ImageTransformer
ImageFields = ["path", "height", "width", "type", "bytes"]
ImageSchema = StructType([
StructField(ImageFields[0], StringType(), True),
StructField(ImageFields[1], IntegerType(), True),
StructField(ImageFields[2], IntegerType(), True),
StructField(ImageFields[3], IntegerType(), True), # OpenCV type: CV_8U in most cases
StructField(ImageFields[4], BinaryType(), True) ]) # OpenCV bytes: row-wise BGR in most cases
def toNDArray(image):
"""
    Converts an image Row to a (height, width, 3) RGB numpy array
    Args:
        image (object): The image Row to be converted
    Returns:
        array: The image as a (height, width, 3) uint8 array
"""
return np.asarray(image.bytes, dtype = np.uint8).reshape((image.height, image.width, 3))[:,:,(2,1,0)]
def toImage(array, path = "", ocvType = 16):
"""
    Converts a (height, width, 3) numpy array to an image Row
    Args:
        array (array): The (height, width, 3) RGB array to convert
        path (str): Optional path stored with the image
        ocvType (int): OpenCV type code (16 corresponds to CV_8UC3)
    Returns:
        object: an image Row following ImageSchema
"""
length = np.prod(array.shape)
data = bytearray(array.astype(dtype=np.int8)[:,:,(2,1,0)].reshape(length))
height = array.shape[0]
width = array.shape[1]
# Creating new Row with _create_row(), because Row(name = value, ... ) orders fields by name,
# which conflicts with expected ImageSchema order when the new DataFrame is created by UDF
return _create_row(ImageFields, [path, height, width, ocvType, data])
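# Editor's round-trip sketch (hypothetical array, not part of the original API):
#     arr = np.zeros((32, 64, 3), dtype=np.uint8)  # height x width x RGB
#     row = toImage(arr, path="synthetic.png")     # Row following ImageSchema
#     back = toNDArray(row)                        # back to a (32, 64, 3) array
# Both helpers swap the channel order, so the BGR bytes stored in the Row
# round-trip back to RGB.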
from pyspark.ml.common import inherit_doc
@inherit_doc
class ImageTransformer(_ImageTransformer):
"""
Resizes the image to the given width and height
Args:
height (int): The height to resize to (>=0)
width (int): The width to resize to (>=0)
"""
def resize(self, height, width):
"""
Resizes the image to the given width and height
Args:
height (int): The height to resize to (>=0)
width (int): The width to resize to (>=0)
"""
self._java_obj.resize(height, width)
return self
def crop(self, x, y, height, width):
"""
Crops the image given the starting x,y coordinates
and the width and height
Args:
x (int): The initial x coordinate (>=0)
y (int): The initial y coordinate (>=0)
height (int): The height to crop to (>=0)
width (int): The width to crop to (>=0)
"""
self._java_obj.crop(x,y,height,width)
return self
def colorFormat(self, format):
"""
Formats the image to the given image format
Args:
format (int): The format to convert to, please see OpenCV cvtColor function documentation for all formats
"""
self._java_obj.colorFormat(format)
return self
def blur(self, height, width):
"""
Blurs the image using a normalized box filter
Args:
height (double): The height of the box filter (>= 0)
width (double): The width of the box filter (>= 0)
"""
self._java_obj.blur(height, width)
return self
def threshold(self, threshold, maxVal, thresholdType):
"""
Thresholds the image, please see OpenCV threshold function documentation for more information
Args:
threshold: (double) The threshold value
maxVal (double): The maximum value to use
thresholdType (double): The type of threshold, can be binary, binary_inv, trunc, zero, zero_inv
"""
self._java_obj.threshold(threshold, maxVal, thresholdType)
return self
def gaussianKernel(self, appertureSize, sigma):
"""
Blurs the image by applying a gaussian kernel
Args:
appertureSize (double): The aperture size, which should be odd and positive
sigma (double): The standard deviation of the gaussian
"""
self._java_obj.gaussianKernel(appertureSize, sigma)
return self
"""
Flips the image
:param int flipCode: a flag to specify how to flip the image
- 0 means flipping around the x-axis (up-down)
- positive value (for example, 1) means flipping around y-axis (left-right, default)
- negative value (for example, -1) means flipping around both axes (diagonally)
See OpenCV documentation for details.
"""
def flip(self, flipCode = 1):
self._java_obj.flip(flipCode)
return self
|
rastala/mmlspark
|
src/image-transformer/src/main/python/ImageTransformer.py
|
Python
|
mit
| 4,883
|
[
"Gaussian"
] |
33b9a8027bf3fe0341f3e88475d36c77aa2e8a811ccbb78e2784fefd022a8663
|
###!/usr/bin/python
# Galaxy Media Center Diagnostic Log Gather & Upload
from __future__ import print_function
import os, shutil, time, subprocess, xbmc, xbmcgui, tarfile, socket, struct, fcntl, argparse
# Static Variables
ACTION_PREVIOUS_MENU = 10
dialog = xbmcgui.Dialog()
dp = xbmcgui.DialogProgress()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sockfd = sock.fileno()
SIOCGIFADDR = 0x8915
dirpath='/mnt/sda2/diag'
sysfile='/mnt/sda2/diag/sysinfo.log'
sarfile='/mnt/sda2/diag/sardata.log'
PROG='uploaddiag.py'
USAGE='[options]'
DESC='Python script for gathering and sending diagnostic data'
# Parse command line arguments
parser = argparse.ArgumentParser(description=DESC, prog=PROG, usage='%(prog)s '+USAGE)
parser.add_argument('-r', '--run', required=False, help='Run script in execute mode', action='store_true')
args = parser.parse_args()
# Calculate Time for SAR
def calctime():
hour=time.strftime("%H", time.localtime())
minsec=time.strftime("%M:%S", time.localtime())
nhour=int(hour)
if nhour < 3:
ntime='00:00:00'
else:
nhour=int(hour)-3
nhour=format(nhour, '02')
nhour=str(nhour)
ntime=nhour+":"+minsec
return(ntime)
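# Worked example (illustrative): at 14:05:30 local time calctime() returns '11:05:30'
# (three hours earlier, hour zero-padded); before 03:00 it falls back to '00:00:00'.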
# Calculate IP Address for filename
def get_ip(iface):
ifreq = struct.pack('16sH14s', iface, socket.AF_INET, '\x00'*14)
try:
res = fcntl.ioctl(sockfd, SIOCGIFADDR, ifreq)
except:
return None
ip = struct.unpack('16sH2x4s8x', res)[2]
return socket.inet_ntoa(ip)
# MAIN Function
def main():
xbmc.log("*Diagnostic upload script starting... %s" % time.time(), level=xbmc.LOGNOTICE)
dp.create("Sending Diagnostic Logs"," ","Gathering and sending diagnostic information...")
dp.update(1)
time.sleep(2)
if os.path.isfile('/mnt/sda2/diag/sysinfo.log'):
os.remove('/mnt/sda2/diag/sysinfo.log')
if os.path.isfile('/mnt/sda2/diag/sardata.log'):
os.remove('/mnt/sda2/diag/sardata.log')
if os.path.isfile('/mnt/sda2/diag/system.log'):
os.remove('/mnt/sda2/diag/system.log')
if os.path.isfile('/mnt/sda2/diag/dmesg.log'):
os.remove('/mnt/sda2/diag/dmesg.log')
for line in open("/proc/cpuinfo"):
if "Serial" in line:
serial=line
serial=serial[-12:]
serial=serial[:-1]
dtime=(time.strftime("%m%d-%I%M%p"))
fsub=serial+'_'+dtime+'.tar.gz'
filename='/mnt/sda2/diag/'+fsub.rstrip()
dp.update(15)
ntime=calctime()
subprocess.call(['/home/osmc/.kodi/addons/script.gmcsystemupdate/gathersarinfo '+ntime], shell=True)
dp.update(35)
subprocess.call(['/home/osmc/.kodi/addons/script.gmcsystemupdate/gathersysinfo', ''], shell=True)
time.sleep(5)
dp.update(55)
tar = tarfile.open(filename, "w:gz")
tar.add('/home/osmc/.kodi/temp/kodi.log', arcname='kodi.log')
tar.add('/mnt/sda2/diag/system.log', arcname='system.log')
dp.update(65)
tar.add(sysfile, arcname='sysinfo.log')
tar.add(sarfile, arcname='sardata.log')
tar.close()
dp.update(75)
time.sleep(3)
dp.update(85)
subprocess.call(['/home/osmc/.kodi/addons/script.gmcsystemupdate/senddiag '+filename], shell=True)
time.sleep(4)
dp.update(100)
time.sleep(1)
# os.remove(filename)
xbmc.log("*Dianostic upload complete. %s" % time.time(), level=xbmc.LOGNOTICE)
os.remove(filename)
dp.close()
dialog.ok('Sending Diagnostic Logs',' ','Diagnostic logs sent.')
if args.run:
main()
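# Invocation sketch (based on the argparse setup above): the gather-and-upload run only
# happens when the script is started with the -r/--run flag, e.g.
#   python uploaddiag.py --run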
|
cosmicc/script.gmcsystemupdate
|
uploaddiag.py
|
Python
|
mit
| 3,243
|
[
"Galaxy"
] |
95578a9509ae7219ca8722d7980f4230d66605551a6d215d55063fa2db5a8900
|
from time import time
import numpy as np
import ase.db
from ase.test.tasks.dcdft import DeltaCodesDFTCollection as Collection
from gpaw import GPAW, PW, FermiDirac
c = ase.db.connect('dcdft.db')
ecut = 340
kptdensity = 3.5
width = 0.10
collection = Collection()
for name in ['K', 'Ca', 'Ti']:
atoms = collection[name]
cell = atoms.get_cell()
# Loop over volumes:
for n, x in enumerate(np.linspace(0.98, 1.02, 5)):
id = c.reserve(name=name, x=x)
if id is None:
# This calculation has been or is being done:
continue
atoms.set_cell(cell * x, scale_atoms=True)
atoms.calc = GPAW(txt='%s-%d.txt' % (name, n),
mode=PW(ecut),
xc='PBE',
kpts={'density': kptdensity},
occupations=FermiDirac(width))
t1 = time()
atoms.get_potential_energy()
t2 = time()
# Write to database:
c.write(atoms, name=name, x=x, time=t2 - t1,
ecut=ecut, kptdensity=kptdensity, width=width)
del c[id]
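# Follow-up sketch (illustrative, not part of the original exercise): once the rows above
# exist, the energy-volume data can be read back for an equation-of-state fit; row.volume
# and row.energy are standard ASE db row attributes.
#   from ase.eos import EquationOfState
#   rows = list(ase.db.connect('dcdft.db').select(name='Ti'))
#   volumes = [row.volume for row in rows]
#   energies = [row.energy for row in rows]
#   v0, e0, B = EquationOfState(volumes, energies).fit()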
|
robwarm/gpaw-symm
|
doc/exercises/dcdft/dcdft_gpaw.py
|
Python
|
gpl-3.0
| 1,131
|
[
"ASE",
"GPAW"
] |
1f05991f8fbf173a42f2b04df4043b883a6df6e6ec08965f10e1fcdf8692a437
|
################################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `gate` module.
"""
import warnings
warnings.simplefilter("error")
import numpy as np
from bayespy.nodes import (Gate,
GaussianARD,
Gamma,
Categorical,
Bernoulli,
Multinomial)
from bayespy.inference.vmp.nodes.gaussian import GaussianMoments
from bayespy.utils import random
from bayespy.utils import linalg
from bayespy.utils.misc import TestCase
class TestGate(TestCase):
"""
Unit tests for Gate node.
"""
def test_init(self):
"""
Test the creation of Gate node
"""
# Gating scalar node
Z = Categorical(np.ones(3)/3)
X = GaussianARD(0, 1, shape=(), plates=(3,))
Y = Gate(Z, X)
self.assertEqual(Y.plates, ())
self.assertEqual(Y.dims, ( (), () ))
# Gating non-scalar node
Z = Categorical(np.ones(3)/3)
X = GaussianARD(0, 1, shape=(2,), plates=(3,))
Y = Gate(Z, X)
self.assertEqual(Y.plates, ())
self.assertEqual(Y.dims, ( (2,), (2,2) ))
# Plates from Z
Z = Categorical(np.ones(3)/3, plates=(4,))
X = GaussianARD(0, 1, shape=(2,), plates=(3,))
Y = Gate(Z, X)
self.assertEqual(Y.plates, (4,))
self.assertEqual(Y.dims, ( (2,), (2,2) ))
# Plates from X
Z = Categorical(np.ones(3)/3)
X = GaussianARD(0, 1, shape=(2,), plates=(4,3))
Y = Gate(Z, X)
self.assertEqual(Y.plates, (4,))
self.assertEqual(Y.dims, ( (2,), (2,2) ))
# Plates from Z and X
Z = Categorical(np.ones(3)/3, plates=(5,))
X = GaussianARD(0, 1, shape=(2,), plates=(4,1,3))
Y = Gate(Z, X)
self.assertEqual(Y.plates, (4,5))
self.assertEqual(Y.dims, ( (2,), (2,2) ))
# Gating non-default plate
Z = Categorical(np.ones(3)/3)
X = GaussianARD(0, 1, shape=(), plates=(3,4))
Y = Gate(Z, X, gated_plate=-2)
self.assertEqual(Y.plates, (4,))
self.assertEqual(Y.dims, ( (), () ))
# Fixed gating
Z = 2
X = GaussianARD(0, 1, shape=(2,), plates=(3,))
Y = Gate(Z, X)
self.assertEqual(Y.plates, ())
self.assertEqual(Y.dims, ( (2,), (2,2) ))
# Fixed X
Z = Categorical(np.ones(3)/3)
X = [1, 2, 3]
Y = Gate(Z, X, moments=GaussianMoments(0))
self.assertEqual(Y.plates, ())
self.assertEqual(Y.dims, ( (), () ))
# Do not accept non-negative cluster plates
Z = Categorical(np.ones(3)/3)
X = GaussianARD(0, 1, plates=(3,))
self.assertRaises(ValueError,
Gate,
Z,
X,
gated_plate=0)
# None of the parents have the cluster plate axis
Z = Categorical(np.ones(3)/3)
X = GaussianARD(0, 1)
self.assertRaises(ValueError,
Gate,
Z,
X)
# Inconsistent cluster plate
Z = Categorical(np.ones(3)/3)
X = GaussianARD(0, 1, plates=(2,))
self.assertRaises(ValueError,
Gate,
Z,
X)
pass
def test_message_to_child(self):
"""
Test the message to child of Gate node.
"""
# Gating scalar node
Z = 2
X = GaussianARD([1,2,3], 1, shape=(), plates=(3,))
Y = Gate(Z, X)
u = Y._message_to_child()
self.assertEqual(len(u), 2)
self.assertAllClose(u[0], 3)
self.assertAllClose(u[1], 3**2+1)
# Fixed X
Z = 2
X = [1, 2, 3]
Y = Gate(Z, X, moments=GaussianMoments(0))
u = Y._message_to_child()
self.assertEqual(len(u), 2)
self.assertAllClose(u[0], 3)
self.assertAllClose(u[1], 3**2)
# Uncertain gating
Z = Categorical([0.2,0.3,0.5])
X = GaussianARD([1,2,3], 1, shape=(), plates=(3,))
Y = Gate(Z, X)
u = Y._message_to_child()
self.assertAllClose(u[0], 0.2*1 + 0.3*2 + 0.5*3)
self.assertAllClose(u[1], 0.2*2 + 0.3*5 + 0.5*10)
# Plates in Z
Z = [2, 0]
X = GaussianARD([1,2,3], 1, shape=(), plates=(3,))
Y = Gate(Z, X)
u = Y._message_to_child()
self.assertAllClose(u[0], [3, 1])
self.assertAllClose(u[1], [10, 2])
# Plates in X
Z = 2
X = GaussianARD([1,2,3], 1, shape=(), plates=(4,3,))
Y = Gate(Z, X)
u = Y._message_to_child()
self.assertAllClose(np.ones(4)*u[0], np.ones(4)*3)
self.assertAllClose(np.ones(4)*u[1], np.ones(4)*10)
# Gating non-default plate
Z = 2
X = GaussianARD([[1],[2],[3]], 1, shape=(), plates=(3,4))
Y = Gate(Z, X, gated_plate=-2)
u = Y._message_to_child()
self.assertAllClose(np.ones(4)*u[0], np.ones(4)*3)
self.assertAllClose(np.ones(4)*u[1], np.ones(4)*10)
# Gating non-scalar node
Z = 2
X = GaussianARD([1*np.ones(4),
2*np.ones(4),
3*np.ones(4)],
1,
shape=(4,), plates=(3,))
Y = Gate(Z, X)
u = Y._message_to_child()
self.assertAllClose(u[0], 3*np.ones(4))
self.assertAllClose(u[1], 9*np.ones((4,4)) + 1*np.identity(4))
# Broadcasting the moments on the cluster axis
Z = 2
X = GaussianARD(1, 1, shape=(), plates=(3,))
Y = Gate(Z, X)
u = Y._message_to_child()
self.assertEqual(len(u), 2)
self.assertAllClose(u[0], 1)
self.assertAllClose(u[1], 1**2+1)
pass
def test_message_to_parent(self):
"""
Test the message to parents of Gate node.
"""
# Unobserved and broadcasting
Z = 2
X = GaussianARD(0, 1, shape=(), plates=(3,))
F = Gate(Z, X)
Y = GaussianARD(F, 1)
m = F._message_to_parent(0)
self.assertEqual(len(m), 1)
self.assertAllClose(m[0], 0*np.ones(3))
m = F._message_to_parent(1)
self.assertEqual(len(m), 2)
self.assertAllClose(m[0]*np.ones(3), [0, 0, 0])
self.assertAllClose(m[1]*np.ones(3), [0, 0, 0])
# Gating scalar node
Z = 2
X = GaussianARD([1,2,3], 1, shape=(), plates=(3,))
F = Gate(Z, X)
Y = GaussianARD(F, 1)
Y.observe(10)
m = F._message_to_parent(0)
self.assertAllClose(m[0], [10*1-0.5*2, 10*2-0.5*5, 10*3-0.5*10])
m = F._message_to_parent(1)
self.assertAllClose(m[0], [0, 0, 10])
self.assertAllClose(m[1], [0, 0, -0.5])
# Fixed X
Z = 2
X = [1,2,3]
F = Gate(Z, X, moments=GaussianMoments(0))
Y = GaussianARD(F, 1)
Y.observe(10)
m = F._message_to_parent(0)
self.assertAllClose(m[0], [10*1-0.5*1, 10*2-0.5*4, 10*3-0.5*9])
m = F._message_to_parent(1)
self.assertAllClose(m[0], [0, 0, 10])
self.assertAllClose(m[1], [0, 0, -0.5])
# Uncertain gating
Z = Categorical([0.2, 0.3, 0.5])
X = GaussianARD([1,2,3], 1, shape=(), plates=(3,))
F = Gate(Z, X)
Y = GaussianARD(F, 1)
Y.observe(10)
m = F._message_to_parent(0)
self.assertAllClose(m[0], [10*1-0.5*2, 10*2-0.5*5, 10*3-0.5*10])
m = F._message_to_parent(1)
self.assertAllClose(m[0], [0.2*10, 0.3*10, 0.5*10])
self.assertAllClose(m[1], [-0.5*0.2, -0.5*0.3, -0.5*0.5])
# Plates in Z
Z = [2, 0]
X = GaussianARD([1,2,3], 1, shape=(), plates=(3,))
F = Gate(Z, X)
Y = GaussianARD(F, 1)
Y.observe([10, 20])
m = F._message_to_parent(0)
self.assertAllClose(m[0], [[10*1-0.5*2, 10*2-0.5*5, 10*3-0.5*10],
[20*1-0.5*2, 20*2-0.5*5, 20*3-0.5*10]])
m = F._message_to_parent(1)
self.assertAllClose(m[0], [20, 0, 10])
self.assertAllClose(m[1], [-0.5, 0, -0.5])
# Plates in X
Z = 2
X = GaussianARD([[1,2,3], [4,5,6]], 1, shape=(), plates=(2,3,))
F = Gate(Z, X)
Y = GaussianARD(F, 1)
Y.observe([10, 20])
m = F._message_to_parent(0)
self.assertAllClose(m[0], [10*1-0.5*2 + 20*4-0.5*17,
10*2-0.5*5 + 20*5-0.5*26,
10*3-0.5*10 + 20*6-0.5*37])
m = F._message_to_parent(1)
self.assertAllClose(m[0], [[0, 0, 10],
[0, 0, 20]])
self.assertAllClose(m[1]*np.ones((2,3)), [[0, 0, -0.5],
[0, 0, -0.5]])
# Gating non-default plate
Z = 2
X = GaussianARD([[1],[2],[3]], 1, shape=(), plates=(3,1))
F = Gate(Z, X, gated_plate=-2)
Y = GaussianARD(F, 1)
Y.observe([10])
m = F._message_to_parent(0)
self.assertAllClose(m[0], [10*1-0.5*2, 10*2-0.5*5, 10*3-0.5*10])
m = F._message_to_parent(1)
self.assertAllClose(m[0], [[0], [0], [10]])
self.assertAllClose(m[1], [[0], [0], [-0.5]])
# Gating non-scalar node
Z = 2
X = GaussianARD([[1,4],[2,5],[3,6]], 1, shape=(2,), plates=(3,))
F = Gate(Z, X)
Y = GaussianARD(F, 1)
Y.observe([10,20])
m = F._message_to_parent(0)
self.assertAllClose(m[0], [10*1-0.5*2 + 20*4-0.5*17,
10*2-0.5*5 + 20*5-0.5*26,
10*3-0.5*10 + 20*6-0.5*37])
m = F._message_to_parent(1)
I = np.identity(2)
self.assertAllClose(m[0], [[0,0], [0,0], [10,20]])
self.assertAllClose(m[1], [0*I, 0*I, -0.5*I])
# Broadcasting the moments on the cluster axis
Z = 2
X = GaussianARD(2, 1, shape=(), plates=(3,))
F = Gate(Z, X)
Y = GaussianARD(F, 1)
Y.observe(10)
m = F._message_to_parent(0)
self.assertAllClose(m[0], [10*2-0.5*5, 10*2-0.5*5, 10*2-0.5*5])
m = F._message_to_parent(1)
self.assertAllClose(m[0], [0, 0, 10])
self.assertAllClose(m[1], [0, 0, -0.5])
pass
def test_mask_to_parent(self):
"""
Test the mask handling in Gate node
"""
pass
|
SalemAmeen/bayespy
|
bayespy/inference/vmp/nodes/tests/test_gate.py
|
Python
|
mit
| 10,813
|
[
"Gaussian"
] |
09c3a8ed6e0ec455e7699a8d068be7831c3d49c00cca1c49e0107237586090e5
|
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from FileTester import FileTester
from TestHarness import util
import os
class Exodiff(FileTester):
@staticmethod
def validParams():
params = FileTester.validParams()
params.addRequiredParam('exodiff', [], "A list of files to exodiff.")
params.addParam('exodiff_opts', [], "Additional arguments to be passed to invocations of exodiff.")
params.addParam('custom_cmp', "Custom comparison file")
params.addParam('use_old_floor', False, "Use Exodiff old floor option")
params.addParam('map', True, "Use geometrical mapping to match up elements. This is usually a good idea because it makes files comparable between runs with Serial and Parallel Mesh.")
params.addParam('partial', False, ("Invokes a matching algorithm similar to the -m option. However "
"this option ignores unmatched nodes and elements. This allows "
"comparison of files that only partially overlap."))
return params
def __init__(self, name, params):
FileTester.__init__(self, name, params)
if self.specs['map'] and self.specs['partial']:
raise Exception("For the Exodiff tester, you cannot specify both 'map' and 'partial' as True")
def getOutputFiles(self):
return self.specs['exodiff']
def processResultsCommand(self, moose_dir, options):
commands = []
for file in self.specs['exodiff']:
custom_cmp = ''
old_floor = ''
if self.specs.isValid('custom_cmp'):
custom_cmp = ' -f ' + os.path.join(self.getTestDir(), self.specs['custom_cmp'])
if self.specs['use_old_floor']:
old_floor = ' -use_old_floor'
if self.specs['map']:
map_option = ' -m '
else:
map_option = ' '
if self.specs['partial']:
partial_option = ' -partial '
else:
partial_option = ''
commands.append(os.path.join(moose_dir, 'framework', 'contrib', 'exodiff', 'exodiff') + map_option + partial_option + custom_cmp + ' -F' + ' ' + str(self.specs['abs_zero']) \
+ old_floor + ' -t ' + str(self.specs['rel_err']) + ' ' + ' '.join(self.specs['exodiff_opts']) + ' ' \
+ os.path.join(self.getTestDir(), self.specs['gold_dir'], file) + ' ' + os.path.join(self.getTestDir(), file))
return commands
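    # Rough shape of one generated command (illustrative; actual flags depend on the spec
    # parameters handled above):
    #   <moose_dir>/framework/contrib/exodiff/exodiff -m -F <abs_zero> -t <rel_err> \
    #       <gold_dir>/<file> <file>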
def processResults(self, moose_dir, options, output):
output += FileTester.processResults(self, moose_dir, options, output)
if self.isFail() or self.specs['skip_checks']:
return output
# Don't Run Exodiff on Scaled Tests
if options.scaling and self.specs['scale_refine']:
return output
# Make sure that all of the Exodiff files are actually available
for file in self.specs['exodiff']:
if not os.path.exists(os.path.join(self.getTestDir(), self.specs['gold_dir'], file)):
output += "File Not Found: " + os.path.join(self.getTestDir(), self.specs['gold_dir'], file)
self.setStatus(self.fail, 'MISSING GOLD FILE')
break
if not self.isFail():
# Retrieve the commands
commands = self.processResultsCommand(moose_dir, options)
for command in commands:
exo_output = util.runCommand(command)
output += 'Running exodiff: ' + command + '\n' + exo_output + ' ' + ' '.join(self.specs['exodiff_opts'])
if ('different' in exo_output or 'ERROR' in exo_output) and not "Files are the same" in exo_output:
self.setStatus(self.diff, 'EXODIFF')
break
return output
|
nuclear-wizard/moose
|
python/TestHarness/testers/Exodiff.py
|
Python
|
lgpl-2.1
| 4,184
|
[
"MOOSE"
] |
cf5fa98731be8fdad81ff1fefba7490396482eedd0aec461843fe8bf4e28104b
|
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (C) 2014 Brian Douglass bhdouglass@gmail.com
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
from agui.awidgets import ALabel
from agui.backends.gtk.widgets import Widget
class Label(Widget, ALabel):
type = 'Label'
def __init__(self, item = None):
ALabel.__init__(self, item)
Widget.__init__(self, item)
@ALabel.text.getter
def text(self):
self._text = self.item.get_text()
return self._text
@text.setter
def text(self, value):
self.item.set_text(value)
self._text = value
|
bhdouglass/agui
|
agui/backends/gtk/widgets/label.py
|
Python
|
gpl-3.0
| 1,219
|
[
"Brian"
] |
297015112b371c08cdd19a422fd22c0715692c521e8db432e931718d7cfef772
|
def print_table(table_data):
max_len = [0] * len(table_data)
for item in range(len(table_data)):
max_len[item] = find_row_max(table_data[item])
colWidth = len(table_data)
rowWidth = len(table_data[0])
for i in range(rowWidth):
for j in range(colWidth):
print(table_data[j][i].rjust(max_len[j] + 1, ' '), end='')
print('')
def find_row_max(list_data):
max_len = len(list_data[0])
for i in range(1, len(list_data)):
if len(list_data[i]) >= max_len:
max_len = len(list_data[i])
return max_len
table = [['apples', 'oranges', 'cherries', 'banana'],
['Alice', 'Bob', 'Carol', 'David'],
['dogs', 'cats', 'moose', 'goose']]
print_table(table)
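# Expected output (each column right-justified to its longest entry plus one space):
#    apples Alice  dogs
#   oranges   Bob  cats
#  cherries Carol moose
#    banana David goose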
|
sallyyoo/ced2
|
py/practice/printTable.py
|
Python
|
mit
| 728
|
[
"MOOSE"
] |
ec70f03c065760ca4ae86c9d9237c9a7662df9bc98b1ab0cca98693c018303fa
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from abc import ABCMeta, abstractproperty
import numpy as np
from skbio.util._decorator import classproperty, stable
from ._grammared_sequence import _motifs as parent_motifs
class NucleotideMixin(metaclass=ABCMeta):
"""Mixin for adding funtionality for working with sequences of nucleotides.
This is an abstract base class (ABC) that cannot be instantiated.
See Also
--------
DNA
RNA
"""
__complement_lookup = None
__gc_codes = None
@classproperty
def _complement_lookup(cls):
if cls.__complement_lookup is not None:
return cls.__complement_lookup
lookup = np.zeros(cls._number_of_extended_ascii_codes, dtype=np.uint8)
for key, value in cls.complement_map.items():
lookup[ord(key)] = ord(value)
cls.__complement_lookup = lookup
return lookup
@classproperty
def _gc_codes(cls):
if cls.__gc_codes is None:
gc_iupac_chars = 'GCS'
cls.__gc_codes = np.asarray([ord(g) for g in gc_iupac_chars])
return cls.__gc_codes
@property
def _motifs(self):
return _motifs
@abstractproperty
@classproperty
@stable(as_of='0.4.0')
def complement_map(cls):
"""Return mapping of nucleotide characters to their complements.
Returns
-------
dict
Mapping of each character to its complement.
Notes
-----
Complements cannot be defined for a generic nucleotide sequence because
the complement of ``A`` is ambiguous. Thanks, nature...
"""
raise NotImplementedError
@stable(as_of='0.4.0')
def complement(self, reverse=False):
"""Return the complement of the nucleotide sequence.
Parameters
----------
reverse : bool, optional
If ``True``, return the reverse complement. If positional and/or
interval metadata are present, they will be reversed.
Returns
-------
NucleotideMixin
The (reverse) complement of the nucleotide sequence. The type and
metadata of the result will be the same as the nucleotide
sequence. If `reverse` is ``True``, positional or interval metadata
will be reversed if it is present.
See Also
--------
reverse_complement
complement_map
Examples
--------
>>> from skbio import DNA
>>> seq = DNA('TTCATT', positional_metadata={'quality':range(6)})
>>> seq
DNA
-----------------------------
Positional metadata:
'quality': <dtype: int64>
Stats:
length: 6
has gaps: False
has degenerates: False
has definites: True
GC-content: 16.67%
-----------------------------
0 TTCATT
>>> seq.complement()
DNA
-----------------------------
Positional metadata:
'quality': <dtype: int64>
Stats:
length: 6
has gaps: False
has degenerates: False
has definites: True
GC-content: 16.67%
-----------------------------
0 AAGTAA
>>> rc = seq.complement(reverse=True)
>>> rc
DNA
-----------------------------
Positional metadata:
'quality': <dtype: int64>
Stats:
length: 6
has gaps: False
has degenerates: False
has definites: True
GC-content: 16.67%
-----------------------------
0 AATGAA
>>> rc.positional_metadata['quality'].values
array([5, 4, 3, 2, 1, 0])
"""
result = self._complement_lookup[self._bytes]
metadata = None
if self.has_metadata():
metadata = self.metadata
positional_metadata = None
if self.has_positional_metadata():
positional_metadata = self.positional_metadata
complement = self._constructor(
sequence=result,
metadata=metadata,
positional_metadata=positional_metadata)
if reverse:
# this has to be before the interval metadata code,
            # because __getitem__ drops interval_metadata.
complement = complement[::-1]
if self.has_interval_metadata():
complement.interval_metadata = self.interval_metadata
if reverse:
# TODO: this can be revised to match
# positional_metadata when __getitem__
# supports interval_metadata
complement.interval_metadata._reverse()
return complement
@stable(as_of='0.4.0')
def reverse_complement(self):
"""Return the reverse complement of the nucleotide sequence.
Returns
-------
NucleotideMixin
The reverse complement of the nucleotide sequence. The type and
metadata of the result will be the same as the nucleotide
sequence. If positional metadata is present, it will be reversed.
See Also
--------
complement
is_reverse_complement
Notes
-----
This method is equivalent to ``self.complement(reverse=True)``.
Examples
--------
>>> from skbio import DNA
>>> seq = DNA('TTCATT',
... positional_metadata={'quality':range(6)})
>>> seq = seq.reverse_complement()
>>> seq
DNA
-----------------------------
Positional metadata:
'quality': <dtype: int64>
Stats:
length: 6
has gaps: False
has degenerates: False
has definites: True
GC-content: 16.67%
-----------------------------
0 AATGAA
>>> seq.positional_metadata['quality'].values
array([5, 4, 3, 2, 1, 0])
"""
return self.complement(reverse=True)
@stable(as_of='0.4.0')
def is_reverse_complement(self, other):
r"""Determine if a sequence is the reverse complement of this sequence.
Parameters
----------
other : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Sequence to compare to.
Returns
-------
bool
``True`` if `other` is the reverse complement of the nucleotide
sequence.
Raises
------
TypeError
If `other` is a ``Sequence`` object with a different type than the
nucleotide sequence.
See Also
--------
reverse_complement
Examples
--------
>>> from skbio import DNA
>>> DNA('TTCATT').is_reverse_complement('AATGAA')
True
>>> DNA('TTCATT').is_reverse_complement('AATGTT')
False
>>> DNA('ACGT').is_reverse_complement('ACGT')
True
"""
other = self._munge_to_sequence(other, 'is_reverse_complement')
# avoid computing the reverse complement if possible
if len(self) != len(other):
return False
else:
# we reverse complement ourselves because `other` is a `Sequence`
# object at this point and we only care about comparing the
# underlying sequence data
return self.reverse_complement()._string == other._string
@stable(as_of='0.4.0')
def gc_content(self):
"""Calculate the relative frequency of G's and C's in the sequence.
This includes G, C, and S characters. This is equivalent to calling
``gc_frequency(relative=True)``. Note that the sequence will be
degapped before the operation, so gap characters will not be included
when calculating the length of the sequence.
Returns
-------
float
Relative frequency of G's and C's in the sequence.
See Also
--------
gc_frequency
Examples
--------
>>> from skbio import DNA
>>> DNA('ACGT').gc_content()
0.5
>>> DNA('ACGTACGT').gc_content()
0.5
>>> DNA('ACTTAGTT').gc_content()
0.25
>>> DNA('ACGT--..').gc_content()
0.5
>>> DNA('--..').gc_content()
0
`S` means `G` or `C`, so it counts:
>>> DNA('ASST').gc_content()
0.5
Other degenerates don't count:
>>> DNA('RYKMBDHVN').gc_content()
0.0
"""
return self.gc_frequency(relative=True)
@stable(as_of='0.4.0')
def gc_frequency(self, relative=False):
"""Calculate frequency of G's and C's in the sequence.
This calculates the minimum GC frequency, which corresponds to IUPAC
characters G, C, and S (which stands for G or C).
Parameters
----------
relative : bool, optional
If False return the frequency of G, C, and S characters (ie the
count). If True return the relative frequency, ie the proportion
of G, C, and S characters in the sequence. In this case the
sequence will also be degapped before the operation, so gap
characters will not be included when calculating the length of the
sequence.
Returns
-------
int or float
Either frequency (count) or relative frequency (proportion),
depending on `relative`.
See Also
--------
gc_content
Examples
--------
>>> from skbio import DNA
>>> DNA('ACGT').gc_frequency()
2
>>> DNA('ACGT').gc_frequency(relative=True)
0.5
>>> DNA('ACGT--..').gc_frequency(relative=True)
0.5
>>> DNA('--..').gc_frequency(relative=True)
0
`S` means `G` or `C`, so it counts:
>>> DNA('ASST').gc_frequency()
2
Other degenerates don't count:
>>> DNA('RYKMBDHVN').gc_frequency()
0
"""
counts = np.bincount(self._bytes,
minlength=self._number_of_extended_ascii_codes)
gc = counts[self._gc_codes].sum()
if relative:
seq = self.degap()
if len(seq) != 0:
gc /= len(seq)
return gc
_motifs = parent_motifs.copy()
@_motifs("purine-run")
def _motif_purine_run(sequence, min_length, ignore):
"""Identifies purine runs"""
return sequence.find_with_regex("([AGR]{%d,})" % min_length,
ignore=ignore)
@_motifs("pyrimidine-run")
def _motif_pyrimidine_run(sequence, min_length, ignore):
"""Identifies pyrimidine runs"""
return sequence.find_with_regex("([CTUY]{%d,})" % min_length,
ignore=ignore)
|
gregcaporaso/scikit-bio
|
skbio/sequence/_nucleotide_mixin.py
|
Python
|
bsd-3-clause
| 11,243
|
[
"scikit-bio"
] |
518a2c43cd5d3837b14a211e9ac2de6dee32ca12db66d96f15fa9e2fba1191e7
|
# -*- coding: utf-8 -*-
#!/usr/bin/python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007 Thom Sturgill
# Copyright (C) 2007-2009 Brian G. Matherly
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2008 Jason Simanek
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Web Calendar generator.
"""
#------------------------------------------------------------------------
# python modules
#------------------------------------------------------------------------
from functools import partial
import os, codecs, shutil, re, sys
import datetime, calendar
#------------------------------------------------------------------------
# Set up logging
#------------------------------------------------------------------------
import logging
log = logging.getLogger(".WebPage")
#------------------------------------------------------------------------
# GRAMPS module
#------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
ngettext = glocale.translation.ngettext # else "nearby" comments are ignored
from gramps.gen.lib import Date, Name, NameType, Person
from gramps.gen.lib.date import Today
from gramps.gen.const import PROGRAM_NAME, URL_HOMEPAGE, USER_HOME
from gramps.version import VERSION
from gramps.gen.constfunc import win
from gramps.gen.config import config
from gramps.gen.plug.report import Report
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gen.plug.report import MenuReportOptions
from gramps.gen.plug.report import stdoptions
from gramps.gen.plug.menu import (BooleanOption, NumberOption, StringOption,
EnumeratedListOption, FilterOption,
PersonOption, DestinationOption, NoteOption)
from gramps.gen.utils.config import get_researcher
from gramps.gen.utils.alive import probably_alive
from gramps.gen.datehandler import displayer as _dd
from gramps.gen.display.name import displayer as _nd
import gramps.plugins.lib.libholiday as libholiday
from gramps.plugins.lib.libhtml import Html, xml_lang
from gramps.plugins.lib.libhtmlconst import _CHARACTER_SETS, _CC, _COPY_OPTIONS
from gramps.gui.pluginmanager import GuiPluginManager
from gramps.gen.lib.date import gregorian
# import styled notes from
# src/plugins/lib/libhtmlbackend.py
from gramps.plugins.lib.libhtmlbackend import HtmlBackend
#------------------------------------------------------------------------
# constants
#------------------------------------------------------------------------
# full clear line for proper styling
fullclear = Html("div", class_ = "fullclear", inline = True)
# Web page filename extensions
_WEB_EXT = ['.html', '.htm', '.shtml', '.php', '.php3', '.cgi']
# Calendar stylesheet names
_CALENDARSCREEN = 'calendar-screen.css'
_CALENDARPRINT = 'calendar-print.css'
PLUGMAN = GuiPluginManager.get_instance()
CSS = PLUGMAN.process_plugin_data('WEBSTUFF')
#------------------------------------------------------------------------
#
# WebCalReport
#
#------------------------------------------------------------------------
class WebCalReport(Report):
"""
Create WebCalReport object that produces the report.
"""
def __init__(self, database, options, user):
Report.__init__(self, database, options, user)
self._user = user
stdoptions.run_private_data_option(self, options.menu)
# class to do conversion of styled notes to html markup
self._backend = HtmlBackend()
self.options = options
mgobn = lambda name:options.menu.get_option_by_name(name).get_value()
self.html_dir = mgobn('target')
self.title_text = mgobn('title')
filter_option = options.menu.get_option_by_name('filter')
self.filter = filter_option.get_filter()
self.name_format = mgobn('name_format')
self.ext = mgobn('ext')
self.copy = mgobn('cright')
self.css = mgobn('css')
self.country = mgobn('country')
self.start_dow = mgobn('start_dow')
self.multiyear = mgobn('multiyear')
self.start_year = mgobn('start_year')
self.end_year = mgobn('end_year')
self.maiden_name = mgobn('maiden_name')
self.alive = mgobn('alive')
self.birthday = mgobn('birthdays')
self.anniv = mgobn('anniversaries')
self.home_link = mgobn('home_link')
self.month_notes = [mgobn('note_' + month)
for month in ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
'aug', 'sep', 'oct', 'nov', 'dec']]
self.encoding = mgobn('encoding')
self.fullyear = mgobn('fullyear')
self.makeoneday = mgobn('makeoneday')
# identify researcher name and e-mail address
# as NarrativeWeb already does
researcher = get_researcher()
self.author = researcher.name
if self.author:
self.author = self.author.replace(',,,', '')
self.email = researcher.email
# set to today's date
self.today = Today()
self.warn_dir = True # Only give warning once.
self.link_to_narweb = mgobn('link_to_narweb')
self.narweb_prefix = mgobn('prefix')
# self.calendar is a dict; key is the month number
# Each entry in the dict is also a dict; key is the day number.
# The day dict is a list of things to display for that day.
# These things are: birthdays and anniversaries
self.calendar = {}
calendar.setfirstweekday(dow_gramps2iso[self.start_dow])
def get_note_format(self, note):
"""
will get the note from the database, and will return either the
styled text or plain note
"""
# retrieve the body of the note
note_text = note.get()
# styled notes
htmlnotetext = self.styled_note(note.get_styledtext(),
note.get_format())
text = htmlnotetext or Html("p", note_text)
# return text of the note to its callers
return text
#################################################
# Will produce styled notes for WebCal by using:
# src/plugins/lib/libhtmlbackend.py
#################################################
def styled_note(self, styledtext, format):
"""
styledtext : assumed a StyledText object to write
format : = 0 : Flowed, = 1 : Preformatted
style_name : name of the style to use for default presentation
"""
text = str(styledtext)
if not text:
return ''
s_tags = styledtext.get_tags()
#FIXME: following split should be regex to match \n\s*\n instead?
markuptext = self._backend.add_markup_from_styled(text, s_tags,
split='\n\n')
htmllist = Html("div", id="grampsstylednote")
if format == 1:
#preformatted, retain whitespace.
#so use \n\n for paragraph detection
#FIXME: following split should be regex to match \n\s*\n instead?
htmllist += Html('pre', indent=None, inline = True)
for line in markuptext.split('\n\n'):
htmllist += Html("p")
for realline in line.split('\n'):
htmllist += realline
htmllist += Html('br')
elif format == 0:
#flowed
#FIXME: following split should be regex to match \n\s*\n instead?
for line in markuptext.split('\n\n'):
htmllist += Html("p")
htmllist += line
return htmllist
def copy_file(self, from_fname, to_fname, to_dir = ''):
"""
Copy a file from a source to a (report) destination.
If to_dir is not present and if the target is not an archive,
then the destination directory will be created.
Normally 'to_fname' will be just a filename, without directory path.
'to_dir' is the relative path name in the destination root. It will
be prepended before 'to_fname'.
"""
dest = os.path.join(self.html_dir, to_dir, to_fname)
destdir = os.path.dirname(dest)
if not os.path.isdir(destdir):
os.makedirs(destdir)
if from_fname != dest:
shutil.copyfile(from_fname, dest)
elif self.warn_dir:
self._user.warn(
_("Possible destination error") + "\n" +
_("You appear to have set your target directory "
"to a directory used for data storage. This "
"could create problems with file management. "
"It is recommended that you consider using "
"a different directory to store your generated "
"web pages."))
self.warn_dir = False
config.set('paths.website-directory',
os.path.dirname(self.html_dir) + os.sep)
def add_day_item(self, text, year, month, day, event):
"""
        adds birthdays, anniversaries, and holidays to their respective lists
text -- line to be added
year, month, day -- date to add the text to
event -- one of 'BirthDay', 'Anniversary', or 'Holiday'
"""
# This may happen for certain "about" dates.
# Use first day of the month
if day == 0:
day = 1
# determine which dictionary to use???
if event in ['Birthday', 'Anniversary']:
month_dict = self.calendar.get(month, {})
else:
month_dict = self.holidays.get(month, {})
day_list = month_dict.get(day, [])
if month > 0:
try:
event_date = Date(year, month, day)
except ValueError:
event_date = Date.EMPTY
else:
event_date = Date.EMPTY # Incomplete date....
day_list.append((text, event, event_date))
month_dict[day] = day_list
# determine which dictionary to add it to???
if event in ['Birthday', 'Anniversary']:
self.calendar[month] = month_dict
else:
self.holidays[month] = month_dict
def __get_holidays(self, year):
# _('translation')
with self._user.progress(_("Web Calendar Report"),
(_('Calculating Holidays for year %04d') % year),
365) as step:
""" Get the holidays for the specified country and year """
holiday_table = libholiday.HolidayTable()
country = holiday_table.get_countries()[self.country]
holiday_table.load_holidays(year, country)
for month in range(1, 13):
for day in range(1, 32):
holiday_names = holiday_table.get_holidays(month, day)
for holiday_name in holiday_names:
self.add_day_item(holiday_name, year, month, day, 'Holiday')
step()
def copy_calendar_files(self):
"""
Copies all the necessary stylesheets and images for these calendars
"""
# Copy the screen stylesheet
if self.css and self.css != 'No style sheet':
fname = CSS[self.css]["filename"]
self.copy_file(fname, _CALENDARSCREEN, "css")
# copy Navigation Menu Layout if Blue or Visually is being used
if CSS[self.css]["navigation"]:
# copy horizontal menus...
fname = CSS["Horizontal-Menus"]["filename"]
self.copy_file(fname, "calendar-menus.css", "css")
# copy print stylesheet
fname = CSS["Print-Default"]["filename"]
self.copy_file(fname, _CALENDARPRINT, "css")
imgs = []
# Mainz stylesheet graphics
        # will only be used if Mainz is selected as the stylesheet
imgs += CSS[self.css]["images"]
# copy copyright image
# the proper way would be to call "filename", but it is NOT working...
if 0 < self.copy <= len(_CC):
imgs += [CSS["Copyright"]["filename"]]
# copy Gramps favicon #2
imgs += [CSS["favicon2"]["filename"]]
for from_path in imgs:
fdir, fname = os.path.split(from_path)
self.copy_file(from_path, fname, "images")
def create_file(self, fname, subdir):
"""
Create a file in the html_dir tree.
If the directory does not exist, create it.
fname -- filename to be created
subdir -- any subdirs to be added
"""
fname = os.path.join(self.html_dir, subdir, fname)
if not _has_webpage_extension(fname):
fname += self.ext
destdir = os.path.dirname(fname)
if not os.path.isdir(destdir):
os.makedirs(destdir)
if sys.version_info[0] < 3:
            # In python 2.x, the data written by of.write() is generally of
# type 'str' (i.e. 8-bit strings), except for cases where (at
# least) one of the objects being converted by a '%' operator is
# unicode (e.g. the "Generated by" line or the _META3 line), in
# which case the data being written is of type 'unicode' (See
# http://docs.python.org/2/library/stdtypes.html#string-
# formatting). The data written to the file is encoded according
# to self.encoding
of = codecs.EncodedFile(open(fname, 'w'), 'utf-8',
self.encoding, 'xmlcharrefreplace')
else:
# In python 3, the data that is written by of.write() is always
# of type 'str' (i.e. unicode text).
of = open(fname, 'w', encoding=self.encoding,
errors='xmlcharrefreplace')
return of
def close_file(self, of):
""" will close whatever filename is passed to it """
of.close()
def write_header(self, nr_up, title, body_id = None, add_print = True):
"""
This creates the header for the Calendars
'nr_up' - number of directory levels up, started from current page, to the
root of the directory tree (i.e. to self.html_dir).
title -- to be inserted into page header section
add_print -- whether to add printer stylesheet or not
* only webcalendar() and one_day() only!
"""
# number of subdirectories up to reach root
subdirs = ['..'] * nr_up
        # Header constants
xmllang = xml_lang()
_META1 = 'name ="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=1"'
_META2 = 'name ="apple-mobile-web-app-capable" content="yes"'
_META3 = 'name="generator" content="%s %s %s"' % (
PROGRAM_NAME, VERSION, URL_HOMEPAGE)
_META4 = 'name="author" content="%s"' % self.author
# create additional meta tags
meta = Html("meta", attr = _META1) + (
Html("meta", attr = _META2, indent = False),
Html("meta", attr = _META3, indent =False),
Html("meta", attr = _META4, indent = False)
)
# begin each html page...
page, head, body = Html.page(title,
self.encoding,
xmllang)
# add body id tag if not None
if body_id is not None:
body.attr = "id = '%(idtag)s'" % { 'idtag' : body_id }
# GRAMPS favicon
fname1 = "/".join(subdirs + ["images", "favicon2.ico"])
# _CALENDARSCREEN stylesheet
fname2 = "/".join(subdirs + ["css", _CALENDARSCREEN])
# links for GRAMPS favicon and stylesheets
links = Html("link", rel = 'shortcut icon', href = fname1, type = "image/x-icon") + (
Html("link",href = fname2, type = "text/css", media = "screen", rel = "stylesheet", indent = False)
)
        # add horizontal menu if css == Blue or Visually because there are no menus
if CSS[self.css]["navigation"]:
fname = "/".join(subdirs + ["css", "calendar-menus.css"])
links.extend(
Html("link", href = fname, type = "text/css", media = "screen", rel = "stylesheet", indent = False)
)
# add printer stylesheet to webcalendar() and one_day() only
if add_print:
fname = "/".join(subdirs + ["css", _CALENDARPRINT])
links.extend(
Html("link",href = fname,type = "text/css", media = "print", rel = "stylesheet", indent = False)
)
# add meta tags and links to head section
head += (meta, links)
# start header section and page title...
with Html("div", id = "header", role = "Title-n-Navigation") as header:
header += Html("h1", title, id = "SiteTitle", inline = True)
# Created for ?
msg = None
if self.author and self.email:
msg = _('the "WebCal" will be the potential-email Subject|'
'Created for %(html_email_author_start)s'
'WebCal%(html_email_author_end)s') % {
'html_email_author_start' :
'<a href="mailto:' + self.email + '?subject=' ,
'html_email_author_end' :
'">' + self.author + '</a>' }
elif self.author:
msg = _('Created for %(author)s') % {'author' : self.author}
if msg:
header += Html("p", msg, id = "CreatorInfo")
body += header
return page, body
def year_navigation(self, nr_up, currentsection):
"""
        This will create the year navigation menu bar for up to eighteen (18) years
nr_up = number of directories up to reach root directory
currentsection = proper styling of this navigation bar
"""
# limit number of years to eighteen (18) years and only one row of years
nyears = ((self.end_year - self.start_year) + 1)
num_years = nyears if 0 < nyears < 19 else 18
# begin year division and begin unordered list
with Html("div", id = "subnavigation", role = "subnavigation") as submenu:
unordered = Html("ul")
for cal_year in range(self.start_year, (self.start_year + num_years)):
url = ''
# begin subdir level
subdirs = ['..'] * nr_up
subdirs.append(str(cal_year))
# each year will link to current month.
# this will always need an extension added
full_month_name = _dd.long_months[self.today.get_month() ]
# Note. We use '/' here because it is a URL, not a OS dependent
# pathname.
url = '/'.join(subdirs + [full_month_name]) + self.ext
hyper = Html("a", str(cal_year), href = url, title = str(cal_year))
# Figure out if we need <li class="CurrentSection"> or just plain <li>
check_cs = str(cal_year) == currentsection and 'class = "CurrentSection"' or False
if check_cs:
unordered.extend(
Html("li", hyper, attr = check_cs, inline = True)
)
else:
unordered.extend(
Html("li", hyper, inline = True)
)
submenu += unordered
return submenu
def month_navigation(self, nr_up, year, currentsection, add_home):
"""
Will create and display the navigation menu bar
of = calendar filename being created
nr_up = number of directories up to reach root directory
year = year being created
currentsection = month name being created for proper CSS styling
use_home = if creating a link to home
-- a link to root directory of website
"""
navs = []
# An optional link to a home page
if self.home_link:
navs.append((self.home_link, _('Home'), add_home))
navs.extend(
(_dd.long_months[month], _dd.short_months[month], True) for month in range(1, 13) )
# Add a link for year_glance() if requested
navs.append(('fullyearlinked', _('Year Glance'), self.fullyear))
# remove menu items if they are not True
navs = [(u, n) for u, n, c in navs if c]
# begin month subnavigation
with Html("div", class_ = "wrapper", id = "nav", role = "navigation") as navigation:
with Html("div", class_ = "container") as container:
unordered = Html("ul", class_ = "menu")
for url_fname, nav_text in navs:
# Note. We use '/' here because it is a URL, not a OS dependent pathname
# need to leave home link alone, so look for it ...
url = url_fname
add_subdirs = True
if not (url.startswith('http:') or url.startswith('/')):
add_subdirs = not any(url.endswith(ext)
for ext in _WEB_EXT)
# whether to add subdirs or not???
if add_subdirs:
subdirs = ['..'] * nr_up
subdirs.append(str(year))
url = '/'.join(subdirs + [url_fname])
if not _has_webpage_extension(url):
url += self.ext
# Figure out if we need <li class="CurrentSection"> or just plain <li>
check_cs = url_fname == currentsection and 'class = "CurrentSection"' or False
if url == self.home_link:
myTitle = _("NarrativeWeb Home")
elif url_fname == 'fullyearlinked':
myTitle = _('Full year at a Glance')
else:
myTitle = _(url_fname)
hyper = Html("a", nav_text, href = url, name = url_fname, title = myTitle)
if check_cs:
unordered.extend(
Html("li", hyper, attr = check_cs, inline = True)
)
else:
unordered.extend(
Html("li", hyper, inline = True)
)
container += unordered
navigation += container
return navigation
def calendar_build(self, cal, year, month):
"""
This does the work of building the calendar
@param: cal - either "yg" year_glance(), or "wc" webcalendar()
@param: year -- year being created
@param: month - month number 1, 2, .., 12
"""
# define names for long and short month names
full_month_name = _dd.long_months[month]
abbr_month_name = _dd.short_months[month]
# dow (day-of-week) uses Gramps numbering, sunday => 1, etc
start_dow = self.start_dow
col2day = [(x-1)%7+1 for x in range(start_dow, start_dow + 7)]
def get_class_for_daycol(col):
""" Translate a Gramps day number into a HTMLclass """
day = col2day[col]
if day == 1:
return "weekend sunday"
elif day == 7:
return "weekend saturday"
return "weekday"
def get_name_for_daycol(col):
""" Translate a Gramps day number into a HTMLclass """
day = col2day[col]
return day_names[day]
# Note. gen.datehandler has sunday => 1, monday => 2, etc
# We slice out the first empty element.
day_names = _dd.long_days # use self._dd.long_days when set_locale is used...
def __get_previous_month_day(year, month, day_col):
if month == 1:
prevmonth = calendar.monthcalendar((year - 1), 12)
else:
prevmonth = calendar.monthcalendar(year, (month - 1))
num_weeks = len(prevmonth)
lastweek_prevmonth = prevmonth[(num_weeks - 1)]
previous_month_day = lastweek_prevmonth[day_col]
# return previous month day number based on day_col
# day_col is based on range(0 - 6)
return previous_month_day
def __get_next_month_day(year, month, day_col):
if month == 12:
nextmonth = calendar.monthcalendar((year + 1), 1)
else:
nextmonth = calendar.monthcalendar(year, (month + 1))
firstweek_nextmonth = nextmonth[0]
next_month_day = firstweek_nextmonth[day_col]
# return next month day number based on day_col
# day_col is based on range(0 - 6)
return next_month_day
# Begin calendar head. We'll use the capitalized name, because here it
# seems appropriate for most countries.
month_name = full_month_name.capitalize()
th_txt = month_name
if cal == 'wc': # webcalendar()
if not self.multiyear:
th_txt = '%s %04d' % (month_name, year)
# begin calendar table and table head
with Html("table", class_ = "calendar", id = month_name, role = "Calendar-Grid") as table:
thead = Html("thead")
table += thead
trow = Html("tr") + (
Html("th", th_txt, class_ ='monthName', colspan=7, inline = True)
)
thead += trow
# Calendar weekday names header
trow = Html("tr")
thead += trow
for day_col in range(7):
dayclass = get_class_for_daycol(day_col)
dayname = get_name_for_daycol(day_col)
trow += Html("th", class_ =dayclass, inline = True) + (
Html('abbr', dayname[0], title = dayname) )
# begin table body
tbody = Html("tbody")
table += tbody
# get first of the month and month information
current_date, current_ord, monthinfo = get_first_day_of_month(year, month)
# begin calendar table rows, starting week0
nweeks = len(monthinfo)
for week_row in range(0, nweeks):
week = monthinfo[week_row]
# if you look this up in wikipedia, the first week is called week0
trow = Html("tr", class_ = "week%02d" % week_row)
tbody += trow
# begin calendar day column
for day_col in range(0, 7):
dayclass = get_class_for_daycol(day_col)
# day number, can also be a zero -- a day before or after month
day = week[day_col]
# start the beginning variable for <td>, table cell
tcell_id = "%s%02d" % (abbr_month_name, day)
# add calendar date division
datediv = Html("div", day, class_ = "date", inline = True)
### a day in the previous or next month ###
if day == 0:
# day in previous/ next month
specday = __get_previous_month_day(year, month, day_col) if week_row == 0 \
else __get_next_month_day(year, month, day_col)
specclass = "previous " if week_row == 0 else "next "
specclass += dayclass
# continue table cell, <td>, without id tag
tcell = Html("td", class_ = specclass, inline = True) + (
Html("div", specday, class_ = "date", inline = True) )
# normal day number in current month
else:
thisday = datetime.date.fromordinal(current_ord)
# Something this month
if thisday.month == month:
holiday_list = self.holidays.get(month, {}).get(thisday.day, [])
bday_anniv_list = self.calendar.get(month, {}).get(thisday.day, [])
# date is an instance because of subtracting abilities in date.py
event_date = Date(thisday.year, thisday.month, thisday.day)
# get events for this day
day_list = get_day_list(event_date, holiday_list, bday_anniv_list)
# is there something this day?
if day_list:
hilightday = 'highlight ' + dayclass
tcell = Html("td", id = tcell_id, class_ = hilightday)
# Year at a Glance
if cal == "yg":
# make one day pages and hyperlink
if self.makeoneday:
# create yyyymmdd date string for
# "One Day" calendar page filename
fname_date = '%04d%02d%02d' % (year,month,day) + self.ext
# create hyperlink to one_day()
tcell += Html("a", datediv, href = fname_date, inline = True)
# only year_glance() needs this to create the one_day() pages
self.one_day(event_date, fname_date, day_list)
# just year_glance(), but no one_day() pages
else:
# continue table cell, <td>, without id tag
tcell = Html("td", class_ = hilightday, inline = True) + (
# adds date division
Html("div", day, class_ = "date", inline = True)
)
# WebCal
else:
# add date to table cell
tcell += datediv
# list the events
unordered = Html("ul")
tcell += unordered
for nyears, date, text, event in day_list:
unordered += Html("li", text, inline = False
if event == 'Anniversary' else True)
# no events for this day
else:
# create empty day with date
tcell = Html("td", class_ = dayclass, inline = True) + (
# adds date division
Html("div", day, class_ = "date", inline = True)
)
# nothing for this month
else:
tcell = Html("td", class_ = dayclass) + (
# adds date division
Html("div", day, class_ = "date", inline = True)
)
# attach table cell to table row
# close the day column
trow += tcell
# change day number
current_ord += 1
if cal == "yg":
for weeks in range(nweeks, 6):
# each calendar must have six weeks for proper styling and alignment
with Html("tr", class_ = "week%02d" % (weeks + 1)) as six_weeks:
tbody += six_weeks
for emptydays in range(7):
six_weeks += Html("td", class_ = "emptyDays", inline = True)
# return calendar table to its callers
return table
def webcalendar(self, year):
"""
This method provides information and header/ footer to the calendar month
year -- year being created
"""
# do some error correcting if needed
if self.multiyear:
if self.end_year < self.start_year:
self.end_year = self.start_year
nr_up = 1 # Number of directory levels up to get to self.html_dir / root
with self._user.progress(_("Web Calendar Report"),
_('Formatting months ...'), 12) as step:
for month in range(1, 13):
cal_fname = _dd.long_months[month]
of = self.create_file(cal_fname, str(year))
# Add xml, doctype, meta and stylesheets
# body has already been added to webcal already once
webcal, body = self.write_header(nr_up, self.title_text)
# create Year Navigation menu
if (self.multiyear and ((self.end_year - self.start_year) > 0)):
body += self.year_navigation(nr_up, str(year))
# Create Month Navigation Menu
# identify currentsection for proper highlighting
currentsection = _dd.long_months[month]
body += self.month_navigation(nr_up, year, currentsection, True)
# build the calendar
content = Html("div", class_="content", id = "WebCal")
body += content
monthly_calendar = self.calendar_build("wc", year, month)
content += monthly_calendar
# create note section for webcalendar()
                # One has to be subtracted because the array starts at zero, but January = 1
note = self.month_notes[month-1].strip()
if note:
note = self.database.get_note_from_gramps_id(note)
note = self.get_note_format(note)
# table foot section
cal_foot = Html("tfoot")
monthly_calendar += cal_foot
trow = Html("tr") + (
Html("td", note, colspan=7, inline = True)
)
cal_foot += trow
# create blank line for stylesheets
# create footer division section
footer = self.write_footer(nr_up)
body += (fullclear, footer)
# send calendar page to web output
# and close the file
self.XHTMLWriter(webcal, of)
step()
def year_glance(self, year):
"""
This method will create the Full Year At A Glance Page...
year -- year being created
"""
nr_up = 1 # Number of directory levels up to get to root
# generate progress pass for "Year At A Glance"
with self._user.progress(_("Web Calendar Report"),
_('Creating Year At A Glance calendar'), 12) as step:
of = self.create_file('fullyearlinked', str(year))
# page title
title = _("%(year)d, At A Glance") % {'year' : year}
# Create page header
# body has already been added to yearglance already once
yearglance, body = self.write_header(nr_up, title, "fullyearlinked", False)
# create Year Navigation menu
if (self.multiyear and ((self.end_year - self.start_year) > 0)):
body += self.year_navigation(nr_up, str(year))
# Create Month Navigation Menu
# identify currentsection for proper highlighting
body += self.month_navigation(nr_up, year, "fullyearlinked", True)
msg = (_('This calendar is meant to give you access '
'to all your data at a glance compressed into one page. Clicking '
'on a date will take you to a page that shows all the events for '
'that date, if there are any.\n'))
# page description
content = Html("div", class_ = "content", id = "YearGlance")
body += content
content += Html("p", msg, id='description')
for month in range(1, 13):
# build the calendar
monthly_calendar = self.calendar_build("yg", year, month)
content += monthly_calendar
# increase progress bar
step()
# create blank line for stylesheets
# write footer section
footer = self.write_footer(nr_up)
body += (fullclear, footer)
# send calendar page to web output
# and close the file
self.XHTMLWriter(yearglance, of)
def one_day(self, event_date, fname_date, day_list):
"""
This method creates the One Day page for "Year At A Glance"
event_date -- date for the listed events
fname_date -- filename date from calendar_build()
day_list -- a combination of both dictionaries used to create one day;
each entry is (nyears, date, text, event), which is needed for figuring
the age or years married for each year being created...
"""
nr_up = 1 # number of directory levels up to get to root
# get year and month from event_date for use in this section
year = event_date.get_year()
month = event_date.get_month()
od = self.create_file(fname_date, str(year))
# page title
title = _('One Day Within A Year')
# create page header
oneday, body = self.write_header(nr_up, title, "OneDay")
# create Year Navigation menu
if (self.multiyear and ((self.end_year - self.start_year) > 0)):
body += self.year_navigation(nr_up, str(year))
# Create Month Navigation Menu
# identify currentsection for proper highlighting
currentsection = _dd.long_months[month]
body += self.month_navigation(nr_up, year, currentsection, True)
# set date display as in user preferences
content = Html("div", class_="content", id = "OneDay")
body += content
content += Html("h3", _dd.display(event_date), inline = True)
# list the events
ordered = Html("ol")
content += ordered
for nyears, date, text, event in day_list:
ordered += Html("li", text, inline = False if event == 'Anniversary' else True)
# create blank line for stylesheets
# write footer section
footer = self.write_footer(nr_up)
body += (fullclear, footer)
# send calendar page to web output
# and close the file
self.XHTMLWriter(oneday, od)
def build_url_fname_html(self, fname, subdir=None, prefix=None):
return self.build_url_fname(fname, subdir, prefix) + self.ext
def build_url_fname(self, fname, subdir, prefix = None):
"""
Create part of the URL given the filename and optionally the subdirectory.
If the subdirectory is given, then two extra levels of subdirectory are inserted
between 'subdir' and the filename. The reason is to prevent directories with
too many entries.
If 'prefix' is set, then it is inserted in front of the result.
The extension is added to the filename as well.
Notice that we do NOT use os.path.join() because we're creating a URL.
Imagine we run gramps on Windows (heaven forbid); we don't want to
see backslashes in the URL.
"""
if win():
fname = fname.replace('\\',"/")
subdirs = self.build_subdirs(subdir, fname)
return (prefix or '') + "/".join(subdirs + [fname])
def build_subdirs(self, subdir, fname):
"""
If subdir is given, then two extra levels of subdirectory are inserted
between 'subdir' and the filename. The reason is to prevent directories with
too many entries.
For example, this may return ['ppl', '8', '1'] given 'ppl', "aec934857df74d36618"
"""
subdirs = []
if subdir:
subdirs.append(subdir)
subdirs.append(fname[-1].lower())
subdirs.append(fname[-2].lower())
return subdirs
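# Illustrative usage (hypothetical values, not part of the original module):
# the two extra directory levels come from the last two characters of the
# filename, so a Gramps handle under 'ppl' splits out like this:
#
#   >>> self.build_subdirs('ppl', 'aec934857df74d36618')
#   ['ppl', '8', '1']
#   >>> self.build_subdirs(None, 'aec934857df74d36618')
#   []
#
# build_url_fname() then joins these pieces with '/' to form the URL path.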
def get_name(self, person, maiden_name = None):
"""
Return the person's name; if maiden_name is given it replaces the surname,
unless a married name is listed, in which case that name is used.
person -- person to get short name from
maiden_name -- either a woman's maiden name or man's surname
"""
# Get all of a person's names:
primary_name = person.primary_name
married_name = None
names = [primary_name] + person.get_alternate_names()
for name in names:
if int(name.get_type()) == NameType.MARRIED:
married_name = name
break
# Now, decide which to use:
if maiden_name is not None:
if married_name is not None:
name = Name(married_name)
else:
name = Name(primary_name)
surname_obj = name.get_primary_surname()
surname_obj.set_surname(maiden_name)
else:
name = Name(primary_name)
name.set_display_as(self.name_format)
return _nd.display_name(name)
def collect_data(self, this_year):
"""
This method runs through the data, and collects the relevant dates
and text.
"""
db = self.database
people = db.iter_person_handles()
with self._user.progress(_("Web Calendar Report"),
_('Applying Filter...'),
db.get_number_of_people()) as step:
people = self.filter.apply(db, people, step)
with self._user.progress(_("Web Calendar Report"),
_("Reading database..."), len(people)) as step:
for person in map(db.get_person_from_handle, people):
step()
family_list = person.get_family_handle_list()
birth_ref = person.get_birth_ref()
birth_date = Date.EMPTY
if birth_ref:
birth_event = db.get_event_from_handle(birth_ref.ref)
birth_date = birth_event.get_date_object()
# determine birthday information???
if (self.birthday and birth_date is not Date.EMPTY and birth_date.is_valid()):
birth_date = gregorian(birth_date)
year = birth_date.get_year() or this_year
month = birth_date.get_month()
day = birth_date.get_day()
# date to figure if someone is still alive
# current year of calendar; month and day are their birth month and birth day
prob_alive_date = Date(this_year, month, day)
# add some things to handle maiden name:
father_surname = None # husband, actually
if person.gender == Person.FEMALE:
# get husband's last name:
if self.maiden_name in ['spouse_first', 'spouse_last']:
if family_list:
if self.maiden_name == 'spouse_first':
fhandle = family_list[0]
else:
fhandle = family_list[-1]
fam = db.get_family_from_handle(fhandle)
father_handle = fam.get_father_handle()
mother_handle = fam.get_mother_handle()
if mother_handle == person.handle:
if father_handle:
father = db.get_person_from_handle(father_handle)
if father is not None:
father_surname = _get_regular_surname(person.gender,
father.get_primary_name())
short_name = self.get_name(person, father_surname)
alive = probably_alive(person, db, prob_alive_date)
if (self.alive and alive) or not self.alive:
# add link to NarrativeWeb
if self.link_to_narweb:
text = str(Html("a", short_name,
href = self.build_url_fname_html(person.handle, "ppl",
prefix = self.narweb_prefix)))
else:
text = short_name
self.add_day_item(text, year, month, day, 'Birthday')
# add anniversary if requested
if self.anniv:
for fhandle in family_list:
fam = db.get_family_from_handle(fhandle)
father_handle = fam.get_father_handle()
mother_handle = fam.get_mother_handle()
if father_handle == person.handle:
spouse_handle = mother_handle
else:
continue # with the next family if this person is not the father
if spouse_handle:
spouse = db.get_person_from_handle(spouse_handle)
if spouse:
spouse_name = self.get_name(spouse)
short_name = self.get_name(person)
# will return a marriage event or False if not married any longer
marriage_event = get_marriage_event(db, fam)
if marriage_event:
event_date = marriage_event.get_date_object()
if event_date is not Date.EMPTY and event_date.is_valid():
event_date = gregorian(event_date)
year = event_date.get_year()
month = event_date.get_month()
day = event_date.get_day()
# date to figure if someone is still alive
prob_alive_date = Date(this_year, month, day)
if self.link_to_narweb:
spouse_name = str(Html("a", spouse_name,
href = self.build_url_fname_html(spouse_handle, 'ppl',
prefix = self.narweb_prefix)))
short_name = str(Html("a", short_name,
href = self.build_url_fname_html(person.handle, 'ppl',
prefix = self.narweb_prefix)))
alive1 = probably_alive(person, db, prob_alive_date)
alive2 = probably_alive(spouse, db, prob_alive_date)
if ((self.alive and alive1 and alive2) or not self.alive):
text = _('%(spouse)s and %(person)s') % {
'spouse' : spouse_name,
'person' : short_name}
self.add_day_item(text, year, month, day, 'Anniversary')
def write_footer(self, nr_up):
"""
Writes the footer section of the pages
'nr_up' - number of directory levels up, starting from the current page, to the
root of the directory tree (i.e. to self.html_dir).
"""
# begin calendar footer
with Html("div", id = "footer", role = "Footer-End") as footer:
# Display date as user set in preferences
msg = _('Generated by %(gramps_home_html_start)s'
'Gramps%(html_end)s on %(date)s') % {
'gramps_home_html_start' :
'<a href="' + URL_HOMEPAGE + '">' ,
'html_end' : '</a>' ,
'date' : _dd.display(Today()) }
footer += Html("p", msg, id = 'createdate')
copy_nr = self.copy
text = ''
if copy_nr == 0:
if self.author:
text = "© %s %s" % (self.today.get_year(), self.author)
elif 0 < copy_nr < len(_CC):
subdirs = ['..'] * nr_up
# Note. We use '/' here because it is a URL, not a OS dependent pathname
fname = '/'.join(subdirs + ['images'] + ['somerights20.gif'])
text = _CC[copy_nr] % {'gif_fname' : fname}
else:
text = "© %s %s" % (self.today.get_year(), self.author)
footer += Html("p", text, id = 'copyright')
# return footer to its callers
return footer
def XHTMLWriter(self, page, of):
"""
This function is simply to make the web page look pretty and readable
It is not for the browser, but for us, humans
"""
# writes the file out from the page variable; Html instance
# This didn't work for some reason, but it does in NarWeb:
#page.write(partial(print, file=of.write))
page.write(lambda line: of.write(line + '\n'))
# close the file now...
self.close_file(of)
def write_report(self):
"""
The short method that runs through each month and creates a page.
"""
# get data from database for birthdays/ anniversaries
self.collect_data(self.start_year)
# Copy all files for the calendars being created
self.copy_calendar_files()
if self.multiyear:
# limit number of years to eighteen (18) years and only one row of years
nyears = ((self.end_year - self.start_year) + 1)
num_years = nyears if 0 < nyears < 19 else 18
for cal_year in range(self.start_year, (self.start_year + num_years)):
# initialize the holidays dict to fill:
self.holidays = {}
# get the information, zero is equal to None
if self.country != 0:
self.__get_holidays(cal_year)
# create webcalendar() calendar pages
self.webcalendar(cal_year)
# create "Year At A Glance" and
# "One Day" calendar pages
if self.fullyear:
self.year_glance(cal_year)
# a single year
else:
cal_year = self.start_year
self.holidays = {}
# get the information, first from holidays:
if self.country != 0:
self.__get_holidays(cal_year)
# create webcalendar() calendar pages
self.webcalendar(cal_year)
# create "Year At A Glance" and
# "One Day" calendar pages
if self.fullyear:
self.year_glance(cal_year)
# ---------------------------------------------------------------------------------------
# WebCalOptions; Creates the Menu
#----------------------------------------------------------------------------------------
class WebCalOptions(MenuReportOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name, dbase):
self.__db = dbase
self.__pid = None
self.__filter = None
self.__links = None
self.__prefix = None
MenuReportOptions.__init__(self, name, dbase)
def add_menu_options(self, menu):
"""
Add options to the menu for the web calendar.
"""
self.__add_report_options(menu)
self.__add_content_options(menu)
self.__add_notes_options(menu)
self.__add_advanced_options(menu)
def __add_report_options(self, menu):
"""
Options on the "Report Options" tab.
"""
category_name = _("Report Options")
dbname = self.__db.get_dbname()
default_dir = dbname + "_WEBCAL"
target = DestinationOption( _("Destination"),
os.path.join(config.get('paths.website-directory'),
default_dir))
target.set_help( _("The destination directory for the web files"))
target.set_directory_entry(True)
menu.add_option(category_name, "target", target)
title = StringOption(_('Calendar Title'), _('My Family Calendar'))
title.set_help(_("The title of the calendar"))
menu.add_option(category_name, "title", title)
self.__filter = FilterOption(_("Filter"), 0)
self.__filter.set_help(
_("Select filter to restrict people that appear on calendar"))
menu.add_option(category_name, "filter", self.__filter)
self.__filter.connect('value-changed', self.__filter_changed)
self.__pid = PersonOption(_("Filter Person"))
self.__pid.set_help(_("The center person for the filter"))
menu.add_option(category_name, "pid", self.__pid)
self.__pid.connect('value-changed', self.__update_filters)
self.__update_filters()
# We must figure out the value of the first option before we can
# create the EnumeratedListOption
fmt_list = _nd.get_name_format()
defaultnum = _nd.get_default_format()
default = 0
for ind,val in enumerate(fmt_list):
if val[0] == defaultnum:
default = ind
break
name_format = EnumeratedListOption(_("Name format"),
fmt_list[default][0])
for num, name, fmt_str, act in fmt_list:
name_format.add_item(num, name)
name_format.set_help(_("Select the format to display names"))
menu.add_option(category_name, "name_format", name_format)
ext = EnumeratedListOption(_("File extension"), ".html" )
for etype in _WEB_EXT:
ext.add_item(etype, etype)
ext.set_help( _("The extension to be used for the web files"))
menu.add_option(category_name, "ext", ext)
cright = EnumeratedListOption(_('Copyright'), 0 )
for index, copt in enumerate(_COPY_OPTIONS):
cright.add_item(index, copt)
cright.set_help( _("The copyright to be used for the web files"))
menu.add_option(category_name, "cright", cright)
css_list = sorted([(CSS[key]["translation"], CSS[key]["id"])
for key in list(CSS.keys())
if CSS[key]["user"]])
css = EnumeratedListOption(_('StyleSheet'), css_list[0][1])
for css_item in css_list:
css.add_item(css_item[1], css_item[0])
css.set_help( _('The stylesheet to be used for the web pages'))
menu.add_option(category_name, "css", css)
def __add_content_options(self, menu):
"""
Options on the "Content Options" tab.
"""
category_name = _("Content Options")
# set to today's date for use in menu, etc.
today = Today()
self.__multiyear = BooleanOption(_('Create multiple year calendars'), False)
self.__multiyear.set_help(_('Whether to create Multiple year calendars or not.'))
menu.add_option(category_name, 'multiyear', self.__multiyear)
self.__multiyear.connect('value-changed', self.__multiyear_changed)
self.__start_year = NumberOption(_('Start Year for the Calendar(s)'), today.get_year(),
1900, 3000)
self.__start_year.set_help(_('Enter the starting year for the calendars '
'between 1900 - 3000'))
menu.add_option(category_name, 'start_year', self.__start_year)
self.__end_year = NumberOption(_('End Year for the Calendar(s)'), today.get_year(),
1900, 3000)
self.__end_year.set_help(_('Enter the ending year for the calendars '
'between 1900 - 3000.'))
menu.add_option(category_name, 'end_year', self.__end_year)
self.__multiyear_changed()
country = EnumeratedListOption(_('Country for holidays'), 0 )
holiday_table = libholiday.HolidayTable()
countries = holiday_table.get_countries()
countries.sort()
if (len(countries) == 0 or
(len(countries) > 0 and countries[0] != '')):
countries.insert(0, '')
count = 0
for c in countries:
country.add_item(count, c)
count += 1
country.set_help(_("Holidays will be included for the selected "
"country"))
menu.add_option(category_name, "country", country)
maiden_name = EnumeratedListOption(_("Birthday surname"), "own")
maiden_name.add_item('spouse_first', _("Wives use husband's surname "
"(from first family listed)"))
maiden_name.add_item('spouse_last', _("Wives use husband's surname "
"(from last family listed)"))
maiden_name.add_item("own", _("Wives use their own surname"))
maiden_name.set_help(_("Select married women's displayed surname"))
menu.add_option(category_name, "maiden_name", maiden_name)
# Default selection ????
start_dow = EnumeratedListOption(_("First day of week"), 1)
for count in range(1, 8):
start_dow.add_item(count, _dd.long_days[count].capitalize())
start_dow.set_help(_("Select the first day of the week for the calendar"))
menu.add_option(category_name, "start_dow", start_dow)
dbname = self.__db.get_dbname()
default_link = '../../' + dbname + "_NAVWEB/index.html"
home_link = StringOption(_('Home link'), default_link)
home_link.set_help(_("The link to be included to direct the user to "
"the main page of the web site"))
menu.add_option(category_name, "home_link", home_link)
stdoptions.add_private_data_option(menu, category_name, default=False)
alive = BooleanOption(_("Include only living people"), True)
alive.set_help(_("Include only living people in the calendar"))
menu.add_option(category_name, "alive", alive)
birthdays = BooleanOption(_("Include birthdays"), True)
birthdays.set_help(_("Include birthdays in the calendar"))
menu.add_option(category_name, "birthdays", birthdays)
anniversaries = BooleanOption(_("Include anniversaries"), True)
anniversaries.set_help(_("Include anniversaries in the calendar"))
menu.add_option(category_name, "anniversaries", anniversaries)
def __add_notes_options(self, menu):
"""
Options on the "Months Notes" tabs.
"""
category_name = _("Jan - Jun Notes")
note_jan = NoteOption(_('January Note'))
note_jan.set_help(_("The note for the month of January"))
menu.add_option(category_name, "note_jan", note_jan)
note_feb = NoteOption(_('February Note'))
note_feb.set_help(_("The note for the month of February"))
menu.add_option(category_name, "note_feb", note_feb)
note_mar = NoteOption(_('March Note'))
note_mar.set_help(_("The note for the month of March"))
menu.add_option(category_name, "note_mar", note_mar)
note_apr = NoteOption(_('April Note'))
note_apr.set_help(_("The note for the month of April"))
menu.add_option(category_name, "note_apr", note_apr)
note_may = NoteOption(_('May Note'))
note_may.set_help(_("The note for the month of May"))
menu.add_option(category_name, "note_may", note_may)
note_jun = NoteOption(_('June Note'))
note_jun.set_help(_("The note for the month of June"))
menu.add_option(category_name, "note_jun", note_jun)
category_name = _("Jul - Dec Notes")
note_jul = NoteOption(_('July Note'))
note_jul.set_help(_("The note for the month of July"))
menu.add_option(category_name, "note_jul", note_jul)
note_aug = NoteOption(_('August Note'))
note_aug.set_help(_("The note for the month of August"))
menu.add_option(category_name, "note_aug", note_aug)
note_sep = NoteOption(_('September Note'))
note_sep.set_help(_("The note for the month of September"))
menu.add_option(category_name, "note_sep", note_sep)
note_oct = NoteOption(_('October Note'))
note_oct.set_help(_("The note for the month of October"))
menu.add_option(category_name, "note_oct", note_oct)
note_nov = NoteOption(_('November Note'))
note_nov.set_help(_("The note for the month of November"))
menu.add_option(category_name, "note_nov", note_nov)
note_dec = NoteOption(_('December Note'))
note_dec.set_help(_("The note for the month of December"))
menu.add_option(category_name, "note_dec", note_dec)
def __add_advanced_options(self, menu):
"""
Options for the advanced menu
"""
category_name = _('Advanced Options')
encoding = EnumeratedListOption(_('Character set encoding'), _CHARACTER_SETS[0][1])
for eopt in _CHARACTER_SETS:
encoding.add_item(eopt[1], eopt[0])
encoding.set_help( _('The encoding to be used for the web files'))
menu.add_option(category_name, "encoding", encoding)
fullyear = BooleanOption(_('Create "Year At A Glance" Calendar'), False)
fullyear.set_help(_('Whether to create A one-page mini calendar '
'with dates highlighted'))
menu.add_option(category_name, 'fullyear', fullyear)
makeoneday = BooleanOption(_('Create one day event pages for'
' Year At A Glance calendar'), False)
makeoneday.set_help(_('Whether to create one day pages or not'))
menu.add_option(category_name, 'makeoneday', makeoneday)
self.__links = BooleanOption(_('Link to Narrated Web Report'), False)
self.__links.set_help(_('Whether to link data to web report or not'))
menu.add_option(category_name, 'link_to_narweb', self.__links)
self.__links.connect('value-changed', self.__links_changed)
dbname = self.__db.get_dbname()
default_prefix = '../../' + dbname + "_NAVWEB/"
self.__prefix = StringOption(_('Link prefix'), default_prefix)
self.__prefix.set_help(_("A Prefix on the links to take you to "
"Narrated Web Report"))
menu.add_option(category_name, "prefix", self.__prefix)
self.__links_changed()
def __update_filters(self):
"""
Update the filter list based on the selected person
"""
gid = self.__pid.get_value()
person = self.__db.get_person_from_gramps_id(gid)
filter_list = ReportUtils.get_person_filters(person, False)
self.__filter.set_filters(filter_list)
def __filter_changed(self):
"""
Handle filter change. If the filter is not specific to a person,
disable the person option
"""
filter_value = self.__filter.get_value()
if 1 <= filter_value <= 4:
# Filters 1, 2, 3 and 4 rely on the center person
self.__pid.set_available(True)
else:
# The rest don't
self.__pid.set_available(False)
def __multiyear_changed(self):
"""
Handles enabling or disabling the end year option when multiple year calendars are toggled.
"""
self.__start_year.set_available(True)
if self.__multiyear.get_value():
self.__end_year.set_available(True)
else:
self.__end_year.set_available(False)
def __links_changed(self):
"""
Handle checkbox change.
"""
if self.__links.get_value():
self.__prefix.set_available(True)
else:
self.__prefix.set_available(False)
def _get_regular_surname(sex, name):
"""
Returns a name string built from the components of the Name instance.
"""
surname = name.get_surname()
suffix = name.get_suffix()
if suffix:
surname = surname + ", " + suffix
return surname
# Simple utility list to convert Gramps day-of-week numbering
# to calendar.firstweekday numbering
dow_gramps2iso = [ -1, calendar.SUNDAY, calendar.MONDAY, calendar.TUESDAY,
calendar.WEDNESDAY, calendar.THURSDAY, calendar.FRIDAY,
calendar.SATURDAY]
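# Illustrative note (an assumption about usage, not from the original source):
# Python's calendar module numbers Monday as 0 through Sunday as 6, while the
# Gramps scheme above is 1-based and Sunday-first, so for example:
#
#   >>> import calendar
#   >>> dow_gramps2iso[1] == calendar.SUNDAY   # Gramps 1 (Sunday) -> 6
#   True
#   >>> dow_gramps2iso[2] == calendar.MONDAY   # Gramps 2 (Monday) -> 0
#   True
#
# A value from this list would typically be passed to
# calendar.setfirstweekday() before calendar.monthcalendar() is called.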
def get_marriage_event(db, family):
"""
marriage_event will either be the marriage event or False
"""
marriage_event = False
for event_ref in family.get_event_ref_list():
event = db.get_event_from_handle(event_ref.ref)
if event.type.is_marriage:
marriage_event = event
elif event.type.is_divorce:
continue
# return the marriage event or False to its caller
return marriage_event
def get_first_day_of_month(year, month):
"""
Compute the first day to display for this month.
It can also be a day in the previous month.
"""
# first day of the month
current_date = datetime.date(year, month, 1)
# monthinfo is filled using standard Python library
# calendar.monthcalendar. It fills a list of 7-day-lists. The first day
# of the 7-day-list is determined by calendar.firstweekday.
monthinfo = calendar.monthcalendar(year, month)
current_ord = current_date.toordinal() - monthinfo[0].count(0)
return current_date, current_ord, monthinfo
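# Illustrative walk-through (assumed example dates, not from the original
# source): with calendar.setfirstweekday(calendar.SUNDAY), July 2014 starts
# on a Tuesday, so calendar.monthcalendar(2014, 7)[0] is
# [0, 0, 1, 2, 3, 4, 5] -- two leading zeros.  current_ord is therefore the
# ordinal of July 1st minus 2, i.e. Sunday June 29th, which is the first
# cell drawn in that month's calendar grid.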
def _has_webpage_extension(url):
"""
determine if a filename has an extension or not...
url = filename to be checked
"""
return any(url.endswith(ext) for ext in _WEB_EXT)
def get_day_list(event_date, holiday_list, bday_anniv_list):
"""
Will fill day_list and return it to its caller: calendar_build()
holiday_list -- list of holidays for event_date
bday_anniv_list -- list of birthdays and anniversaries
for event_date
event_date -- date for this day_list
'day_list' -- a combination of both dictionaries used to
create one day; each entry is (nyears, date, text, event),
which is needed for figuring the age or years married for
each day being created...
"""
# initialize day_list
day_list = []
##################################################################
# birthday/ anniversary on this day
# Date.EMPTY signifies an incomplete date for an event. See add_day_item()
bday_anniv_list = [(t, e, d) for t, e, d in bday_anniv_list
if d != Date.EMPTY]
# number of years have to be at least zero
bday_anniv_list = [(t, e, d) for t, e, d in bday_anniv_list
if (event_date.get_year() - d.get_year()) >= 0]
# a holiday
# zero will force holidays to be first in list
nyears = 0
for text, event, date in holiday_list:
day_list.append((nyears, date, text, event))
# birthday and anniversary list
for text, event, date in bday_anniv_list:
# number of years married, ex: 10
nyears = (event_date.get_year() - date.get_year())
# number of years for birthday, ex: 10 years
age_str = event_date - date
age_str.format(precision = 1, as_age=False)
# a birthday
if event == 'Birthday':
txt_str = (text + ', <em>'
# TRANSLATORS: expands to smth like "12 years old",
# where "12 years" is already localized to your language
+ (_('%s old') % str(age_str)
if nyears else _('birth'))
+ '</em>')
# an anniversary
elif event == "Anniversary":
if nyears == 0:
txt_str = _('%(couple)s, <em>wedding</em>') % {
'couple' : text}
else:
years_str = '<em>%s</em>' % nyears
# translators: leave all/any {...} untranslated
txt_str = ngettext("{couple}, {years} year anniversary",
"{couple}, {years} year anniversary",
nyears).format(couple=text, years=years_str)
txt_str = Html('span', txt_str, class_ = "yearsmarried")
day_list.append((nyears, date, txt_str, event))
# sort them based on number of years
# holidays will always be on top of event list
if sys.version_info[0] < 3:
day_list.sort()
else:
day_list = sorted(day_list, key=lambda x: (isinstance(x[0], str), x[0]))
# return to its caller calendar_build()
return day_list
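# Illustrative note (an interpretation of the sorting above, not from the
# original source): holidays are appended with nyears == 0, and the list is
# sorted on its first element, so a day such as
#
#   [(32, date, 'John Doe, 32 years old', 'Birthday'),
#    (0,  date, 'Independence Day', 'Holiday'),
#    (10, date, 'Jane and John, 10 year anniversary', 'Anniversary')]
#
# comes back holiday first, then the 10-year anniversary, then the 32-year
# birthday.  The Python 3 branch keys on (isinstance(x[0], str), x[0]) so a
# stray string in the first slot would sort after the integers instead of
# raising TypeError.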
|
pmghalvorsen/gramps_branch
|
gramps/plugins/webreport/webcal.py
|
Python
|
gpl-2.0
| 70,818
|
[
"Brian"
] |
1b611d0c7027d156c1b331bfa43112ddeab4a88ba77ff0056ebadd23d4b2bb34
|
# Copyright (c) 2003-2010 LOGILAB S.A. (Paris, FRANCE).
# Copyright (c) 2009-2010 Arista Networks, Inc.
# http://www.logilab.fr/ -- mailto:contact@logilab.fr
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""basic checker for Python code"""
from logilab import astng
from logilab.common.ureports import Table
from logilab.astng import are_exclusive
from pylint.interfaces import IASTNGChecker
from pylint.reporters import diff_string
from pylint.checkers import BaseChecker, EmptyReport
from pylint.checkers.utils import check_messages, clobber_in_except, is_inside_except
import re
# regex for class/function/variable/constant name
CLASS_NAME_RGX = re.compile('[A-Z_][a-zA-Z0-9]+$')
MOD_NAME_RGX = re.compile('(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$')
CONST_NAME_RGX = re.compile('(([A-Z_][A-Z0-9_]*)|(__.*__))$')
COMP_VAR_RGX = re.compile('[A-Za-z_][A-Za-z0-9_]*$')
DEFAULT_NAME_RGX = re.compile('[a-z_][a-z0-9_]{2,30}$')
# do not require a doc string on system methods
NO_REQUIRED_DOC_RGX = re.compile('__.*__')
del re
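# Illustrative examples (assumptions about typical matches, not from the
# original source) of names accepted by the regexps above:
#
#   CLASS_NAME_RGX   : "MyClass", "HTTPServer"   (but not "my_class")
#   MOD_NAME_RGX     : "my_module" or "MyModule"
#   CONST_NAME_RGX   : "MAX_RETRIES", "__version__"
#   DEFAULT_NAME_RGX : "do_something"  (3 to 31 chars, lowercase/underscores)
#
# NameChecker._check_name() below looks up the relevant pattern via
# getattr(self.config, node_type + '_rgx') and reports C0103 on a mismatch.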
def in_loop(node):
"""return True if the node is inside a kind of for loop"""
parent = node.parent
while parent is not None:
if isinstance(parent, (astng.For, astng.ListComp, astng.SetComp,
astng.DictComp, astng.GenExpr)):
return True
parent = parent.parent
return False
def in_nested_list(nested_list, obj):
"""return true if the object is an element of <nested_list> or of a nested
list
"""
for elmt in nested_list:
if isinstance(elmt, (list, tuple)):
if in_nested_list(elmt, obj):
return True
elif elmt == obj:
return True
return False
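# Illustrative usage (hypothetical values, not from the original module):
#
#   >>> in_nested_list([1, (2, [3, 4]), 5], 4)
#   True
#   >>> in_nested_list([1, (2, [3, 4]), 5], 6)
#   False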
def report_by_type_stats(sect, stats, old_stats):
"""make a report of
* percentage of different types documented
* percentage of different types with a bad name
"""
# percentage of different types documented and/or with a bad name
nice_stats = {}
for node_type in ('module', 'class', 'method', 'function'):
try:
total = stats[node_type]
except KeyError:
raise EmptyReport()
nice_stats[node_type] = {}
if total != 0:
try:
documented = total - stats['undocumented_'+node_type]
percent = (documented * 100.) / total
nice_stats[node_type]['percent_documented'] = '%.2f' % percent
except KeyError:
nice_stats[node_type]['percent_documented'] = 'NC'
try:
percent = (stats['badname_'+node_type] * 100.) / total
nice_stats[node_type]['percent_badname'] = '%.2f' % percent
except KeyError:
nice_stats[node_type]['percent_badname'] = 'NC'
lines = ('type', 'number', 'old number', 'difference',
'%documented', '%badname')
for node_type in ('module', 'class', 'method', 'function'):
new = stats[node_type]
old = old_stats.get(node_type, None)
if old is not None:
diff_str = diff_string(old, new)
else:
old, diff_str = 'NC', 'NC'
lines += (node_type, str(new), str(old), diff_str,
nice_stats[node_type].get('percent_documented', '0'),
nice_stats[node_type].get('percent_badname', '0'))
sect.append(Table(children=lines, cols=6, rheaders=1))
def redefined_by_decorator(node):
"""return True if the object is a method redefined via decorator.
For example:
@property
def x(self): return self._x
@x.setter
def x(self, value): self._x = value
"""
if node.decorators:
for decorator in node.decorators.nodes:
if (isinstance(decorator, astng.Getattr) and
getattr(decorator.expr, 'name', None) == node.name):
return True
return False
class _BasicChecker(BaseChecker):
__implements__ = IASTNGChecker
name = 'basic'
class BasicErrorChecker(_BasicChecker):
msgs = {
'E0100': ('__init__ method is a generator',
'Used when the special class method __init__ is turned into a '
'generator by a yield in its body.'),
'E0101': ('Explicit return in __init__',
'Used when the special class method __init__ has an explicit \
return value.'),
'E0102': ('%s already defined line %s',
'Used when a function / class / method is redefined.'),
'E0103': ('%r not properly in loop',
'Used when break or continue keywords are used outside a loop.'),
'E0104': ('Return outside function',
'Used when a "return" statement is found outside a function or '
'method.'),
'E0105': ('Yield outside function',
'Used when a "yield" statement is found outside a function or '
'method.'),
'E0106': ('Return with argument inside generator',
'Used when a "return" statement with an argument is found '
'inside a generator function or method (e.g. with some '
'"yield" statements).'),
'E0107': ("Use of the non-existent %s operator",
"Used when you attempt to use the C-style pre-increment or "
"pre-decrement operators -- and ++, which don't exist in Python."),
}
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
@check_messages('E0102')
def visit_class(self, node):
self._check_redefinition('class', node)
@check_messages('E0100', 'E0101', 'E0102', 'E0106')
def visit_function(self, node):
if not redefined_by_decorator(node):
self._check_redefinition(node.is_method() and 'method' or 'function', node)
# checks for max returns, branch, return in __init__
returns = node.nodes_of_class(astng.Return,
skip_klass=(astng.Function, astng.Class))
if node.is_method() and node.name == '__init__':
if node.is_generator():
self.add_message('E0100', node=node)
else:
values = [r.value for r in returns]
if [v for v in values if not (v is None or
(isinstance(v, astng.Const) and v.value is None)
or (isinstance(v, astng.Name) and v.name == 'None'))]:
self.add_message('E0101', node=node)
elif node.is_generator():
# make sure we don't mix non-None returns and yields
for retnode in returns:
if isinstance(retnode.value, astng.Const) and \
retnode.value.value is not None:
self.add_message('E0106', node=node,
line=retnode.fromlineno)
@check_messages('E0104')
def visit_return(self, node):
if not isinstance(node.frame(), astng.Function):
self.add_message('E0104', node=node)
@check_messages('E0105')
def visit_yield(self, node):
if not isinstance(node.frame(), astng.Function):
self.add_message('E0105', node=node)
@check_messages('E0103')
def visit_continue(self, node):
self._check_in_loop(node, 'continue')
@check_messages('E0103')
def visit_break(self, node):
self._check_in_loop(node, 'break')
@check_messages('E0107')
def visit_unaryop(self, node):
"""check for use of the non-existent ++ and -- operators"""
if ((node.op in '+-') and
isinstance(node.operand, astng.UnaryOp) and
(node.operand.op == node.op)):
self.add_message('E0107', node=node, args=node.op*2)
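# Illustrative note (an assumption about the AST shape, not from the original
# source): "++x" parses in Python as UnaryOp(+, UnaryOp(+, x)) -- two nested
# unary operators with the same sign -- which is exactly what the test above
# matches.  Hypothetical code such as
#
#   count = 0
#   ++count    # a silent no-op in Python
#
# would therefore be reported as E0107 with args '++'.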
def _check_in_loop(self, node, node_name):
"""check that a node is inside a for or while loop"""
_node = node.parent
while _node:
if isinstance(_node, (astng.For, astng.While)):
break
_node = _node.parent
else:
self.add_message('E0103', node=node, args=node_name)
def _check_redefinition(self, redeftype, node):
"""check for redefinition of a function / method / class name"""
defined_self = node.parent.frame()[node.name]
if defined_self is not node and not are_exclusive(node, defined_self):
self.add_message('E0102', node=node,
args=(redeftype, defined_self.fromlineno))
class BasicChecker(_BasicChecker):
"""checks for :
* doc strings
* modules / classes / functions / methods / arguments / variables name
* number of arguments, local variables, branches, returns and statements in
functions, methods
* required module attributes
* dangerous default values as arguments
* redefinition of function / method / class
* uses of the global statement
"""
__implements__ = IASTNGChecker
name = 'basic'
msgs = {
'W0101': ('Unreachable code',
'Used when there is some code behind a "return" or "raise" \
statement, which will never be accessed.'),
'W0102': ('Dangerous default value %s as argument',
'Used when a mutable value as list or dictionary is detected in \
a default value for an argument.'),
'W0104': ('Statement seems to have no effect',
'Used when a statement doesn\'t have (or at least seems to) \
any effect.'),
'W0105': ('String statement has no effect',
'Used when a string is used as a statement (which of course \
has no effect). This is a particular case of W0104 with its \
own message so you can easily disable it if you\'re using \
those strings as documentation, instead of comments.'),
'W0106': ('Expression "%s" is assigned to nothing',
'Used when an expression that is not a function call is assigned\
to nothing. Probably something else was intended.'),
'W0108': ('Lambda may not be necessary',
'Used when the body of a lambda expression is a function call \
on the same argument list as the lambda itself; such lambda \
expressions are in all but a few cases replaceable with the \
function being called in the body of the lambda.'),
'W0109': ("Duplicate key %r in dictionary",
"Used when a dictionary expression binds the same key multiple \
times."),
'W0122': ('Use of the exec statement',
'Used when you use the "exec" statement, to discourage its \
usage. That doesn\'t mean you can not use it !'),
'W0141': ('Used builtin function %r',
'Used when a black listed builtin function is used (see the '
'bad-function option). Usual black listed functions are the ones '
'like map or filter, where Python now offers some cleaner '
'alternative like list comprehension.'),
'W0142': ('Used * or ** magic',
'Used when a function or method is called using `*args` or '
'`**kwargs` to dispatch arguments. This doesn\'t improve '
'readability and should be used with care.'),
'W0150': ("%s statement in finally block may swallow exception",
"Used when a break or a return statement is found inside the \
finally clause of a try...finally block: the exceptions raised \
in the try clause will be silently swallowed instead of being \
re-raised."),
'W0199': ('Assert called on a 2-uple. Did you mean \'assert x,y\'?',
'A call of assert on a tuple will always evaluate to true if '
'the tuple is not empty, and will always evaluate to false if '
'it is.'),
'C0121': ('Missing required attribute "%s"', # W0103
'Used when an attribute required for modules is missing.'),
}
options = (('required-attributes',
{'default' : (), 'type' : 'csv',
'metavar' : '<attributes>',
'help' : 'Required attributes for module, separated by a '
'comma'}
),
('bad-functions',
{'default' : ('map', 'filter', 'apply', 'input'),
'type' :'csv', 'metavar' : '<builtin function names>',
'help' : 'List of builtins function names that should not be '
'used, separated by a comma'}
),
)
reports = ( ('RP0101', 'Statistics by type', report_by_type_stats), )
def __init__(self, linter):
_BasicChecker.__init__(self, linter)
self.stats = None
self._tryfinallys = None
def open(self):
"""initialize visit variables and statistics
"""
self._tryfinallys = []
self.stats = self.linter.add_stats(module=0, function=0,
method=0, class_=0)
def visit_module(self, node):
"""check module name, docstring and required arguments
"""
self.stats['module'] += 1
for attr in self.config.required_attributes:
if attr not in node:
self.add_message('C0121', node=node, args=attr)
def visit_class(self, node):
"""check module name, docstring and redefinition
increment branch counter
"""
self.stats['class'] += 1
@check_messages('W0104', 'W0105')
def visit_discard(self, node):
"""check for various kind of statements without effect"""
expr = node.value
if isinstance(expr, astng.Const) and isinstance(expr.value,
basestring):
# treat string statement in a separated message
self.add_message('W0105', node=node)
return
# ignore if this is :
# * a direct function call
# * the unique child of a try/except body
# * a yield (which are wrapped by a discard node in _ast XXX)
# warn W0106 if we have any underlying function call (we can't predict
# side effects), else W0104
if (isinstance(expr, (astng.Yield, astng.CallFunc)) or
(isinstance(node.parent, astng.TryExcept) and
node.parent.body == [node])):
return
if any(expr.nodes_of_class(astng.CallFunc)):
self.add_message('W0106', node=node, args=expr.as_string())
else:
self.add_message('W0104', node=node)
@check_messages('W0108')
def visit_lambda(self, node):
"""check whether or not the lambda is suspicious
"""
# if the body of the lambda is a call expression with the same
# argument list as the lambda itself, then the lambda is
# possibly unnecessary and at least suspicious.
if node.args.defaults:
# If the arguments of the lambda include defaults, then a
# judgment cannot be made because there is no way to check
# that the defaults defined by the lambda are the same as
# the defaults defined by the function called in the body
# of the lambda.
return
call = node.body
if not isinstance(call, astng.CallFunc):
# The body of the lambda must be a function call expression
# for the lambda to be unnecessary.
return
# XXX are lambda still different with astng >= 0.18 ?
# *args and **kwargs need to be treated specially, since they
# are structured differently between the lambda and the function
# call (in the lambda they appear in the args.args list and are
# indicated as * and ** by two bits in the lambda's flags, but
# in the function call they are omitted from the args list and
# are indicated by separate attributes on the function call node).
ordinary_args = list(node.args.args)
if node.args.kwarg:
if (not call.kwargs
or not isinstance(call.kwargs, astng.Name)
or node.args.kwarg != call.kwargs.name):
return
elif call.kwargs:
return
if node.args.vararg:
if (not call.starargs
or not isinstance(call.starargs, astng.Name)
or node.args.vararg != call.starargs.name):
return
elif call.starargs:
return
# The "ordinary" arguments must be in a correspondence such that:
# ordinary_args[i].name == call.args[i].name.
if len(ordinary_args) != len(call.args):
return
for i in xrange(len(ordinary_args)):
if not isinstance(call.args[i], astng.Name):
return
if node.args.args[i].name != call.args[i].name:
return
self.add_message('W0108', line=node.fromlineno, node=node)
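# Illustrative examples (hypothetical code, not from the original module):
#
#   sorted(data, key=lambda x: len(x))       # W0108 -- key=len would do
#   sorted(data, key=lambda x: len(x) + 1)   # body is not a bare call: no message
#   f = lambda *a, **kw: g(*a, **kw)         # same *args/**kwargs names: W0108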
def visit_function(self, node):
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
self.stats[node.is_method() and 'method' or 'function'] += 1
# check for dangerous default values as arguments
for default in node.args.defaults:
try:
value = default.infer().next()
except astng.InferenceError:
continue
if isinstance(value, (astng.Dict, astng.List)):
if value is default:
msg = default.as_string()
else:
msg = '%s (%s)' % (default.as_string(), value.as_string())
self.add_message('W0102', node=node, args=(msg,))
if value.qname() == '__builtin__.set':
if isinstance(default, astng.CallFunc):
msg = default.as_string()
else:
msg = '%s (%s)' % (default.as_string(), value.qname())
self.add_message('W0102', node=node, args=(msg,))
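# Illustrative example (hypothetical code, not from the original module):
#
#   def append_item(item, bucket=[]):    # W0102: dangerous default value []
#       bucket.append(item)
#       return bucket
#
# Dict literals and set() calls used as defaults are flagged the same way,
# since the single default object is shared between calls.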
@check_messages('W0101', 'W0150')
def visit_return(self, node):
"""1 - check if the node has a right sibling (if so, that's some
unreachable code)
2 - check if the node is inside the finally clause of a try...finally
block
"""
self._check_unreachable(node)
# Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, 'return', (astng.Function,))
@check_messages('W0101')
def visit_continue(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@check_messages('W0101', 'W0150')
def visit_break(self, node):
"""1 - check if the node has a right sibling (if so, that's some
unreachable code)
2 - check if the node is inside the finally clause of a try...finally
block
"""
# 1 - Does it have a right sibling?
self._check_unreachable(node)
# 2 - Is it inside the final body of a try...finally block?
self._check_not_in_finally(node, 'break', (astng.For, astng.While,))
@check_messages('W0101')
def visit_raise(self, node):
"""check if the node has a right sibling (if so, that's some unreachable
code)
"""
self._check_unreachable(node)
@check_messages('W0122')
def visit_exec(self, node):
"""just print a warning on exec statements"""
self.add_message('W0122', node=node)
@check_messages('W0141', 'W0142')
def visit_callfunc(self, node):
"""visit a CallFunc node -> check if this is not a blacklisted builtin
call and check for * or ** use
"""
if isinstance(node.func, astng.Name):
name = node.func.name
# ignore the name if it's not a builtin (i.e. not defined in the
# locals nor globals scope)
if not (name in node.frame() or
name in node.root()):
if name in self.config.bad_functions:
self.add_message('W0141', node=node, args=name)
if node.starargs or node.kwargs:
scope = node.scope()
if isinstance(scope, astng.Function):
toprocess = [(n, vn) for (n, vn) in ((node.starargs, scope.args.vararg),
(node.kwargs, scope.args.kwarg)) if n]
if toprocess:
for cfnode, fargname in toprocess[:]:
if getattr(cfnode, 'name', None) == fargname:
toprocess.remove((cfnode, fargname))
if not toprocess:
return # W0142 can be skipped
self.add_message('W0142', node=node.func)
@check_messages('W0199')
def visit_assert(self, node):
"""check the use of an assert statement on a tuple."""
if node.fail is None and isinstance(node.test, astng.Tuple) and \
len(node.test.elts) == 2:
self.add_message('W0199', line=node.fromlineno, node=node)
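# Illustrative example (hypothetical code, not from the original module):
#
#   assert (x > 0, 'x must be positive')   # W0199: asserts a 2-tuple, always true
#   assert x > 0, 'x must be positive'     # intended form, no message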
@check_messages('W0109')
def visit_dict(self, node):
"""check duplicate key in dictionary"""
keys = set()
for k, v in node.items:
if isinstance(k, astng.Const):
key = k.value
if key in keys:
self.add_message('W0109', node=node, args=key)
keys.add(key)
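# Illustrative example (hypothetical code, not from the original module):
#
#   colors = {'red': 1, 'blue': 2, 'red': 3}   # W0109: duplicate key 'red'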
def visit_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.append(node)
def leave_tryfinally(self, node):
"""update try...finally flag"""
self._tryfinallys.pop()
def _check_unreachable(self, node):
"""check unreachable code"""
unreach_stmt = node.next_sibling()
if unreach_stmt is not None:
self.add_message('W0101', node=unreach_stmt)
def _check_not_in_finally(self, node, node_name, breaker_classes=()):
"""check that a node is not inside a finally clause of a
try...finally statement.
If, before reaching the try...finally block, we find a parent whose type
is in breaker_classes, we skip the whole check."""
# if self._tryfinallys is empty, we're not in a try...finally block
if not self._tryfinallys:
return
# the node could be a grand-grand-...-child of the try...finally
_parent = node.parent
_node = node
while _parent and not isinstance(_parent, breaker_classes):
if hasattr(_parent, 'finalbody') and _node in _parent.finalbody:
self.add_message('W0150', node=node, args=node_name)
return
_node = _parent
_parent = _node.parent
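# Illustrative example (hypothetical code, not from the original module):
#
#   def read_config(path):
#       try:
#           return parse(path)      # parse() is a made-up helper
#       finally:
#           return {}               # W0150: return in finally swallows exceptions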
class NameChecker(_BasicChecker):
msgs = {
'C0102': ('Black listed name "%s"',
'Used when the name is listed in the black list (unauthorized \
names).'),
'C0103': ('Invalid name "%s" (should match %s)',
'Used when the name doesn\'t match the regular expression \
associated to its type (constant, variable, class...).'),
}
options = (('module-rgx',
{'default' : MOD_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'module names'}
),
('const-rgx',
{'default' : CONST_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'module level names'}
),
('class-rgx',
{'default' : CLASS_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'class names'}
),
('function-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'function names'}
),
('method-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'method names'}
),
('attr-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'instance attribute names'}
),
('argument-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'argument names'}),
('variable-rgx',
{'default' : DEFAULT_NAME_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'variable names'}
),
('inlinevar-rgx',
{'default' : COMP_VAR_RGX,
'type' :'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match correct '
'list comprehension / generator expression variable \
names'}
),
# XXX use set
('good-names',
{'default' : ('i', 'j', 'k', 'ex', 'Run', '_'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Good variable names which should always be accepted,'
' separated by a comma'}
),
('bad-names',
{'default' : ('foo', 'bar', 'baz', 'toto', 'tutu', 'tata'),
'type' :'csv', 'metavar' : '<names>',
'help' : 'Bad variable names which should always be refused, '
'separated by a comma'}
),
)
def open(self):
self.stats = self.linter.add_stats(badname_module=0,
badname_class=0, badname_function=0,
badname_method=0, badname_attr=0,
badname_const=0,
badname_variable=0,
badname_inlinevar=0,
badname_argument=0)
@check_messages('C0102', 'C0103')
def visit_module(self, node):
self._check_name('module', node.name.split('.')[-1], node)
@check_messages('C0102', 'C0103')
def visit_class(self, node):
self._check_name('class', node.name, node)
for attr, anodes in node.instance_attrs.items():
self._check_name('attr', attr, anodes[0])
@check_messages('C0102', 'C0103')
def visit_function(self, node):
self._check_name(node.is_method() and 'method' or 'function',
node.name, node)
# check arguments name
args = node.args.args
if args is not None:
self._recursive_check_names(args, node)
@check_messages('C0102', 'C0103')
def visit_assname(self, node):
"""check module level assigned names"""
frame = node.frame()
ass_type = node.ass_type()
if isinstance(ass_type, astng.Comprehension):
self._check_name('inlinevar', node.name, node)
elif isinstance(frame, astng.Module):
if isinstance(ass_type, astng.Assign) and not in_loop(ass_type):
self._check_name('const', node.name, node)
elif isinstance(ass_type, astng.ExceptHandler):
self._check_name('variable', node.name, node)
elif isinstance(frame, astng.Function):
# global introduced variable aren't in the function locals
if node.name in frame:
self._check_name('variable', node.name, node)
def _recursive_check_names(self, args, node):
"""check names in a possibly recursive list <arg>"""
for arg in args:
if isinstance(arg, astng.AssName):
self._check_name('argument', arg.name, node)
else:
self._recursive_check_names(arg.elts, node)
def _check_name(self, node_type, name, node):
"""check for a name using the type's regexp"""
if is_inside_except(node):
clobbering, _ = clobber_in_except(node)
if clobbering:
return
if name in self.config.good_names:
return
if name in self.config.bad_names:
self.stats['badname_' + node_type] += 1
self.add_message('C0102', node=node, args=name)
return
regexp = getattr(self.config, node_type + '_rgx')
if regexp.match(name) is None:
self.add_message('C0103', node=node, args=(name, regexp.pattern))
self.stats['badname_' + node_type] += 1
class DocStringChecker(_BasicChecker):
msgs = {
'C0111': ('Missing docstring', # W0131
'Used when a module, function, class or method has no docstring.\
Some special methods like __init__ doesn\'t necessary require a \
docstring.'),
'C0112': ('Empty docstring', # W0132
'Used when a module, function, class or method has an empty \
docstring (it would be too easy ;).'),
}
options = (('no-docstring-rgx',
{'default' : NO_REQUIRED_DOC_RGX,
'type' : 'regexp', 'metavar' : '<regexp>',
'help' : 'Regular expression which should only match '
'functions or classes name which do not require a '
'docstring'}
),
)
def open(self):
self.stats = self.linter.add_stats(undocumented_module=0,
undocumented_function=0,
undocumented_method=0,
undocumented_class=0)
def visit_module(self, node):
self._check_docstring('module', node)
def visit_class(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
self._check_docstring('class', node)
def visit_function(self, node):
if self.config.no_docstring_rgx.match(node.name) is None:
ftype = node.is_method() and 'method' or 'function'
if isinstance(node.parent.frame(), astng.Class):
overridden = False
# check if node is from a method overridden by its ancestor
for ancestor in node.parent.frame().ancestors():
if node.name in ancestor and \
isinstance(ancestor[node.name], astng.Function):
overridden = True
break
if not overridden:
self._check_docstring(ftype, node)
else:
self._check_docstring(ftype, node)
def _check_docstring(self, node_type, node):
"""check the node has a non empty docstring"""
docstring = node.doc
if docstring is None:
self.stats['undocumented_'+node_type] += 1
self.add_message('C0111', node=node)
elif not docstring.strip():
self.stats['undocumented_'+node_type] += 1
self.add_message('C0112', node=node)
class PassChecker(_BasicChecker):
"""check if the pass statement is really necessary"""
msgs = {'W0107': ('Unnecessary pass statement',
'Used when a "pass" statement that can be avoided is '
'encountered.'),
}
def visit_pass(self, node):
if len(node.parent.child_sequence(node)) > 1:
self.add_message('W0107', node=node)
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
|
pronto/dotfiles
|
.vim/pylibs/pylint/checkers/base.py
|
Python
|
bsd-2-clause
| 33,455
|
[
"VisIt"
] |
eeb3cf1f00445e04a916758ac74a7534cdd7baedbb74b7c32abaeb353a798edf
|
from django.test import TestCase
from myvoice.core.tests import factories
from .. import models
class TestDisplayLabel(TestCase):
Model = models.DisplayLabel
Factory = factories.DisplayLabel
def test_unicode(self):
"""Smoke test for string representation."""
obj = self.Factory.create(name='Test')
self.assertEqual(str(obj), 'Test')
class TestSurveyQuestionResponse(TestCase):
Model = models.SurveyQuestionResponse
Factory = factories.SurveyQuestionResponse
def setUp(self):
self.survey = factories.Survey(role=models.Survey.PATIENT_FEEDBACK)
self.question = factories.SurveyQuestion.create(
survey=self.survey, label='Test', categories="Yes\nNo")
def test_unicode(self):
"""Smoke test for string representation."""
obj = self.Factory.create(response='test', question=self.question)
self.assertEqual('test', str(obj))
def test_positive_response(self):
"""Test that positive response is saved for correct answer."""
response = factories.SurveyQuestionResponse.create(
question=self.question,
response='Yes')
self.assertTrue(response.positive_response)
def test_negative_response(self):
"""Test that positive response is not saved for wrong answer."""
response = factories.SurveyQuestionResponse.create(
question=self.question,
response='No')
self.assertIsNone(response.positive_response)
def test_last_negative(self):
"""Test that positive response is saved for last negatives too."""
question = factories.SurveyQuestion.create(
survey=self.survey,
label='Test1',
last_negative=True,
categories="one\ntwo\nthree\nbad")
response1 = factories.SurveyQuestionResponse.create(
question=question,
response='three')
self.assertTrue(response1.positive_response)
response2 = factories.SurveyQuestionResponse.create(
question=question,
response='bad')
self.assertIsNone(response2.positive_response)
class TestSurveyResponseVisit(TestCase):
"""Test saving survey question response affects visit indices."""
def setUp(self):
self.survey = factories.Survey(role=models.Survey.PATIENT_FEEDBACK)
# self.question = factories.SurveyQuestion.create(
# survey=self.survey, label='Test1', categories="Yes\nNo")
self.visit = factories.Visit.create()
def test_satisfaction(self):
"""Test that patient satisfaction is determined properly."""
visit1 = factories.Visit.create()
visit2 = factories.Visit.create()
question = factories.SurveyQuestion.create(
survey=self.survey, for_satisfaction=True, label='Test', categories="Yes\nNo")
factories.SurveyQuestionResponse.create(
question=question, response='Yes', visit=visit1)
self.assertTrue(visit1.satisfied)
factories.SurveyQuestionResponse.create(
question=question, response='No', visit=visit2)
self.assertFalse(visit2.satisfied)
def test_satisfaction_already_negative(self):
"""Test that patient satisfaction is not changed if already dissatisfied."""
visit = factories.Visit.create(satisfied=False)
question = factories.SurveyQuestion.create(
survey=self.survey, for_satisfaction=True, label='Test', categories="Yes\nNo")
factories.SurveyQuestionResponse.create(
question=question, response='Yes', visit=visit)
self.assertFalse(visit.satisfied)
def test_satisfaction_last_negative(self):
"""Test that patient satisfaction is determined properly
for questions whose answers are all positive save the last one."""
visit1 = factories.Visit.create()
visit2 = factories.Visit.create()
visit3 = factories.Visit.create()
question = factories.SurveyQuestion.create(
survey=self.survey,
for_satisfaction=True,
label='Test',
last_negative=True,
categories="Yes\nmaybe\nno")
factories.SurveyQuestionResponse.create(
question=question, response='Yes', visit=visit1)
self.assertTrue(visit1.satisfied)
factories.SurveyQuestionResponse.create(
question=question, response='maybe', visit=visit2)
self.assertTrue(visit2.satisfied)
factories.SurveyQuestionResponse.create(
question=question, response='no', visit=visit3)
self.assertFalse(visit3.satisfied)
def test_participation(self):
"""Test that patient survey participation is saved properly."""
visit1 = factories.Visit.create()
question = factories.SurveyQuestion.create(
survey=self.survey, label='Test')
factories.SurveyQuestionResponse.create(
question=question, response='no', visit=visit1)
self.assertTrue(visit1.survey_started)
def test_completion(self):
"""Test that patient survey completion is saved properly."""
visit1 = factories.Visit.create()
visit2 = factories.Visit.create()
question1 = factories.SurveyQuestion.create(
survey=self.survey, label='Test')
question2 = factories.SurveyQuestion.create(
survey=self.survey, last_required=True, label='Test1')
factories.SurveyQuestionResponse.create(
question=question1, response='no', visit=visit1)
factories.SurveyQuestionResponse.create(
question=question2, response='no', visit=visit2)
self.assertFalse(visit1.survey_completed)
self.assertTrue(visit2.survey_completed)
|
myvoice-nigeria/myvoice
|
myvoice/survey/tests/test_models.py
|
Python
|
bsd-2-clause
| 5,792
|
[
"VisIt"
] |
41a7638a392fa2e10d06def68edbef0f59d8dce8eed610a476d8e4c809862de3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# model.py
#
# Copyright 2012 Greg Green <greg@greg-UX31A>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
#
#
import sys, argparse
from os.path import abspath, expanduser
import matplotlib as mplib
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator, MaxNLocator
from matplotlib.patches import Rectangle
import numpy as np
import scipy
from scipy.integrate import quad
from scipy.interpolate import interp1d, InterpolatedUnivariateSpline, RectBivariateSpline
class TGalacticModel:
rho_0 = None
R0 = None
Z0 = None
H1, L1 = None, None
f, H2, L2 = None, None, None
fh, qh, nh, fh_outer, nh_outer, Rbr = None, None, None, None, None, None
H_mu, Delta_mu, mu_FeH_inf = None, None, None
def __init__(self, R0=8000., Z0=25., L1=2600., H1=300., f=0.12,
L2=3600., H2=900., fh=0.0051, qh=0.70, nh=-2.62,
nh_outer=-3.8, Rbr=27.8, rho_0=0.0058, Rep=500.,
H_mu=500., Delta_mu=0.55, mu_FeH_inf=-0.82,
LF_fname=expanduser('~/projects/bayestar/data/PSMrLF.dat')):
self.R0, self.Z0 = R0, Z0
self.L1, self.H1 = L1, H1
self.f, self.L2, self.H2 = f, L2, H2
self.fh, self.qh, self.nh, self.nh_outer, self.Rbr = fh, qh, nh, nh_outer, Rbr*1000.
self.Rep = Rep
self.rho_0 = 1. #rho_0
self.H_mu, self.Delta_mu, self.mu_FeH_inf = H_mu, Delta_mu, mu_FeH_inf
self.fh_outer = self.fh * (self.Rbr/self.R0)**(self.nh-self.nh_outer)
self.L_epsilon = 0.
# Bulge (Robin et al. 2003, i.e. Besancon)
self.R_c = 2540.
self.x_0 = 1590.
self.y_0 = 424.
self.z_0 = 424.
self.alpha = 78.9 * np.pi / 180.
self.beta = 3.5 * np.pi / 180.
self.gamma = 91.3 * np.pi / 180.
self.f_bulge = 1. # (Ratio of bulge to thin disk at the GC)
self.bulge_rot = rotation_matrix(self.alpha, self.beta, self.gamma)
# Drimmel & Spergel (2001)
self.H_ISM = 134.4
self.L_ISM = 2260.
self.dH_dR_ISM = 0.0148
self.R_flair_ISM = 4400.
self.data = np.loadtxt(abspath(LF_fname),
usecols=(0,1),
dtype=[('Mr','f4'), ('LF','f4')],
unpack=False)
self.Mr_min = np.min(self.data['Mr'])
self.Mr_max = np.max(self.data['Mr'])
self.LF = interp1d(self.data['Mr'], self.data['LF'], kind='linear')
#self.LF = InterpolatedUnivariateSpline(LF['Mr'], LF['LF'])
# Normalize dN/dV to value from LF in Solar neighborhood
LF_integral = quad(self.LF, self.Mr_min, self.Mr_max, epsrel=1.e-5, limit=200)[0]
#print LF_integral
self.rho_0 = LF_integral / self.rho_rz(self.R0, self.Z0)
def Cartesian_coords(self, DM, cos_l, sin_l, cos_b, sin_b):
d = 10.**(DM/5. + 1.)
x = self.R0 - cos_l*cos_b*d
y = -sin_l*cos_b*d
z = sin_b*d
return x, y, z
def Cartesian_2_cylindrical(self, x, y, z):
r = np.sqrt(x*x + y*y)
return r, z
def gal_2_cylindrical(self, l, b, DM):
cos_l, sin_l = np.cos(np.pi/180. * l), np.sin(np.pi/180. * l)
cos_b, sin_b = np.cos(np.pi/180. * b), np.sin(np.pi/180. * b)
x,y,z = self.Cartesian_coords(DM, cos_l, sin_l, cos_b, sin_b)
return self.Cartesian_2_cylindrical(x, y, z)
def rho_thin(self, r, z):
r_eff = np.sqrt(r*r + self.L_epsilon*self.L_epsilon)
return (
self.rho_0
* np.exp( - (np.abs(z+self.Z0) - np.abs(self.Z0)) / self.H1
- (r_eff-self.R0) / self.L1 )
)
def rho_thick(self, r, z):
r_eff = np.sqrt(r*r + self.L_epsilon*self.L_epsilon)
return (
self.rho_0 * self.f
* np.exp( - (np.abs(z+self.Z0) - np.abs(self.Z0)) / self.H2
- (r_eff-self.R0) / self.L2 )
)
def rho_halo(self, r, z):
r_eff2 = r*r + (z/self.qh)*(z/self.qh) + self.Rep*self.Rep
if type(r_eff2) == np.ndarray:
ret = np.empty(r_eff2.size, dtype=np.float64)
r_eff2.shape = (r.size,)
idx = (r_eff2 <= self.Rbr*self.Rbr)
ret[idx] = self.rho_0 * self.fh * np.power(r_eff2[idx]/self.R0/self.R0, self.nh/2.)
ret[~idx] = self.rho_0 * self.fh_outer * np.power(r_eff2[~idx]/self.R0/self.R0, self.nh_outer/2.)
ret.shape = r.shape
return ret
else:
if r_eff2 <= self.Rbr*self.Rbr:
return self.rho_0 * self.fh * (r_eff2/self.R0/self.R0)**(self.nh/2.)
else:
return self.rho_0 * self.fh_outer * (r_eff2/self.R0/self.R0)**(self.nh_outer/2.)
def Cartesian_2_bulge(self, x, y, z):
r = np.array([x, y, z])
shape = r.shape
r.shape = (3, r.size/3)
        ret = np.einsum('ij,jk->ik', self.bulge_rot, r)  # apply the rotation to each (x, y, z) column
ret.shape = shape
'''
if type(x) != np.ndarray:
ret = np.einsum('ij,j', self.bulge_rot, r)
elif type(x) == np.ndarray:
print self.bulge_rot.shape
print r.shape
ret = np.einsum('ij,j...->i...', self.bulge_rot, r)
else:
print 'type:', type(x)
raise TypeError('x is of wrong type')
'''
return ret
def rho_bulge(self, x, y, z):
x_b, y_b, z_b = self.Cartesian_2_bulge(x, y, z)
r_s = np.power( ((x_b/self.x_0)**2 + (y_b/self.y_0)**2)**2 + (z_b/self.z_0)**4, 0.25)
rho = np.exp(-0.5 * r_s**2)
if type(x) == np.ndarray:
idx = (x_b**2 + y_b**2 > self.R_c**2.)
d = np.sqrt(x_b[idx]**2 + y_b[idx]**2) - self.R_c
rho[idx] *= np.exp(-0.5 * (d / 500.)**2)
else:
if x_b**2. + y_b**2. > self.R_c**2.:
d = np.sqrt(x_b**2. + y_b**2.) - self.R_c
rho *= np.exp(-0.5 * (d/500.)**2.)
#return x_b**2 + y_b**2 - self.R_c**2
return self.f_bulge * rho
def H_ISM_of_R(self, r):
return self.H_ISM + self.dH_dR_ISM * np.where(r > self.R_flair_ISM, r - self.R_flair_ISM, 0.)
def rho_ISM(self, r, z):
#r_eff = np.sqrt(r*r + self.L_epsilon*self.L_epsilon)
r_eff = r
H = self.H_ISM_of_R(r)
rad_term_outer = np.exp(-r_eff / self.L_ISM)
rad_term_inner = ( np.exp(-0.5 * self.R0 / self.L_ISM)
* np.exp(-(r_eff - 0.5*self.R0)*(r_eff - 0.5*self.R0) / (0.25 * self.R0*self.R0)) )
rad_term = np.where(r_eff > 0.5 * self.R0, rad_term_outer, rad_term_inner)
h_term = 1. / np.power(np.cosh((z+self.Z0) / H), 2.)
return rad_term * h_term
def f_halo(self, DM, cos_l, sin_l, cos_b, sin_b):
x,y,z = self.Cartesian_coords(DM, cos_l, sin_l, cos_b, sin_b)
r = np.sqrt(x*x + y*y)
return self.rho_rz(r, z, component='halo') / self.rho(DM, cos_l, sin_l,
cos_b, sin_b)
def f_halo_bulge(self, DM, cos_l, sin_l, cos_b, sin_b):
x, y, z = self.Cartesian_coords(DM, cos_l, sin_l, cos_b, sin_b)
r = np.sqrt(x*x + y*y)
rho_disk = self.rho_rz(r, z, component='disk')
rho_halo = self.rho_rz(r, z, component='halo')
rho_bulge = self.rho_bulge(x, y, z)
rho_tot = rho_disk + rho_halo + rho_bulge
f_halo = rho_halo / rho_tot
f_bulge = rho_bulge / rho_tot
return f_halo, f_bulge
def rho_rz(self, r, z, component=None):
if component == 'disk':
return self.rho_thin(r,z) + self.rho_thick(r,z)
elif component == 'thin':
return self.rho_thin(r,z)
elif component == 'thick':
return self.rho_thick(r,z)
elif component == 'halo':
return self.rho_halo(r,z)
else:
return self.rho_thin(r,z) + self.rho_thick(r,z) + self.rho_halo(r,z)
def rho_xyz(self, x, y, z, component=None):
if component == None:
r = np.sqrt(x*x + y*y)
rho = self.rho_rz(r, z)
rho += self.rho_bulge(x, y, z)
return rho
elif component in ['disk', 'thin', 'thick', 'halo']:
r = np.sqrt(x*x + y*y)
return self.rho_rz(r, z, component=component)
elif component == 'bulge':
return self.rho_bulge(x, y, z)
else:
raise ValueError("Unrecognized Galactic component: '%s'" % component)
def rho(self, DM, cos_l, sin_l, cos_b, sin_b, component=None):
x,y,z = self.Cartesian_coords(DM, cos_l, sin_l, cos_b, sin_b)
return self.rho_xyz(x, y, z, component=component)
    def dn_dDM(self, DM, cos_l, sin_l, cos_b, sin_b, radius=1.,
               component=None, correct=False, m_max=23.):
dV_dDM = np.pi * radius**2. * dV_dDM_dOmega(DM)
dN_dDM_tmp = self.rho(DM, cos_l, sin_l, cos_b, sin_b, component) * dV_dDM
if correct:
dN_dDM_tmp *= self.dn_dDM_corr(DM, m_max)
return dN_dDM_tmp
def dn_dDM_corr(self, DM, m_max=23.):
Mr_max = m_max - DM
        if Mr_max < self.data['Mr'][0]:
            return 0.
        i_max = np.argmin(np.abs(self.data['Mr'] - Mr_max))
        return np.sum(self.data['LF'][:i_max+1])
def mu_FeH_D(self, z):
return self.mu_FeH_inf + self.Delta_mu*np.exp(-np.abs(z)/self.H_mu)
def p_FeH(self, FeH, DM, cos_l, sin_l, cos_b=None, sin_b=None):
x,y,z = DM, cos_l, sin_l
if (cos_b != None) and (sin_b != None):
x,y,z = self.Cartesian_coords(DM, cos_l, sin_l, cos_b, sin_b)
r = np.sqrt(x*x + y*y)
rho_halo_tmp = self.rho_halo(r,z)
f_halo = rho_halo_tmp / (rho_halo_tmp + self.rho_thin(r,z) + self.rho_thick(r,z))
# Disk metallicity
a = self.mu_FeH_D(z) - 0.067
p_D = 0.63*Gaussian(FeH, a, 0.2) + 0.37*Gaussian(FeH, a+0.14, 0.2)
# Halo metallicity
p_H = Gaussian(FeH, -1.46, 0.3)
return (1.-f_halo)*p_D + f_halo*p_H
def p_FeH_los(self, FeH, cos_l, sin_l, cos_b, sin_b, radius=1.,
DM_min=-5., DM_max=30.):
func = lambda x, Z: self.p_FeH(Z, x, cos_l, sin_l, cos_b, sin_b) * self.dn_dDM(x, cos_l, sin_l, cos_b, sin_b, radius)
normfunc = lambda x: self.dn_dDM(x, cos_l, sin_l, cos_b, sin_b, radius)
norm = quad(normfunc, DM_min, DM_max, epsrel=1.e-5, full_output=1)[0]
ret = np.empty(len(FeH), dtype='f8')
for i,Z in enumerate(FeH):
ret[i] = quad(func, DM_min, DM_max, args=Z, epsrel=1.e-2, full_output=1)[0]
return ret / norm
#
#return quad(func, DM_min, DM_max, epsrel=1.e-5)[0] / quad(normfunc, DM_min, DM_max, epsrel=1.e-5)[0]
def tot_num_stars(self, l, b, radius=1., component=None):
radius = np.pi / 180. * radius
l = np.pi / 180. * l
b = np.pi / 180. * b
cos_l, sin_l = np.cos(l), np.sin(l)
cos_b, sin_b = np.cos(b), np.sin(b)
dN_dDM_func = lambda DM: self.dn_dDM(DM, cos_l, sin_l, cos_b, sin_b,
component=component)
N_tot = np.pi * radius**2. * quad(dN_dDM_func, -5., 30., epsrel=1.e-5, limit=200)[0]
return N_tot
def dA_dmu(self, l, b, DM):
r, z = self.gal_2_cylindrical(l, b, DM)
return self.rho_ISM(r, z) * np.power(10., DM / 5.)
def EBV_prior(self, l, b, n_regions=20, EBV_per_kpc=0.2, sigma_bin=1.5, norm_dist=1.):
mu_0, mu_1 = 4., 19.
DM = np.linspace(mu_0, mu_1, n_regions+1)
Delta_DM = DM[1] - DM[0]
Delta_EBV = np.empty(n_regions+1, dtype='f8')
# Integrate from d = 0 to beginning of first distance bin
DM_fine = np.linspace(mu_0 - 10., mu_0, 1000)
Delta_EBV[0] = np.sum(self.dA_dmu(l, b, DM_fine)) * (10. / 1000.)
# Find Delta EBV in each distance bin
DM_fine = np.linspace(mu_0, mu_1, 64 * n_regions)
Delta_EBV_tmp = self.dA_dmu(l, b, DM_fine)
Delta_EBV[1:] = downsample_by_four(downsample_by_four(downsample_by_four(Delta_EBV_tmp))) * Delta_DM
# 1.5 orders of magnitude variance
#std_dev_coeff = np.array([3.6506, -0.047222, -0.021878, 0.0010066, -7.6386e-06])
#mean_bias_coeff = np.array([0.57694, 0.037259, -0.001347, -4.6156e-06])
# 1 order of magnitude variance
#std_dev_coeff = np.array([2.4022, -0.040931, -0.012309, 0.00039482, 3.1342e-06])
#mean_bias_coeff = np.array([0.52751, 0.022036, -0.0010742, 7.0748e-06])
# Calculate bias and std. dev. of reddening in each bin
dist = np.power(10., DM / 5. + 1.) # in pc
Delta_dist = np.hstack([dist[0], np.diff(dist)])
DM_equiv = 5. * (np.log10(Delta_dist) - 1.)
'''bias = (mean_bias_coeff[0] * DM_equiv
+ mean_bias_coeff[1] * DM_equiv * DM_equiv
+ mean_bias_coeff[2] * DM_equiv * DM_equiv * DM_equiv
+ mean_bias_coeff[3] * DM_equiv * DM_equiv * DM_equiv * DM_equiv)
sigma = (std_dev_coeff[0]
+ std_dev_coeff[1] * DM_equiv
+ std_dev_coeff[2] * DM_equiv * DM_equiv
+ std_dev_coeff[3] * DM_equiv * DM_equiv * DM_equiv
+ std_dev_coeff[4] * DM_equiv * DM_equiv * DM_equiv * DM_equiv)'''
sigma = sigma_bin * np.ones(DM_equiv.size)
bias = 0.
log_Delta_EBV = np.log(Delta_EBV) + bias
# Calculate mean reddening in each bin
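        # log-normal mean: E[exp(X)] = exp(mu + sigma**2 / 2), hence the
        # 0.5 * sigma * sigma term below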
mean_Delta_EBV = Delta_EBV * np.exp(bias + 0.5 * sigma * sigma)
mean_EBV = np.cumsum(mean_Delta_EBV)
# Normalize E(B-V) per kpc locally
'''DM_norm = 5. * (np.log10(norm_dist) - 1.)
DM_fine = np.linspace(DM_norm - 10., DM_norm, 1000)
Delta_DM = DM_fine[1] - DM_fine[0]
EBV_local = np.sum(self.dA_dmu(l, b, DM_fine)) * Delta_DM'''
ds_dmu = 10. * np.log(10.) / 5. * np.power(10., -10./5.)
EBV_local = self.dA_dmu(l, b, -10.) / ds_dmu
norm_dist = 1.
EBV_local *= np.exp(0.5 * sigma[0]**2.)
norm = 0.001 * EBV_per_kpc * norm_dist / EBV_local
#idx = np.max(np.where(dist <= norm_dist, np.arange(dist.size), -1.))
#print idx
#norm = 0.001 * EBV_per_kpc * dist[idx] / mean_EBV[idx]
mean_EBV *= norm
mean_Delta_EBV *= norm
log_Delta_EBV += np.log(norm)
idx = (log_Delta_EBV < -8.)
log_Delta_EBV[idx] = -8.
return DM, log_Delta_EBV, sigma, mean_Delta_EBV, norm
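# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): one way to
# evaluate the stellar number density along a sight line. The (l, b)
# direction and the distance-modulus grid are arbitrary choices, and building
# the model still requires the luminosity-function file given by LF_fname.
def _example_density_profile(l=90., b=10.):
    '''Illustrative only: stellar density versus distance modulus at (l, b).'''
    model = TGalacticModel()
    cos_l, sin_l = np.cos(np.pi/180. * l), np.sin(np.pi/180. * l)
    cos_b, sin_b = np.cos(np.pi/180. * b), np.sin(np.pi/180. * b)
    DM = np.linspace(4., 19., 16)
    return DM, model.rho(DM, cos_l, sin_l, cos_b, sin_b)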
def rotation_matrix(alpha, beta, gamma):
    Rx = np.array([[1., 0., 0.],
                   [0., np.cos(alpha), -np.sin(alpha)],
                   [0., np.sin(alpha), np.cos(alpha)]])
Ry = np.array([[ np.cos(beta), 0., np.sin(beta)],
[ 0., 1., 0.],
[-np.sin(beta), 0., np.cos(beta)]])
Rz = np.array([[np.cos(gamma), -np.sin(gamma), 0.],
[np.sin(gamma), np.cos(gamma), 0.],
[0., 0., 1.]])
R = np.einsum('ij,jk,kl', Rx, Ry, Rz)
return R
def downsample_by_four(x):
return 0.25 * (x[::4] + x[1::4] + x[2::4] + x[3::4])
def downsample_by_two(x):
return 0.5 * (x[:-1:2] + x[1::2])
def dV_dDM_dOmega(DM):
'''
Volume element of a beam at a given distance, per unit
distance modulus, per steradian.
'''
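    # 2.30258509 is ln(10); with d = 10**(DM/5 + 1) pc the volume element is
    # dV/(dDM dOmega) = (ln(10)/5) * d**3 = 1000*(ln(10)/5) * exp(3*ln(10)/5 * DM)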
return (1000.*2.30258509/5.) * np.exp(3.*2.30258509/5. * DM)
def Gaussian(x, mu=0., sigma=1.):
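    # 2.50662827 below is sqrt(2*pi), the usual normal-density normalization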
Delta = (x-mu)/sigma
return np.exp(-Delta*Delta/2.) / 2.50662827 / sigma
class TStellarModel:
'''
Loads the given stellar model, and provides access to interpolated
colors on (Mr, FeH) grid.
'''
def __init__(self, template_fname):
self.load_templates(template_fname)
def load_templates(self, template_fname):
'''
Load in stellar template colors from an ASCII file. The colors
should be stored in the following format:
#
# Arbitrary comments
#
# Mr FeH gr ri iz zy
#
-1.00 -2.50 0.5132 0.2444 0.1875 0.0298
-0.99 -2.50 0.5128 0.2442 0.1873 0.0297
...
or something similar. A key point is that there be a row
in the comments that lists the names of the colors. The code
identifies this row by the presence of both 'Mr' and 'FeH' in
the row, as above. The file must be whitespace-delimited, and
any whitespace will do (note that the whitespace is not required
to be regular).
'''
f = open(abspath(template_fname), 'r')
row = []
self.color_name = ['gr', 'ri', 'iz', 'zy']
for l in f:
line = l.rstrip().lstrip()
if len(line) == 0: # Empty line
continue
if line[0] == '#': # Comment
if ('Mr' in line) and ('FeH' in line):
try:
self.color_name = line.split()[3:]
except:
pass
continue
data = line.split()
if len(data) < 6:
print 'Error reading in stellar templates.'
print 'The following line does not have the correct number of entries (6 expected):'
print line
return 0
row.append([float(s) for s in data])
f.close()
template = np.array(row, dtype=np.float64)
# Organize data into record array
dtype = [('Mr','f4'), ('FeH','f4')]
for c in self.color_name:
dtype.append((c, 'f4'))
self.data = np.empty(len(template), dtype=dtype)
self.data['Mr'] = template[:,0]
self.data['FeH'] = template[:,1]
for i,c in enumerate(self.color_name):
self.data[c] = template[:,i+2]
self.MrFeH_bounds = [[np.min(self.data['Mr']), np.max(self.data['Mr'])],
[np.min(self.data['FeH']), np.max(self.data['FeH'])]]
# Produce interpolating class with data
self.Mr_coords = np.unique(self.data['Mr'])
self.FeH_coords = np.unique(self.data['FeH'])
self.interp = {}
for c in self.color_name:
tmp = self.data[c][:]
tmp.shape = (len(self.FeH_coords), len(self.Mr_coords))
self.interp[c] = RectBivariateSpline(self.Mr_coords,
self.FeH_coords,
tmp.T,
kx=3,
ky=3,
s=0)
def color(self, Mr, FeH, name=None):
'''
Return the colors, evaluated at the given points in
(Mr, FeH)-space.
Inputs:
Mr float or array of floats
FeH float or array of floats
name string, or list of strings, with names of colors to
return. By default, all colors are returned.
Output:
color numpy record array of colors
'''
if name == None:
name = self.get_color_names()
elif type(name) == str:
name = [name]
if type(Mr) == float:
Mr = np.array([Mr])
elif type(Mr) == list:
Mr = np.array(Mr)
if type(FeH) == float:
FeH = np.array([FeH])
elif type(FeH) == list:
FeH = np.array(FeH)
dtype = []
for c in name:
if c not in self.color_name:
raise ValueError('No such color in model: %s' % c)
dtype.append((c, 'f4'))
ret_color = np.empty(Mr.size, dtype=dtype)
for c in name:
ret_color[c] = self.interp[c].ev(Mr, FeH)
return ret_color
def absmags(self, Mr, FeH):
'''
Return the absolute magnitude in each bandpass corresponding to
(Mr, FeH).
Inputs:
Mr r-band absolute magnitude of the star(s) (float or numpy array)
FeH Metallicity of the star(s) (float or numpy array)
Output:
M Absolute magnitude in each band for each star (numpy record array)
'''
c = self.color(Mr, FeH)
dtype = [('g','f8'), ('r','f8'), ('i','f8'), ('z','f8'), ('y','f8')]
M = np.empty(c.shape, dtype=dtype)
M['r'] = Mr
M['g'] = c['gr'] + Mr
M['i'] = Mr - c['ri']
M['z'] = M['i'] - c['iz']
M['y'] = M['z'] - c['zy']
return M
def get_color_names(self):
'''
Return the names of the colors in the templates.
Ex.: For PS1 colors, this would return
['gr', 'ri', 'iz', 'zy']
'''
return self.color_name
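# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): querying
# interpolated colors from a template table. The file name below is a
# placeholder; any whitespace-delimited table in the format described in
# load_templates() will do.
def _example_stellar_colors(template_fname='PS1_templates.txt'):
    '''Illustrative only: interpolated colors on a small (Mr, FeH) grid.'''
    templates = TStellarModel(template_fname)
    Mr = [4.0, 5.0, 6.0]
    FeH = [-0.5, -0.5, -0.5]
    return templates.color(Mr, FeH, name=['gr', 'ri'])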
def get_SFD_map(fname='~/projects/bayestar/data/SFD_Ebv_512.fits', nside=64):
import pyfits
import healpy as hp
fname = expanduser(fname)
f = pyfits.open(fname)
EBV_ring = f[0].data[:]
f.close()
EBV_nest = hp.reorder(EBV_ring, r2n=True)
nside2_map = EBV_nest.size / 12
while nside2_map > nside * nside:
EBV_nest = downsample_by_four(EBV_nest)
nside2_map = EBV_nest.size / 12
#hp.mollview(np.log10(EBV_nest), nest=True)
#plt.show()
return EBV_nest
def min_max(x):
return np.min(x), np.max(x)
def plot_EBV_prior(nside=64):
import healpy as hp
model = TGalacticModel()
n = np.arange(12 * nside**2)
EBV = np.empty(n.size)
norm = np.empty(n.size)
for i in n:
t, p = hp.pixelfunc.pix2ang(nside, i, nest=True)
l = 180./np.pi * p
b = 90. - 180./np.pi * t
DM, log_Delta_EBV, sigma_log_Delta_EBV, mean_Delta_EBV, norm_tmp = model.EBV_prior(l, b)
EBV[i] = np.sum(mean_Delta_EBV)
norm[i] = norm_tmp
#print '(%.3f, %.3f): %.3f' % (l, b, EBV[i])
print ''
print np.mean(norm), np.std(norm)
# Compare to SFD
EBV_SFD = get_SFD_map(nside=nside)
#print np.mean(EBV_SFD / EBV)
#print np.mean(EBV_SFD) / np.mean(EBV)
#print np.std(EBV_SFD), np.std(EBV)
# Normalize for b < 10, l > 10
t, p = hp.pixelfunc.pix2ang(nside, n, nest=True)
l = 180./np.pi * p
b = 90. - 180./np.pi * t
idx = (b < 15.) & (l > 10.)
norm = np.median(EBV_SFD[idx]) / np.median(EBV[idx])
EBV *= norm
idx = (b > 45.)
bias = np.median(EBV_SFD[idx]) - np.median(EBV[idx])
EBV += bias
print 'Normalization = %.4g' % norm
print 'Additive const. = %.4g' % bias
vmin, vmax = min_max(np.log10(EBV_SFD))
mplib.rc('text', usetex=True)
hp.visufunc.mollview(np.log10(EBV), nest=True,
title=r'$\log_{10} \mathrm{E} \left( B - V \right)$',
min=vmin, max=vmax)
hp.visufunc.mollview(np.log10(EBV_SFD), nest=True,
title=r'$\log_{10} \mathrm{E} \left( B - V \right)_{\mathrm{SFD}}$',
min=vmin, max=vmax)
#hp.visufunc.mollview(EBV, nest=True, max=10.)
hp.visufunc.mollview(np.log10(EBV_SFD) - np.log10(EBV), nest=True,
title=r'$\mathrm{SFD} - \mathrm{smooth \ model}$')
plt.show()
def Monte_Carlo_EBV_prior(nside=64, n_regions=20):
import healpy as hp
model = TGalacticModel()
n = np.arange(12 * nside**2)
t, p = hp.pixelfunc.pix2ang(nside, n, nest=True)
l = 180./np.pi * p
b = 90. - 180./np.pi * t
log_Delta_EBV = np.empty((n.size, n_regions+1), dtype='f8')
# Determine log(Delta EBV) in each pixel and bin
for i,ll,bb in zip(n,l,b):
DM, log_Delta_EBV_tmp, sigma_log_Delta_EBV, mean_Delta_EBV, norm_tmp = model.EBV_prior(ll, bb, n_regions=n_regions)
log_Delta_EBV[i,:] = log_Delta_EBV_tmp[:]
    # Simulate sets of maps with different scatters in the bins
n_maps = 4
sigma = np.logspace(-2, 3, 11, base=2)
EBV = np.empty((sigma.size, n_maps, n.size), dtype='f8')
for k,s in enumerate(sigma):
for i in xrange(n_maps):
scatter = s * np.random.normal(size=(n.size, n_regions+1))
EBV[k,i,:] = np.sum(np.exp(log_Delta_EBV + scatter), axis=1)
for s,m in zip(sigma, EBV[:,0,:]):
hp.visufunc.mollview(np.log10(m), nest=True, title=r'$\sigma = %.2f$' % s)
# Exclude Galactic center
idx = (b > 20.)
#idx = ~((np.abs(l) < 90.) & (b < 10.))
#idx = (l > 0.) & (l < 50.) & (b > 40.) & (b < 50.)
EBV = EBV[:,:,idx]
# Load SFD
EBV_SFD = get_SFD_map(nside=512)
n = np.arange(12 * 512**2)
t, p = hp.pixelfunc.pix2ang(512, n, nest=True)
l = 180./np.pi * p
b = 90. - 180./np.pi * t
idx = (b > 20.)
#idx = ~((np.abs(l) < 90.) & (b < 10.))
#idx = (l > 00.) & (l < 50.) & (b > 40.) & (b < 50.)
EBV_SFD = EBV_SFD[idx]
# Compare variance of prior with SFD
EBV = np.reshape(EBV, (sigma.size, EBV.size/sigma.size))
std_log = np.std(np.log10(EBV), axis=1)
std_SFD = np.std(np.log10(EBV_SFD))
for sig,std in zip(sigma, std_log):
print 'bin: %.2f map: %.2f SFD: %.2f' % (sig, std, std_SFD)
plt.show()
def test_EBV_prior(l, b, nside=64):
model = TGalacticModel()
radius = 0.1
'''
N_thin = model.tot_num_stars(l, b, radius=radius, component='thin')
N_thick = model.tot_num_stars(l, b, radius=radius, component='thick')
N_halo = model.tot_num_stars(l, b, radius=radius, component='halo')
print '# of stars in thin disk: %d' % N_thin
print ' " in thick disk: %d' % N_thick
print ' " in halo: %d' % N_halo
'''
DM, log_Delta_EBV, sigma_log_Delta_EBV, mean_Delta_EBV, norm = model.EBV_prior(l, b)
dist = np.power(10., DM / 5. - 2.) # in kpc
for d, mean, sigma in zip(dist, log_Delta_EBV, sigma_log_Delta_EBV):
print 'd = %.3f: %.3f +- %.3f --> Delta E(B-V) = %.3f' % (d, mean, sigma, np.exp(mean + 0.5 * sigma * sigma))
print 'E(B-V) = %.3f' % np.sum(mean_Delta_EBV)
#plot_EBV_prior(model, nside=nside)
def plot_EBV_prior_profile(l, b):
model = TGalacticModel()
s = np.linspace(0., 25000., 1000)
#print l, b
DM = 5. * np.log10(s/10.)
r, z = model.gal_2_cylindrical(l, b, DM)
dA_ds = model.rho_ISM(r, z)
A = np.cumsum(dA_ds)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(s, A)
ax.plot(s, dA_ds * np.max(A) / np.max(dA_ds))
plt.show()
def print_rho(l, b):
model = TGalacticModel()
for DM in np.linspace(0., 20., 21):
print 'rho(DM = %.1f) = %.5f' % (DM, model.dA_dmu(l, b, DM) / np.power(10., DM / 5.))
def ndmesh(*args):
args = map(np.asarray,args)
return np.broadcast_arrays(*[x[(slice(None),)+(None,)*i] for i, x in enumerate(args)])
def test_plot_bulge():
m = TGalacticModel()
m.L_epsilon = 500.
'''
x = np.linspace(-5000., 5000., 1000)
y = np.zeros(1000)
z = np.zeros(1000)
rho = m.rho_bulge(x, y, z)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(x, rho)
plt.show()
'''
x = np.linspace(-10000., 10000., 150)
y = np.linspace(-10000., 10000., 150)
z = np.linspace(-10000., 10000., 150)
x, y, z = ndmesh(x, y, z)
rho_disk = m.rho_xyz(x, y, z, component='disk')
rho_halo = m.rho_xyz(x, y, z, component='halo')
rho_bulge = m.rho_xyz(x, y, z, component='bulge')
rho = [rho_disk, rho_halo, rho_bulge, rho_disk+rho_halo+rho_bulge, rho_bulge / (rho_disk+rho_halo+rho_bulge)]
n_disk = np.sum(rho_disk)
n_halo = np.sum(rho_halo)
n_bulge = np.sum(rho_bulge)
n_tot = n_disk + n_halo + n_bulge
print 'Disk fraction: %.3g' % (n_disk / n_tot)
print 'Halo fraction: %.3g' % (n_halo / n_tot)
print 'Bulge fraction: %.3g' % (n_bulge / n_tot)
fig = plt.figure()
vmax = np.max(np.sum(rho_disk, axis=1))
vmin = np.percentile(np.sum(rho_disk, axis=1), 5.)
for n_dim in xrange(3):
for n_comp, comp in enumerate(rho):
ax = fig.add_subplot(len(rho), 3, 1+3*n_comp+n_dim)
ax.imshow(np.log(np.sum(comp, axis=2-n_dim)), origin='lower', interpolation='none',
vmin=np.log(vmin), vmax=np.log(vmax),
extent=(-10., 10., -10., 10.))
plt.show()
'''
from mayavi import mlab
s = mlab.pipeline.scalar_field(rho_disk+rho_halo+rho_bulge)
mlab.pipeline.image_plane_widget(s, plane_orientation='x_axes', slice_index=10)
mlab.pipeline.image_plane_widget(s, plane_orientation='y_axes', slice_index=10)
mlab.pipeline.image_plane_widget(s, plane_orientation='z_axes', slice_index=10)
#mlab.pipeline.volume(s)
mlab.show()
'''
def test_bug():
A = np.random.random((3, 3))
x = np.random.random((3, 4, 2))
x_prime = np.einsum('...ij,j...->i...', A, x)
x_prime_2 = np.einsum('ij,jkl->ikl', A, x)
print np.allclose(x_prime, x_prime_2)
print A
print x
print x_prime
print x_prime.shape
def main():
#test_bug()
#l, b = 45.3516, 1.41762
test_plot_bulge()
#print_rho(l, b)
#plot_EBV_prior_profile(l, b)
#test_EBV_prior(l, b)
#plot_EBV_prior_profile(90, 0.)
#plot_EBV_prior_profile(10., -20.)
'''test_EBV_prior(0., 0., nside=32)
test_EBV_prior(90., 0., nside=32)
test_EBV_prior(30., 10., nside=32)
test_EBV_prior(-10., 70., nside=32)
test_EBV_prior(-10., 20., nside=32)'''
#plot_EBV_prior(nside=16)
'''
for n_regions in [10, 20, 30, 40]:
print '# of regions: %d' % n_regions
Monte_Carlo_EBV_prior(nside=32, n_regions=n_regions)
print ''
'''
return 0
if __name__ == '__main__':
main()
|
PennyQ/stero_3D_dust_map
|
model.py
|
Python
|
gpl-3.0
| 31,645
|
[
"Gaussian",
"Mayavi"
] |
45105e575d0d22f0bee977a8c550e8084b99c675a2004194a6cfe4855b827f13
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2006 Brian Matherly
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Mime utility functions for the MS Windows platform
"""
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import os
from winreg import *
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from . import _pythonmime
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
def get_description(mime_type):
"""Return the description of the specfied mime type"""
desc = None
extension = _get_extension(mime_type)
progid = _get_prog_id(extension)
if progid:
try:
hcr = ConnectRegistry(None, HKEY_CLASSES_ROOT)
desc = QueryValue(hcr, progid)
CloseKey(hcr)
except WindowsError:
pass
if not desc:
desc = _("unknown")
return desc
def get_type(file):
"""Return the mime type of the specified file"""
return _pythonmime.get_type(file)
def mime_type_is_defined(mime_type):
"""
Return True if a description for a mime type exists.
"""
extension = _get_extension(mime_type)
if extension:
return True
else:
return _pythonmime.mime_type_is_defined(mime_type)
#-------------------------------------------------------------------------
#
# private functions
#
#-------------------------------------------------------------------------
def _get_extension(mime_type):
"""
Return the extension associated with this mime type
Return None if no association exists
"""
extension = None
try:
hcr = ConnectRegistry(None, HKEY_CLASSES_ROOT)
        subkey = OpenKey(hcr, r"MIME\DataBase\Content Type")
mimekey = OpenKey(subkey, mime_type)
extension, value_type = QueryValueEx(mimekey, "Extension")
CloseKey(mimekey)
CloseKey(subkey)
CloseKey(hcr)
except WindowsError:
extension = None
if not extension:
# Work around for Windows mime problems
extmap = {
'application/abiword' : '.abw',
'application/rtf' : '.rtf',
}
if mime_type in extmap:
extension = extmap[mime_type]
return extension
def _get_prog_id(extension):
"""
Return the program ID associated with this extension
Return None if no association exists
"""
if not extension:
return None
try:
hcr = ConnectRegistry(None, HKEY_CLASSES_ROOT)
progid = QueryValue(hcr, extension)
CloseKey(hcr)
return progid
except WindowsError:
return None
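# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). What
# get_description() reports depends entirely on the registry associations of
# the local Windows machine; the mime types below are only examples.
def _example_mime_lookup():
    """Illustrative only: print the registered description for a few types."""
    for mime_type in ('application/rtf', 'text/plain'):
        print("%s -> %s" % (mime_type, get_description(mime_type)))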
|
beernarrd/gramps
|
gramps/gen/mime/_winmime.py
|
Python
|
gpl-2.0
| 3,541
|
[
"Brian"
] |
d955b4f93214cc35234c7f44f33e31e3f6b520fa9285e4868381c4594a738b52
|
#
# Copyright (C) 2013-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest as ut
import unittest_decorators as utx
import espressomd
from espressomd.interactions import FeneBond
from espressomd import pair_criteria
class PairCriteria(ut.TestCase):
"""Tests interface and implementation of pair criteria"""
es = espressomd.System(box_l=[1., 1., 1.])
f1 = FeneBond(k=1, d_r_max=0.05)
es.bonded_inter.add(f1)
f2 = FeneBond(k=1, d_r_max=0.05)
es.bonded_inter.add(f2)
es.part.add(id=0, pos=(0, 0, 0))
es.part.add(id=1, pos=(0.91, 0, 0))
p1 = es.part[0]
p2 = es.part[1]
epsilon = 1E-8
def test_distance_crit_periodic(self):
dc = pair_criteria.DistanceCriterion(cut_off=0.1)
# Interface
self.assertEqual(list(dc.get_params().keys()), ["cut_off", ])
self.assertTrue(abs(dc.get_params()["cut_off"] - 0.1) < self.epsilon)
# Decisions
# Periodic system. Particles in range via minimum image convention
self.es.periodicity = (1, 1, 1)
self.assertTrue(dc.decide(self.p1, self.p2))
self.assertTrue(dc.decide(self.p1.id, self.p2.id))
def test_distance_crit_non_periodic(self):
dc = pair_criteria.DistanceCriterion(cut_off=0.1)
# Non-periodic system. Particles out of range
self.es.periodicity = (0, 0, 0)
self.assertTrue(not dc.decide(self.p1, self.p2))
self.assertTrue(not dc.decide(self.p1.id, self.p2.id))
@utx.skipIfMissingFeatures("LENNARD_JONES")
def test_energy_crit(self):
# Setup purely repulsive lj
self.es.non_bonded_inter[0, 0].lennard_jones.set_params(
sigma=0.11, epsilon=1, cutoff=2**(1. / 6.) * 0.11, shift="auto")
ec = pair_criteria.EnergyCriterion(cut_off=0.001)
# Interface
self.assertEqual(list(ec.get_params().keys()), ["cut_off", ])
self.assertTrue(abs(ec.get_params()["cut_off"] - 0.001) < self.epsilon)
# Decisions
# Periodic system. Particles in range via minimum image convention
self.es.periodicity = (1, 1, 1)
self.assertTrue(ec.decide(self.p1, self.p2))
self.assertTrue(ec.decide(self.p1.id, self.p2.id))
@utx.skipIfMissingFeatures(["LENNARD_JONES"])
def test_energy_crit_non_periodic(self):
# Setup purely repulsive lj
self.es.non_bonded_inter[0, 0].lennard_jones.set_params(
sigma=0.11, epsilon=1, cutoff=2**(1. / 6.) * 0.11, shift="auto")
ec = pair_criteria.EnergyCriterion(cut_off=0.001)
# Interface
self.assertEqual(list(ec.get_params().keys()), ["cut_off", ])
self.assertTrue(abs(ec.get_params()["cut_off"] - 0.001) < self.epsilon)
# Non-periodic system. Particles out of range
self.es.periodicity = (0, 0, 0)
self.assertTrue(not ec.decide(self.p1, self.p2))
self.assertTrue(not ec.decide(self.p1.id, self.p2.id))
def test_bond_crit(self):
bc = pair_criteria.BondCriterion(bond_type=0)
# Interface
self.assertEqual(list(bc.get_params().keys()), ["bond_type", ])
self.assertEqual(bc.get_params()["bond_type"], 0)
# Decisions
# No bond yet. Should return false
self.assertTrue(not bc.decide(self.p1, self.p2))
self.assertTrue(not bc.decide(self.p1.id, self.p2.id))
# Add bond. Then the criterion should match
self.es.part[0].bonds = ((0, 1),)
self.assertTrue(bc.decide(self.p1, self.p2))
self.assertTrue(bc.decide(self.p1.id, self.p2.id))
# Place bond on the 2nd particle. The criterion should still match
self.es.part[0].bonds = ()
self.es.part[1].bonds = ((0, 0),)
self.assertTrue(bc.decide(self.p1, self.p2))
self.assertTrue(bc.decide(self.p1.id, self.p2.id))
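# Illustrative note (not part of the original test): outside a test harness
# these criteria are typically handed to the cluster analysis machinery,
# roughly as below; this assumes the espressomd.cluster_analysis API of the
# same release.
#
#     from espressomd import cluster_analysis
#     cs = cluster_analysis.ClusterStructure(
#         pair_criterion=pair_criteria.DistanceCriterion(cut_off=0.1))
#     cs.run_for_all_pairs()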
if __name__ == "__main__":
ut.main()
|
mkuron/espresso
|
testsuite/python/pair_criteria.py
|
Python
|
gpl-3.0
| 4,508
|
[
"ESPResSo"
] |
538f74f9315f3fda5f96cec221300f7c80a32106370ad6e0c2071d20b5b2277b
|
#!/usr/bin/env python
import sys, os, time, random, pygame
import RPi.GPIO as GPIO
def speel(bestand):
pygame.mixer.music.load(bestand)
pygame.mixer.music.play()
while pygame.mixer.music.get_busy() == True:
continue
def speelSom(getal1, getal2):
print("Wat is", getal1, "x", getal2,"?")
speel("audio/" + str(getal1) + ".mp3")
speel("audio/keer.mp3")
speel("audio/" + str(getal2) + ".mp3")
speel("audio/is.mp3")
def getNummer():
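    # Count the dial pulses: a rotary dial opens and closes the contact once
    # per pulse, one pulse per unit of the dialled digit, with ten pulses
    # standing for the digit 0; hence the "% 10" on the returned count.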
nPulsen = 0
    # Wait for the rotary dial / ground button
schijfContact = GPIO.input(SCHIJFPIN)
    aardContact = GPIO.input(AARDPIN) # Low when pressed!!!
while schijfContact == False and aardContact == True:
schijfContact = GPIO.input(SCHIJFPIN)
aardContact = GPIO.input(AARDPIN)
    # Ground button was pressed
if aardContact == False:
return -1
    # Handle the pulses
klaar = False
while klaar == False and schijfContact == True:
nPulsen = nPulsen + 1
startTijd = time.time()
time.sleep(0.1)
schijfContact = GPIO.input(SCHIJFPIN)
        # Check the time between two pulses
while klaar == False and schijfContact == False:
if time.time() - startTijd >= 0.2:
klaar = True
schijfContact = GPIO.input(SCHIJFPIN)
return nPulsen % 10
def hoornCallback(channel):
print("Hoorn!", channel)
    # restart the whole script
GPIO.cleanup()
python = sys.executable
os.execl(python, python, * sys.argv)
SCHIJFPIN = 25
AARDPIN = 23
HOORNPIN = 24
GPIO.setmode(GPIO.BCM)
GPIO.setup(SCHIJFPIN, GPIO.IN, pull_up_down = GPIO.PUD_UP)
GPIO.setup(HOORNPIN, GPIO.IN, pull_up_down = GPIO.PUD_UP)
GPIO.setup(AARDPIN, GPIO.IN, pull_up_down = GPIO.PUD_UP)
# Use an interrupt for the handset,
# because it can be put down at any moment
GPIO.add_event_detect(HOORNPIN, GPIO.BOTH, callback = hoornCallback)
# Initialize the audio mixer
pygame.mixer.init()
while True:
try:
        # Wait for the handset to be picked up
print("Wacht op hoorn...")
hoornContact = GPIO.input(HOORNPIN)
while hoornContact == True:
hoornContact = GPIO.input(HOORNPIN)
time.sleep (1)
        # Which multiplication table to practice?
print("Welk tafeltje?")
speel("audio/welk.mp3")
tafeltje = getNummer()
        # List to keep track of which problems were answered correctly/incorrectly
sommen = []
for som in range(10):
            sommen.append(0) # 0 is wrong, 1 is correct
aantalGoed = 0
while aantalGoed < 10:
            # Pick a problem
getal1 = random.randint(1,10)
while sommen[getal1 - 1] == 1:
getal1 = random.randint(1,10)
if tafeltje == -1:
getal2 = random.randint(1,10)
elif tafeltje == 0:
getal2 = 10
else:
getal2 = tafeltje
uitkomst = getal1 * getal2
nCijfers = len(str(uitkomst))
speelSom(getal1, getal2)
huidigCijfer = 0
antwoord = ""
            # Wait for the answer
while huidigCijfer < nCijfers:
nummer = getNummer()
                if nummer > -1: # the dial was turned
antwoord = antwoord + str(nummer)
huidigCijfer = huidigCijfer + 1
                else: # ground button was pressed
speelSom(getal1, getal2)
huidigCijfer = 0
antwoord = ""
print(antwoord)
            # Check the answer
if int(antwoord) == uitkomst:
aantalGoed = aantalGoed + 1
sommen[getal1 - 1] = 1
print("Goed zo!")
speel("audio/goed.mp3")
else:
print("Jammer, de juiste uitkomst is", uitkomst)
speel("audio/fout.mp3")
speel("audio/" + str(uitkomst) + ".mp3")
print()
time.sleep(1)
speel("audio/einde.mp3")
except KeyboardInterrupt: # Ctrl+C
GPIO.cleanup()
|
ralphcrutzen/PTT-Tafeltjes-Telefoon
|
t65.py
|
Python
|
gpl-3.0
| 4,080
|
[
"Elk"
] |
543e51eb6d07a07e1240db74b93bebdb0da1312d882b2efb4b8eb2d43b827a76
|
"""
Signals and Systems Function Module
Copyright (c) March 2017, Mark Wickert
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
Notes
-----
The primary purpose of this function library is to support the book Signals and Systems for Dummies. Beyond that it should be useful to anyone who wants to use Pylab for general signals and systems modeling and simulation. There is a good collection of digital communication simulation primitives included in the library. More enhancements are planned over time.
The formatted docstrings for the library follow. Click index in the upper right to get an
alphabetical listing of the library functions. In all of the example code given it is assumed that ssd has been imported into your workspace. See the examples below for import options.
Examples
--------
>>> import sk_dsp_comm.sigsys as ssd
>>> # Commands then need to be prefixed with ssd., i.e.,
>>> ssd.tri(t,tau)
>>> # A full import of the module, to avoid the need to prefix with ssd, is:
>>> from sk_dsp_comm.sigsys import *
Function Catalog
----------------
"""
from matplotlib import pylab
import numpy as np
from numpy import fft
import matplotlib.pyplot as plt
from scipy import signal
from scipy.io import wavfile
from logging import getLogger
log = getLogger(__name__)
import warnings
def cic(m, k):
"""
A functional form implementation of a cascade of integrator comb (CIC) filters.
Parameters
----------
m : Effective number of taps per section (typically the decimation factor).
k : The number of CIC sections cascaded (larger K gives the filter a wider image rejection bandwidth).
Returns
-------
b : FIR filter coefficients for a simple direct form implementation using the filter() function.
Notes
-----
Commonly used in multirate signal processing digital down-converters and digital up-converters. A true CIC filter
requires no multiplies, only add and subtract operations. The functional form created here is a simple FIR requiring
real coefficient multiplies via filter().
Mark Wickert July 2013
"""
if k == 1:
b = np.ones(m)
else:
h = np.ones(m)
b = h
for i in range(1, k):
b = signal.convolve(b, h) # cascade by convolving impulse responses
# Make filter have unity gain at DC
return b / np.sum(b)
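# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the CIC-style
# FIR from cic() applied as a decimator. The decimation factor of 8 and the
# three cascaded sections are arbitrary illustration values.
def _example_cic_decimator(x, m=8, k=3):
    """Illustrative only: filter with the CIC-equivalent FIR, then decimate."""
    b = cic(m, k)
    y = signal.lfilter(b, 1, x)
    return y[::m]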
def ten_band_eq_filt(x,GdB,Q=3.5):
"""
Filter the input signal x with a ten-band equalizer having octave gain values in ndarray GdB.
The signal x is filtered using octave-spaced peaking filters starting at 31.25 Hz and
stopping at 16 kHz. The Q of each filter is 3.5, but can be changed. The sampling rate
is assumed to be 44.1 kHz.
Parameters
----------
x : ndarray of the input signal samples
GdB : ndarray containing ten octave band gain values [G0dB,...,G9dB]
Q : Quality factor vector for each of the NB peaking filters
Returns
-------
y : ndarray of output signal samples
Examples
--------
>>> # Test with white noise
    >>> x = randn(100000)
>>> y = ten_band_eq_filt(x,GdB)
>>> psd(y,2**10,44.1)
"""
fs = 44100.0 # Hz
NB = len(GdB)
if not NB == 10:
raise ValueError("GdB length not equal to ten")
Fc = 31.25*2**np.arange(NB)
B = np.zeros((NB,3))
A = np.zeros((NB,3))
# Create matrix of cascade coefficients
for k in range(NB):
[b,a] = peaking(GdB[k],Fc[k],Q)
B[k,:] = b
A[k,:] = a
# Pass signal x through the cascade of ten filters
y = np.zeros(len(x))
for k in range(NB):
if k == 0:
y = signal.lfilter(B[k,:],A[k,:],x)
else:
y = signal.lfilter(B[k,:],A[k,:],y)
return y
def ten_band_eq_resp(GdB,Q=3.5):
"""
Create a frequency response magnitude plot in dB of a ten band equalizer
using a semilogplot (semilogx()) type plot
Parameters
----------
GdB : Gain vector for 10 peaking filters [G0,...,G9]
Q : Quality factor for each peaking filter (default 3.5)
Returns
-------
Nothing : two plots are created
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> ss.ten_band_eq_resp([0,10.0,0,0,-1,0,5,0,-4,0])
>>> plt.show()
"""
fs = 44100.0 # Hz
NB = len(GdB)
if not NB == 10:
raise ValueError("GdB length not equal to ten")
Fc = 31.25*2**np.arange(NB)
B = np.zeros((NB,3));
A = np.zeros((NB,3));
# Create matrix of cascade coefficients
for k in range(NB):
b,a = peaking(GdB[k],Fc[k],Q,fs)
B[k,:] = b
A[k,:] = a
# Create the cascade frequency response
F = np.logspace(1,np.log10(20e3),1000)
    H = np.ones(len(F)) * complex(1.0, 0.0)
for k in range(NB):
w,Htemp = signal.freqz(B[k,:],A[k,:],2*np.pi*F/fs)
H *= Htemp
plt.figure(figsize=(6,4))
plt.subplot(211)
plt.semilogx(F,20*np.log10(abs(H)))
plt.axis([10, fs/2, -12, 12])
plt.grid()
plt.title('Ten-Band Equalizer Frequency Response')
plt.xlabel('Frequency (Hz)')
plt.ylabel('Gain (dB)')
plt.subplot(212)
plt.stem(np.arange(NB),GdB,'b','bs')
#plt.bar(np.arange(NB)-.1,GdB,0.2)
plt.axis([0, NB-1, -12, 12])
plt.xlabel('Equalizer Band Number')
plt.ylabel('Gain Set (dB)')
plt.grid()
def peaking(GdB, fc, Q=3.5, fs=44100.):
"""
    A second-order peaking filter having GdB gain at fc and approximately
    0 dB gain otherwise.
The filter coefficients returns correspond to a biquadratic system function
containing five parameters.
Parameters
----------
    GdB : Peak gain in dB at the center frequency fc
    fc : Center frequency in Hz
    Q : Filter Q which is inversely proportional to bandwidth
    fs : Sampling frequency in Hz
Returns
-------
b : ndarray containing the numerator filter coefficients
a : ndarray containing the denominator filter coefficients
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sk_dsp_comm.sigsys import peaking
>>> from scipy import signal
>>> b,a = peaking(2.0,500)
>>> f = np.logspace(1,5,400)
>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)
>>> plt.semilogx(f,20*np.log10(abs(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
>>> plt.show()
>>> b,a = peaking(-5.0,500,4)
>>> w,H = signal.freqz(b,a,2*np.pi*f/44100)
>>> plt.semilogx(f,20*np.log10(abs(H)))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
"""
mu = 10**(GdB/20.)
kq = 4/(1 + mu)*np.tan(2*np.pi*fc/fs/(2*Q))
Cpk = (1 + kq *mu)/(1 + kq)
b1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq*mu)
b2 = (1 - kq*mu)/(1 + kq*mu)
a1 = -2*np.cos(2*np.pi*fc/fs)/(1 + kq)
a2 = (1 - kq)/(1 + kq)
b = Cpk*np.array([1, b1, b2])
a = np.array([1, a1, a2])
return b,a
def ex6_2(n):
"""
Generate a triangle pulse as described in Example 6-2
of Chapter 6.
You need to supply an index array n that covers at least [-2, 5].
The function returns the hard-coded signal of the example.
Parameters
----------
n : time index ndarray covering at least -2 to +5.
Returns
-------
x : ndarray of signal samples in x
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> n = np.arange(-5,8)
>>> x = ss.ex6_2(n)
>>> plt.stem(n,x) # creates a stem plot of x vs n
"""
x = np.zeros(len(n))
for k, nn in enumerate(n):
if nn >= -2 and nn <= 5:
x[k] = 8 - nn
return x
def position_cd(Ka, out_type ='fb_exact'):
"""
CD sled position control case study of Chapter 18.
The function returns the closed-loop and open-loop
system function for a CD/DVD sled position control
system. The loop amplifier gain is the only variable
    that may be changed. Which system function is returned can
    however be selected via out_type.
Parameters
----------
Ka : loop amplifier gain, start with 50.
out_type : 'open_loop' for open loop system function
out_type : 'fb_approx' for closed-loop approximation
out_type : 'fb_exact' for closed-loop exact
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Notes
-----
With the exception of the loop amplifier gain, all
other parameters are hard-coded from Case Study example.
Examples
--------
>>> b,a = position_cd(Ka,'fb_approx')
>>> b,a = position_cd(Ka,'fb_exact')
"""
rs = 10/(2*np.pi)
# Load b and a ndarrays with the coefficients
if out_type.lower() == 'open_loop':
b = np.array([Ka*4000*rs])
a = np.array([1,1275,31250,0])
elif out_type.lower() == 'fb_approx':
b = np.array([3.2*Ka*rs])
a = np.array([1, 25, 3.2*Ka*rs])
elif out_type.lower() == 'fb_exact':
b = np.array([4000*Ka*rs])
a = np.array([1, 1250+25, 25*1250, 4000*Ka*rs])
else:
        raise ValueError('out_type must be: open_loop, fb_approx, or fb_exact')
return b, a
def cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H'):
"""
Cruise control with PI controller and hill disturbance.
This function returns various system function configurations
    for the cruise control Case Study example found in
    the supplementary article. The plant model is obtained by
    linearizing the equations of motion and the controller contains a
proportional and integral gain term set via the closed-loop parameters
natural frequency wn (rad/s) and damping zeta.
Parameters
----------
wn : closed-loop natural frequency in rad/s, nominally 0.1
zeta : closed-loop damping factor, nominally 1.0
T : vehicle time constant, nominally 10 s
vcruise : cruise velocity set point, nominally 75 mph
vmax : maximum vehicle velocity, nominally 120 mph
tf_mode : 'H', 'HE', 'HVW', or 'HED' controls the system function returned by the function
'H' : closed-loop system function V(s)/R(s)
'HE' : closed-loop system function E(s)/R(s)
'HVW' : closed-loop system function V(s)/W(s)
'HED' : closed-loop system function E(s)/D(s), where D is the hill disturbance input
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Examples
--------
>>> # return the closed-loop system function output/input velocity
>>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='H')
>>> # return the closed-loop system function loop error/hill disturbance
>>> b,a = cruise_control(wn,zeta,T,vcruise,vmax,tf_mode='HED')
"""
tau = T/2.*vmax/vcruise
g = 9.8
g *= 3*60**2/5280. # m/s to mph conversion
Kp = T*(2*zeta*wn-1/tau)/vmax
Ki = T*wn**2./vmax
K = Kp*vmax/T
wn = np.sqrt(K/(Kp/Ki))
zeta = (K + 1/tau)/(2*wn)
log.info('wn = %s' % (wn))
log.info('zeta = %s' % (zeta))
a = np.array([1, 2*zeta*wn, wn**2])
if tf_mode == 'H':
b = np.array([K, wn**2])
elif tf_mode == 'HE':
b = np.array([1, 2*zeta*wn-K, 0.])
elif tf_mode == 'HVW':
b = np.array([ 1, wn**2/K+1/tau, wn**2/(K*tau)])
b *= Kp
elif tf_mode == 'HED':
b = np.array([g, 0])
else:
        raise ValueError('tf_mode must be: H, HE, HVW, or HED')
return b, a
def splane(b,a,auto_scale=True,size=[-1,1,-1,1]):
"""
Create an s-plane pole-zero plot.
As input the function uses the numerator and denominator
s-domain system function coefficient ndarrays b and a respectively.
Assumed to be stored in descending powers of s.
Parameters
----------
b : numerator coefficient ndarray.
a : denominator coefficient ndarray.
    auto_scale : if True the plot axes are scaled automatically (default True)
    size : [xmin,xmax,ymin,ymax] plot scaling when auto_scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
The difficulty is setting the tolerance for this detection. Currently it
is set at 1e-3 via the function signal.unique_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> splane(b,a)
>>> # Here the plot is generated using manual scaling
>>> splane(b,a,False,[-10,1,-10,10])
"""
if (isinstance(a,int) or isinstance(a,float)):
a = [a]
if (isinstance(b,int) or isinstance(b,float)):
b = [b]
M = len(b) - 1
N = len(a) - 1
plt.figure(figsize=(5,5))
#plt.axis('equal')
N_roots = np.array([0.0])
if M > 0:
N_roots = np.roots(b)
D_roots = np.array([0.0])
if N > 0:
D_roots = np.roots(a)
if auto_scale:
size[0] = min(np.min(np.real(N_roots)),np.min(np.real(D_roots)))-0.5
size[1] = max(np.max(np.real(N_roots)),np.max(np.real(D_roots)))+0.5
size[1] = max(size[1],0.5)
size[2] = min(np.min(np.imag(N_roots)),np.min(np.imag(D_roots)))-0.5
size[3] = max(np.max(np.imag(N_roots)),np.max(np.imag(D_roots)))+0.5
plt.plot([size[0],size[1]],[0,0],'k--')
plt.plot([0,0],[size[2],size[3]],'r--')
# Plot labels if multiplicity greater than 1
x_scale = size[1]-size[0]
y_scale = size[3]-size[2]
x_off = 0.03
y_off = 0.01
if M > 0:
#N_roots = np.roots(b)
N_uniq, N_mult=signal.unique_roots(N_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
idx_N_mult = np.nonzero(np.ravel(N_mult>1))[0]
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10)
if N > 0:
#D_roots = np.roots(a)
D_uniq, D_mult=signal.unique_roots(D_roots,tol=1e-3, rtype='avg')
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
idx_D_mult = np.nonzero(np.ravel(D_mult>1))[0]
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis(np.array(size))
return M,N
def os_filter(x, h, N, mode=0):
"""
Overlap and save transform domain FIR filtering.
This function implements the classical overlap and save method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> from numpy import arange, cos, pi, ones
>>> n = arange(0,100)
>>> x = cos(2*pi*0.05*n)
    >>> h = ones(10)
>>> y = os_filter(x,h,N)
>>> # set mode = 1
>>> y, y_mat = os_filter(x,h,N,1)
"""
P = len(h)
# zero pad start of x so first frame can recover first true samples of x
x = np.hstack((np.zeros(P-1),x))
L = N - P + 1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad end of x to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(int(Nframe*N))
# create an instrumentation matrix to observe the overlap and save behavior
y_mat = np.zeros((Nframe,int(Nframe*N)))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:k*L+N]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk)) # imag part should be zero
y[k*L+P-1:k*L+N] = yk[P-1:]
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[P-1:Nx], y_mat[:,P-1:Nx]
else:
return y[P-1:Nx]
def oa_filter(x, h, N, mode=0):
"""
Overlap and add transform domain FIR filtering.
This function implements the classical overlap and add method of
transform domain filtering using a length P FIR filter.
Parameters
----------
x : input signal to be filtered as an ndarray
h : FIR filter coefficients as an ndarray of length P
N : FFT size > P, typically a power of two
mode : 0 or 1, when 1 returns a diagnostic matrix
Returns
-------
y : the filtered output as an ndarray
y_mat : an ndarray whose rows are the individual overlap outputs.
Notes
-----
y_mat is used for diagnostics and to gain understanding of the algorithm.
Examples
--------
>>> import numpy as np
>>> from sk_dsp_comm.sigsys import oa_filter
>>> n = np.arange(0,100)
>>> x = np.cos(2*np.pi*0.05*n)
    >>> h = np.ones(10)
>>> y = oa_filter(x,h,N)
>>> # set mode = 1
>>> y, y_mat = oa_filter(x,h,N,1)
"""
P = len(h)
L = int(N) - P + 1 # need N >= L + P -1
Nx = len(x)
Nframe = int(np.ceil(Nx/float(L)))
# zero pad to full number of frames needed
x = np.hstack((x,np.zeros(Nframe*L-Nx)))
y = np.zeros(Nframe*N)
# create an instrumentation matrix to observe the overlap and add behavior
y_mat = np.zeros((Nframe,Nframe*N))
H = fft.fft(h,N)
# begin the filtering operation
for k in range(Nframe):
xk = x[k*L:(k+1)*L]
Xk = fft.fft(xk,N)
Yk = H*Xk
yk = np.real(fft.ifft(Yk))
y[k*L:k*L+N] += yk
y_mat[k,k*L:k*L+N] = yk
if mode == 1:
return y[0:Nx], y_mat[:,0:Nx]
else:
return y[0:Nx]
def lp_samp(fb,fs,fmax,N,shape='tri',fsize=(6,4)):
"""
Lowpass sampling theorem plotting function.
Display the spectrum of a sampled signal after setting the bandwidth,
sampling frequency, maximum display frequency, and spectral shape.
Parameters
----------
fb : spectrum lowpass bandwidth in Hz
fs : sampling frequency in Hz
fmax : plot over [-fmax,fmax]
shape : 'tri' or 'line'
N : number of translates, N positive and N negative
fsize : the size of the figure window, default (6,4)
Returns
-------
Nothing : A plot window opens containing the spectrum plot
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm.sigsys import lp_samp
    No aliasing since the bandwidth 10 Hz < 25/2 Hz, i.e., fb < fs/2.
>>> lp_samp(10,25,50,10)
>>> plt.show()
    Aliasing occurs since the bandwidth 15 Hz > 25/2 Hz, i.e., fb > fs/2.
>>> lp_samp(15,25,50,10)
"""
plt.figure(figsize=fsize)
# define the plot interval
f = np.arange(-fmax,fmax+fmax/200.,fmax/200.)
A = 1.0
line_ampl = A/2.*np.array([0, 1])
# plot the lowpass spectrum in black
shapes = ['tri', 'line']
if shape.lower() not in shapes:
raise ValueError('shape must be tri or line')
if shape.lower() == 'tri':
plt.plot(f,lp_tri(f,fb))
# overlay positive and negative frequency translates
for n in range(N):
plt.plot(f, lp_tri(f - (n + 1) * fs, fb), '--r')
plt.plot(f, lp_tri(f + (n + 1) * fs, fb), '--g')
elif shape.lower() == 'line':
plt.plot([fb, fb],line_ampl,'b', linewidth=2)
plt.plot([-fb, -fb],line_ampl,'b', linewidth=2)
# overlay positive and negative frequency translates
for n in range(N):
plt.plot([fb+(n+1)*fs, fb+(n+1)*fs],line_ampl,'--r', linewidth=2)
plt.plot([-fb+(n+1)*fs, -fb+(n+1)*fs],line_ampl,'--r', linewidth=2)
plt.plot([fb-(n+1)*fs, fb-(n+1)*fs],line_ampl,'--g', linewidth=2)
plt.plot([-fb-(n+1)*fs, -fb-(n+1)*fs],line_ampl,'--g', linewidth=2)
plt.ylabel('Spectrum Magnitude')
plt.xlabel('Frequency in Hz')
plt.axis([-fmax,fmax,0,1])
plt.grid()
def lp_tri(f, fb):
"""
Triangle spectral shape function used by :func:`lp_samp`.
Parameters
----------
f : ndarray containing frequency samples
fb : the bandwidth as a float constant
Returns
-------
x : ndarray of spectrum samples for a single triangle shape
Notes
-----
This is a support function for the lowpass spectrum plotting function
:func:`lp_samp`.
Examples
--------
>>> x = lp_tri(f, fb)
"""
x = np.zeros(len(f))
for k in range(len(f)):
if abs(f[k]) <= fb:
x[k] = 1 - abs(f[k])/float(fb)
return x
def sinusoid_awgn(x, SNRdB):
"""
Add white Gaussian noise to a single real sinusoid.
Input a single sinusoid to this function and it returns a noisy
sinusoid at a specific SNR value in dB. Sinusoid power is calculated
using np.var.
Parameters
----------
x : Input signal as ndarray consisting of a single sinusoid
SNRdB : SNR in dB for output sinusoid
Returns
-------
y : Noisy sinusoid return vector
Examples
--------
>>> # set the SNR to 10 dB
>>> n = arange(0,10000)
>>> x = cos(2*pi*0.04*n)
>>> y = sinusoid_awgn(x,10.0)
"""
# Estimate signal power
x_pwr = np.var(x)
# Create noise vector
noise = np.sqrt(x_pwr/10**(SNRdB/10.))*np.random.randn(len(x));
return x + noise
def simple_quant(x, b_tot, x_max, limit):
"""
A simple rounding quantizer for bipolar signals having Btot = B + 1 bits.
    This function models a quantizer that employs b_tot bits and has one of
three selectable limiting types: saturation, overflow, and none.
The quantizer is bipolar and implements rounding.
Parameters
----------
x : input signal ndarray to be quantized
b_tot : total number of bits in the quantizer, e.g. 16
x_max : quantizer full-scale dynamic range is [-Xmax, Xmax]
    limit : Limiting of the form 'sat', 'over', 'none'
Returns
-------
xq : quantized output ndarray
Notes
-----
    The quantization error can be formed as e = xq - x
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from matplotlib.mlab import psd
>>> import numpy as np
>>> from sk_dsp_comm import sigsys as ss
>>> n = np.arange(0,10000)
>>> x = np.cos(2*np.pi*0.211*n)
>>> y = ss.sinusoid_awgn(x,90)
>>> Px, f = psd(y,2**10,Fs=1)
>>> plt.plot(f, 10*np.log10(Px))
>>> plt.ylim([-80, 25])
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel(r'Normalized Frequency $\omega/2\pi$')
>>> plt.show()
>>> yq = ss.simple_quant(y,12,1,'sat')
>>> Px, f = psd(yq,2**10,Fs=1)
>>> plt.plot(f, 10*np.log10(Px))
>>> plt.ylim([-80, 25])
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel(r'Normalized Frequency $\omega/2\pi$')
>>> plt.show()
"""
B = b_tot - 1
x = x / x_max
if limit.lower() == 'over':
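        # overflow: wrap modulo 2**b_tot, mimicking two's-complement arithmetic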
xq = (np.mod(np.round(x*2**B) + 2 ** B, 2 ** b_tot) - 2 ** B) / 2 ** B
elif limit.lower() == 'sat':
xq = np.round(x*2**B)+2**B
s1 = np.nonzero(np.ravel(xq >= 2 ** b_tot - 1))[0]
s2 = np.nonzero(np.ravel(xq < 0))[0]
xq[s1] = (2 ** b_tot - 1) * np.ones(len(s1))
xq[s2] = np.zeros(len(s2))
xq = (xq - 2**B)/2**B
elif limit.lower() == 'none':
xq = np.round(x*2**B)/2**B
else:
raise ValueError('limit must be the string over, sat, or none')
return xq * x_max
def prin_alias(f_in,fs):
"""
    Calculate the principal alias frequencies.
Given an array of input frequencies the function returns an
    array of principal alias frequencies.
Parameters
----------
f_in : ndarray of input frequencies
fs : sampling frequency
Returns
-------
f_out : ndarray of principal alias frequencies
Examples
--------
>>> # Linear frequency sweep from 0 to 50 Hz
>>> f_in = arange(0,50,0.1)
>>> # Calculate principal alias with fs = 10 Hz
>>> f_out = prin_alias(f_in,10)
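A small numerical sketch (the tone frequencies are illustrative): inputs at
7 Hz and 13 Hz both fold to the principal alias of 3 Hz when fs = 10 Hz.
>>> import numpy as np
>>> f_out = prin_alias(np.array([7., 13.]), 10)  # both values should be 3.0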
"""
return abs(np.rint(f_in/fs)*fs - f_in)
"""
Principal alias via recursion
f_out = np.copy(f_in)
for k in range(len(f_out)):
while f_out[k] > fs/2.:
f_out[k] = abs(f_out[k] - fs)
return f_out
"""
def cascade_filters(b1,a1,b2,a2):
"""
Cascade two IIR digital filters into a single (b,a) coefficient set.
To cascade two digital filters (system functions) given their numerator
and denominator coefficients you simply convolve the coefficient arrays.
Parameters
----------
b1 : ndarray of numerator coefficients for filter 1
a1 : ndarray of denominator coefficients for filter 1
b2 : ndarray of numerator coefficients for filter 2
a2 : ndarray of denominator coefficients for filter 2
Returns
-------
b : ndarray of numerator coefficients for the cascade
a : ndarray of denominator coefficients for the cascade
Examples
--------
>>> from scipy import signal
>>> b1,a1 = signal.butter(3, 0.1)
>>> b2,a2 = signal.butter(3, 0.15)
>>> b,a = cascade_filters(b1,a1,b2,a2)
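A verification sketch (continuing the example above): the cascade frequency
response should equal the product of the individual responses.
>>> import numpy as np
>>> w, H = signal.freqz(b, a)
>>> w, H1 = signal.freqz(b1, a1)
>>> w, H2 = signal.freqz(b2, a2)
>>> bool(np.allclose(H, H1*H2))
True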
"""
return signal.convolve(b1,b2), signal.convolve(a1,a2)
def soi_snoi_gen(s,SIR_dB,N,fi,fs = 8000):
"""
Add an interfering sinusoidal tone to the input signal at a given SIR_dB.
The input is the signal of interest (SOI) and a number of sinusoid signals
not of interest (SNOI) are added to the SOI at a prescribed signal-to-
interference (SIR) level in dB.
Parameters
----------
s : ndarray of signal of SOI
SIR_dB : interference level in dB
N : Trim input signal s to length N + 1 samples
fi : ndarray of interference frequencies in Hz
fs : sampling rate in Hz, default is 8000 Hz
Returns
-------
r : ndarray of combined signal plus interference of length N+1 samples
Examples
--------
>>> # load a speech ndarray and trim to 5*8000 + 1 samples
>>> fs,s = from_wav('OSR_us_000_0030_8k.wav')
>>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500])
"""
n = np.arange(0,N+1)
K = len(fi)
si = np.zeros(N+1)
for k in range(K):
si += np.cos(2*np.pi*fi[k]/fs*n);
s = s[:N+1]
Ps = np.var(s)
Psi = np.var(si)
r = s + np.sqrt(Ps/Psi*10**(-SIR_dB/10))*si
return r
def lms_ic(r,M,mu,delta=1):
"""
Least mean square (LMS) interference canceller adaptive filter.
A complete LMS adaptive filter simulation function for the case of
interference cancellation. Used in the digital filtering case study.
Parameters
----------
r : ndarray of the noisy (signal plus interference) input signal
M : FIR filter length (order M-1)
mu : LMS step-size
delta : decorrelation delay between the input and the FIR filter input, default 1
Returns
-------
n : ndarray Index vector
r : ndarray noisy (with interference) input signal
r_hat : ndarray filtered output (NB_hat[n])
e : ndarray error sequence (WB_hat[n])
ao : ndarray final value of weight vector
F : ndarray frequency response axis vector
Ao : ndarray frequency response of filter
Examples
----------
>>> # import a speech signal
>>> fs,s = from_wav('OSR_us_000_0030_8k.wav')
>>> # add interference at 1kHz and 1.5 kHz and
>>> # truncate to 5 seconds
>>> r = soi_snoi_gen(s,10,5*8000,[1000, 1500])
>>> # simulate with a 64 tap FIR and mu = 0.005
>>> n,r,r_hat,e,ao,F,Ao = lms_ic(r,64,0.005)
"""
N = len(r)-1;
# Form the reference signal y via delay delta
y = signal.lfilter(np.hstack((np.zeros(delta), np.array([1]))),1,r)
# Initialize output vector x_hat to zero
r_hat = np.zeros(N+1)
# Initialize error vector e to zero
e = np.zeros(N+1)
# Initialize weight vector to zero
ao = np.zeros(M+1)
# Initialize filter memory to zero
z = np.zeros(M)
# Initialize a vector for holding ym of length M+1
ym = np.zeros(M+1)
for k in range(N+1):
# Filter one sample at a time
r_hat[k],z = signal.lfilter(ao,np.array([1]),np.array([y[k]]),zi=z)
# Form the error sequence
e[k] = r[k] - r_hat[k]
# Update the weight vector
ao = ao + 2*mu*e[k]*ym
# Update vector used for correlation with e(k)
ym = np.hstack((np.array([y[k]]), ym[:-1]))
# Create filter frequency response
F, Ao = signal.freqz(ao,1,1024)
F/= (2*np.pi)
Ao = 20*np.log10(abs(Ao))
return np.arange(0,N+1), r, r_hat, e, ao, F, Ao
def fir_iir_notch(fi,fs,r=0.95):
"""
Design a second-order FIR or IIR notch filter.
A second-order FIR notch filter is created by placing conjugate
zeros on the unit circle at the angle corresponding to the notch center
frequency. The IIR notch variation places a pair of conjugate poles
at the same angle, but with radius r < 1 (typically 0.9 to 0.95).
Parameters
----------
fi : notch frequency in Hz relative to fs
fs : the sampling frequency in Hz, e.g. 8000
r : pole radius for IIR version, default = 0.95
Returns
-------
b : numerator coefficient ndarray
a : denominator coefficient ndarray
Notes
-----
If the pole radius is 0 then an FIR version is created, that is
there are no poles except at z = 0.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> b_FIR, a_FIR = ss.fir_iir_notch(1000,8000,0)
>>> ss.zplane(b_FIR, a_FIR)
>>> plt.show()
>>> b_IIR, a_IIR = ss.fir_iir_notch(1000,8000)
>>> ss.zplane(b_IIR, a_IIR)
"""
w0 = 2*np.pi*fi/float(fs)
if r >= 1:
raise ValueError('Poles on or outside unit circle.')
elif r == 0:
a = np.array([1.0])
else:
a = np.array([1, -2*r*np.cos(w0), r**2])
b = np.array([1, -2*np.cos(w0), 1])
return b, a
def simple_sa(x, NS, NFFT, fs, NAVG=1, window='boxcar'):
"""
Spectral estimation using windowing and averaging.
This function implements averaged periodogram spectral estimation
similar to matplotlib's mlab.psd() function, but is more
specialized for the windowing case study of Chapter 16.
Parameters
----------
x : ndarray containing the input signal
NS : The subrecord length less zero padding, e.g. NS < NFFT
NFFT : FFT length, e.g., 1024 = 2**10
fs : sampling rate in Hz
NAVG : the number of averages, e.g., 1 for deterministic signals
window : hardcoded window 'boxcar' (default) or 'hanning'
Returns
-------
f : ndarray frequency axis in Hz on [0, fs/2]
Sx : ndarray the power spectrum estimate
Notes
-----
The function also prints the maximum number of averages K possible
for the input data record.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sk_dsp_comm import sigsys as ss
>>> n = np.arange(0,2048)
>>> x = np.cos(2*np.pi*1000/10000*n) + 0.01*np.cos(2*np.pi*3000/10000*n)
>>> f, Sx = ss.simple_sa(x,128,512,10000)
>>> plt.plot(f, 10*np.log10(Sx))
>>> plt.ylim([-80, 0])
>>> plt.xlabel("Frequency (Hz)")
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.show()
With a hanning window.
>>> f, Sx = ss.simple_sa(x,256,1024,10000,window='hanning')
>>> plt.plot(f, 10*np.log10(Sx))
>>> plt.xlabel("Frequency (Hz)")
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.ylim([-80, 0])
"""
Nx = len(x)
K = int(Nx/NS)
log.info('K = %d', K)
if NAVG > K:
warnings.warn('NAVG exceeds number of available subrecords')
return 0,0
if window.lower() == 'boxcar' or window.lower() == 'rectangle':
w = signal.boxcar(NS)
elif window.lower() == 'hanning':
w = signal.hanning(NS)
xsw = np.zeros((K,NS)) + 1j*np.zeros((K,NS))
for k in range(NAVG):
xsw[k,] = w*x[k*NS:(k+1)*NS]
Sx = np.zeros(NFFT)
for k in range(NAVG):
X = fft.fft(xsw[k,],NFFT)
Sx += abs(X)**2
Sx /= float(NAVG)
Sx /= float(NFFT**2)
NFFTby2 = int(NFFT/2)
if x.dtype != 'complex128':
n = np.arange(NFFTby2)
f = fs*n/float(NFFT)
Sx = Sx[0:NFFTby2]
else:
n = np.arange(NFFTby2)
f = fs*np.hstack((np.arange(-NFFTby2,0),np.arange(NFFTby2)))/float(NFFT)
Sx = np.hstack((Sx[NFFTby2:],Sx[0:NFFTby2]))
return f, Sx
def line_spectra(fk,Xk,mode,sides=2,linetype='b',lwidth=2,floor_dB=-100,fsize=(6,4)):
"""
Plot the Fourier series line spectral given the coefficients.
This function plots two-sided and one-sided line spectra of a periodic
signal given the complex exponential Fourier series coefficients and
the corresponding harmonic frequencies.
Parameters
----------
fk : vector of real sinusoid frequencies
Xk : magnitude and phase at each positive frequency in fk
mode : 'mag' => magnitude plot, 'magdB' => magnitude in dB plot,
mode cont : 'magdBn' => magnitude in dB normalized, 'phase' => a phase plot in radians
sides : 2; 2-sided or 1-sided
linetype : line type per Matplotlib definitions, e.g., 'b';
lwidth : 2; linewidth in points
fsize : optional figure size in inches, default = (6,4) inches
Returns
-------
Nothing : A plot window opens containing the line spectrum plot
Notes
-----
Since real signals are assumed, the frequencies in fk are zero and/or positive
numbers, and the supplied Fourier coefficients correspond to those frequencies.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from sk_dsp_comm.sigsys import line_spectra
>>> n = np.arange(0,25)
>>> # a pulse train with 10 Hz fundamental and 20% duty cycle
>>> fk = n*10
>>> Xk = np.sinc(n*10*.02)*np.exp(-1j*2*np.pi*n*10*.01) # 1j = sqrt(-1)
>>> line_spectra(fk,Xk,'mag')
>>> plt.show()
>>> line_spectra(fk,Xk,'phase')
"""
plt.figure(figsize=fsize)
# Eliminate zero valued coefficients
idx = np.nonzero(Xk)[0]
Xk = Xk[idx]
fk = fk[idx]
if mode == 'mag':
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[0, np.abs(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, 2.*np.abs(Xk[k])],linetype, linewidth=lwidth)
else:
warnings.warn('Invalid sides type')
plt.grid()
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), 0, 1.05*max(abs(Xk))])
elif sides == 1:
plt.axis([0, 1.2*max(fk), 0, 1.05*2*max(abs(Xk))])
else:
warnings.warn('Invalid sides type')
plt.ylabel('Magnitude')
plt.xlabel('Frequency (Hz)')
elif mode == 'magdB':
Xk_dB = 20*np.log10(np.abs(Xk))
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth)
else:
warnings.warn('Invalid sides type')
plt.grid()
max_dB = np.ceil(max(Xk_dB/10.))*10
min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10)
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB])
elif sides == 1:
plt.axis([0, 1.2*max(fk), min_dB, max_dB])
else:
warnings.warn('Invalid sides type')
plt.ylabel('Magnitude (dB)')
plt.xlabel('Frequency (Hz)')
elif mode == 'magdBn':
Xk_dB = 20*np.log10(np.abs(Xk)/max(np.abs(Xk)))
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[floor_dB, Xk_dB[k]],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[floor_dB, Xk_dB[k]+6.02],linetype, linewidth=lwidth)
else:
warnings.warn('Invalid sides type')
plt.grid()
max_dB = np.ceil(max(Xk_dB/10.))*10
min_dB = max(floor_dB,np.floor(min(Xk_dB/10.))*10)
if sides == 2:
plt.axis([-1.2*max(fk), 1.2*max(fk), min_dB, max_dB])
elif sides == 1:
plt.axis([0, 1.2*max(fk), min_dB, max_dB])
else:
warnings.warn('Invalid sides type')
plt.ylabel('Normalized Magnitude (dB)')
plt.xlabel('Frequency (Hz)')
elif mode == 'phase':
for k in range(len(fk)):
if fk[k] == 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] == 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=2*lwidth)
elif fk[k] > 0 and sides == 2:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
plt.plot([-fk[k], -fk[k]],[0, -np.angle(Xk[k])],linetype, linewidth=lwidth)
elif fk[k] > 0 and sides == 1:
plt.plot([fk[k], fk[k]],[0, np.angle(Xk[k])],linetype, linewidth=lwidth)
else:
warnings.warn('Invalid sides type')
plt.grid()
if sides == 2:
plt.plot([-1.2*max(fk), 1.2*max(fk)], [0, 0],'k')
plt.axis([-1.2*max(fk), 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))])
elif sides == 1:
plt.plot([0, 1.2*max(fk)], [0, 0],'k')
plt.axis([0, 1.2*max(fk), -1.1*max(np.abs(np.angle(Xk))), 1.1*max(np.abs(np.angle(Xk)))])
else:
warnings.warn('Invalid sides type')
plt.ylabel('Phase (rad)')
plt.xlabel('Frequency (Hz)')
else:
warnings.warn('Invalid mode type')
def fs_coeff(xp,N,f0,one_side=True):
"""
Numerically approximate the Fourier series coefficients given periodic x(t).
The input is assumed to represent one period of the waveform
x(t) that has been uniformly sampled. The number of samples supplied
to represent one period of the waveform sets the sampling rate.
Parameters
----------
xp : ndarray of one period of the waveform x(t)
N : maximum Fourier series coefficient, [0,...,N]
f0 : fundamental frequency used to form fk.
Returns
-------
Xk : ndarray of the coefficients over indices [0,1,...,N]
fk : ndarray of the harmonic frequencies [0, f0,2f0,...,Nf0]
Notes
-----
len(xp) >= 2*N+1 as len(xp) is the fft length.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> import sk_dsp_comm.sigsys as ss
>>> t = arange(0,1,1/1024.)
>>> # a 20% duty cycle pulse starting at t = 0
>>> x_rect = ss.rect(t-.1,0.2)
>>> Xk, fk = ss.fs_coeff(x_rect,25,10)
>>> # plot the spectral lines
>>> ss.line_spectra(fk,Xk,'mag')
>>> plt.show()
"""
Nint = len(xp)
if Nint < 2*N+1:
raise ValueError('Number of samples in xp insufficient for requested N.')
Xp = fft.fft(xp,Nint)/float(Nint)
# To interface with the line_spectra function use one_side mode
if one_side:
Xk = Xp[0:N+1]
fk = f0*np.arange(0,N+1)
else:
Xk = np.hstack((Xp[-N:],Xp[0:N+1]))
fk = f0*np.arange(-N,N+1)
return Xk, fk
def fs_approx(Xk,fk,t):
"""
Synthesize periodic signal x(t) using Fourier series coefficients at harmonic frequencies
Assume the signal is real so coefficients Xk are supplied for nonnegative
indices. The negative index coefficients are assumed to be complex
conjugates.
Parameters
----------
Xk : ndarray of complex Fourier series coefficients
fk : ndarray of harmonic frequencies in Hz
t : ndarray time axis corresponding to output signal array x_approx
Returns
-------
x_approx : ndarray of periodic waveform approximation over time span t
Examples
--------
>>> t = arange(0,2,.002)
>>> # harmonics of a 20% duty cycle pulse train with 1 Hz fundamental
>>> n = arange(0,20,1)  # 0 to 19th harmonic
>>> fk = 1*n  # period = 1 s
>>> # Xk obtained, e.g., from fs_coeff() for one period of the waveform
>>> x_approx = fs_approx(Xk,fk,t)
>>> plot(t,x_approx)
"""
x_approx = np.zeros(len(t))
for k,Xkk in enumerate(Xk):
if fk[k] == 0:
x_approx += Xkk.real*np.ones(len(t))
else:
x_approx += 2*np.abs(Xkk)*np.cos(2*np.pi*fk[k]*t+np.angle(Xkk))
return x_approx
def ft_approx(x,t,Nfft):
'''
Approximate the Fourier transform of a finite duration signal using scipy.signal.freqz()
Parameters
----------
x : input signal array
t : time array used to create x(t)
Nfft : the number of frequency domain points used to
approximate X(f) on the interval [-fs/2, fs/2], where
fs = 1/Dt, Dt being the time spacing in array t
Returns
-------
f : frequency axis array in Hz
X : the Fourier transform approximation (complex)
Notes
-----
The time spacing of t sets the sampling rate fs = 1/(t[1] - t[0]), and the
returned frequency axis spans [-fs/2, fs/2) with Nfft points. Phase terms are
applied to account for the starting time of t and the FFT time origin, so X
approximates the continuous-time Fourier transform of x(t) over its support.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> fs = 100 # sampling rate in Hz
>>> tau = 1
>>> t = np.arange(-5,5,1/fs)
>>> x0 = ss.rect(t-.5,tau)
>>> plt.figure(figsize=(6,5))
>>> plt.plot(t,x0)
>>> plt.grid()
>>> plt.ylim([-0.1,1.1])
>>> plt.xlim([-2,2])
>>> plt.title(r'Exact Waveform')
>>> plt.xlabel(r'Time (s)')
>>> plt.ylabel(r'$x_0(t)$')
>>> plt.show()
>>> # FT Exact Plot
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> fs = 100 # sampling rate in Hz
>>> tau = 1
>>> t = np.arange(-5,5,1/fs)
>>> x0 = ss.rect(t-.5,tau)
>>> fe = np.arange(-10,10,.01)
>>> X0e = tau*np.sinc(fe*tau)
>>> plt.plot(fe,abs(X0e))
>>> #plot(f,angle(X0))
>>> plt.grid()
>>> plt.xlim([-10,10])
>>> plt.title(r'Exact (Theory) Spectrum Magnitude')
>>> plt.xlabel(r'Frequency (Hz)')
>>> plt.ylabel(r'$|X_0e(f)|$')
>>> plt.show()
>>> # FT Approximation Plot
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> fs = 100 # sampling rate in Hz
>>> tau = 1
>>> t = np.arange(-5,5,1/fs)
>>> x0 = ss.rect(t-.5,tau)
>>> f,X0 = ss.ft_approx(x0,t,4096)
>>> plt.plot(f,abs(X0))
>>> #plt.plot(f,angle(X0))
>>> plt.grid()
>>> plt.xlim([-10,10])
>>> plt.title(r'Approximation Spectrum Magnitude')
>>> plt.xlabel(r'Frequency (Hz)')
>>> plt.ylabel(r'$|X_0(f)|$');
>>> plt.tight_layout()
>>> plt.show()
'''
fs = 1/(t[1] - t[0])
t0 = (t[-1]+t[0])/2 # time delay at center
N0 = len(t)/2 # FFT center in samples
f = np.arange(-1./2,1./2,1./Nfft)
w, X = signal.freqz(x,1,2*np.pi*f)
X /= fs # account for dt = 1/fs in integral
X *= np.exp(-1j*2*np.pi*f*fs*t0)# time interval correction
X *= np.exp(1j*2*np.pi*f*N0)# FFT time interval is [0,Nfft-1]
F = f*fs
return F, X
def conv_sum(x1,nx1,x2,nx2,extent=('f','f')):
"""
Discrete convolution of x1 and x2 with proper tracking of the output time axis.
Convolve two discrete-time signals using the SciPy function :func:`scipy.signal.convolve`.
The time (sequence) axes are managed from input to output. y[n] = x1[n]*x2[n].
Parameters
----------
x1 : ndarray of signal x1 corresponding to nx1
nx1 : ndarray time axis for x1
x2 : ndarray of signal x2 corresponding to nx2
nx2 : ndarray time axis for x2
extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided
Returns
-------
y : ndarray of output values y
ny : ndarray of the corresponding sequence index n
Notes
-----
The output time axis starts at the sum of the starting values in x1 and x2
and ends at the sum of the two ending values in x1 and x2. The default
extents of ('f','f') are used for signals that are active (have support)
on or within n1 and n2 respectively. A right-sided signal such as
a^n*u[n] is semi-infinite, so it has extent 'r' and the
convolution output will be truncated to display only the valid results.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> nx = np.arange(-5,10)
>>> x = ss.drect(nx,4)
>>> y,ny = ss.conv_sum(x,nx,x,nx)
>>> plt.stem(ny,y)
>>> plt.show()
Consider a pulse convolved with an exponential. ('r' type extent)
>>> h = 0.5**nx*ss.dstep(nx)
>>> y,ny = ss.conv_sum(x,nx,h,nx,('f','r')) # note extents set
>>> plt.stem(ny,y) # expect a pulse charge and discharge sequence
"""
nnx1 = np.arange(0,len(nx1))
nnx2 = np.arange(0,len(nx2))
n1 = nnx1[0]
n2 = nnx1[-1]
n3 = nnx2[0]
n4 = nnx2[-1]
# Start by finding the valid output support or extent interval to ensure that
# for non-finite extent signals ambiguous results are not returned.
# Valid extents are f (finite), r (right-sided), and l (left-sided)
if extent[0] == 'f' and extent[1] == 'f':
nny = np.arange(n1+n3,n2+1+n4+1-1)
ny = np.arange(0,len(x1)+len(x2)-1) + nx1[0]+nx2[0]
elif extent[0] == 'f' and extent[1] == 'r':
nny = np.arange(n1+n3,n1+1+n4+1-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'r' and extent[1] == 'f':
nny = np.arange(n1+n3,n2+1+n3+1-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'f' and extent[1] == 'l':
nny = np.arange(n2+n3,n2+1+n4+1-1)
ny = nny + nx1[-1]+nx2[0]
elif extent[0] == 'l' and extent[1] == 'f':
nny = np.arange(n1+n4,n2+1+n4+1-1)
ny = nny + nx1[0]+nx2[-1]
elif extent[0] == 'r' and extent[1] == 'r':
nny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1)
ny = nny + nx1[0]+nx2[0]
elif extent[0] == 'l' and extent[1] == 'l':
nny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1)
ny = nny + max(nx1[0]+nx2[-1],nx1[-1]+nx2[0])
else:
raise ValueError('Invalid x1 x2 extents specified or valid extent not found!')
# Finally convolve the sequences
y = signal.convolve(x1, x2)
log.info('Output support: (%+d, %+d)' % (ny[0],ny[-1]))
return y[nny], ny
def conv_integral(x1,tx1,x2,tx2,extent=('f','f')):
"""
Continuous-time convolution of x1 and x2 with proper tracking of the output time axis.
Approximate the convolution integral for the convolution of two continuous-time signals using the SciPy function :func:`scipy.signal.convolve`. The time axes are managed from input to output. y(t) = x1(t)*x2(t).
Parameters
----------
x1 : ndarray of signal x1 corresponding to tx1
tx1 : ndarray time axis for x1
x2 : ndarray of signal x2 corresponding to tx2
tx2 : ndarray time axis for x2
extent : ('e1','e2') where 'e1', 'e2' may be 'f' finite, 'r' right-sided, or 'l' left-sided
Returns
-------
y : ndarray of output values y
ty : ndarray of the corresponding time axis for y
Notes
-----
The output time axis starts at the sum of the starting values in x1 and x2
and ends at the sum of the two ending values in x1 and x2. The time steps used in
x1(t) and x2(t) must match. The default extents of ('f','f') are used for signals
that are active (have support) on or within t1 and t2 respectively. A right-sided
signal such as exp(-a*t)*u(t) is semi-infinite, so it has extent 'r' and the
convolution output will be truncated to display only the valid results.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> import sk_dsp_comm.sigsys as ss
>>> tx = np.arange(-5,10,.01)
>>> x = ss.rect(tx-2,4) # pulse starts at t = 0
>>> y,ty = ss.conv_integral(x,tx,x,tx)
>>> plt.plot(ty,y) # expect a triangle on [0,8]
>>> plt.show()
Now, consider a pulse convolved with an exponential.
>>> h = 4*np.exp(-4*tx)*ss.step(tx)
>>> y,ty = ss.conv_integral(x,tx,h,tx,extent=('f','r')) # note extents set
>>> plt.plot(ty,y) # expect a pulse charge and discharge waveform
"""
dt = tx1[1] - tx1[0]
nx1 = np.arange(0,len(tx1))
nx2 = np.arange(0,len(tx2))
n1 = nx1[0]
n2 = nx1[-1]
n3 = nx2[0]
n4 = nx2[-1]
# Start by finding the valid output support or extent interval to ensure that
# for non-finite extent signals ambiguous results are not returned.
# Valid extents are f (finite), r (right-sided), and l (left-sided)
if extent[0] == 'f' and extent[1] == 'f':
ny = np.arange(n1+n3,n2+1+n4+1-1)
ty = np.arange(0,len(x1)+len(x2)-1)*dt + tx1[0]+tx2[0]
elif extent[0] == 'f' and extent[1] == 'r':
ny = np.arange(n1+n3,n1+1+n4+1-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'r' and extent[1] == 'f':
ny = np.arange(n1+n3,n2+1+n3+1-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'f' and extent[1] == 'l':
ny = np.arange(n2+n3,n2+1+n4+1-1)
ty = ny*dt + tx1[-1]+tx2[0]
elif extent[0] == 'l' and extent[1] == 'f':
ny = np.arange(n1+n4,n2+1+n4+1-1)
ty = ny*dt + tx1[0]+tx2[-1]
elif extent[0] == 'r' and extent[1] == 'r':
ny = np.arange(n1+n3,min(n1+1+n4+1,n2+1+n3+1)-1)
ty = ny*dt + tx1[0]+tx2[0]
elif extent[0] == 'l' and extent[1] == 'l':
ny = np.arange(max(n1+n4,n2+n3),n2+1+n4+1-1)
ty = ny*dt + max(tx1[0]+tx2[-1],tx1[-1]+tx2[0])
else:
raise ValueError('Invalid x1 x2 extents specified or valid extent not found!')
# Finally convolve the sampled sequences and scale by dt
y = signal.convolve(x1, x2)*dt
log.info('Output support: (%+2.2f, %+2.2f)' % (ty[0],ty[-1]))
return y[ny], ty
def delta_eps(t,eps):
"""
Rectangular pulse approximation to impulse function.
Parameters
----------
t : ndarray of time axis
eps : pulse width
Returns
-------
d : ndarray containing the impulse approximation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import delta_eps
>>> t = np.arange(-2,2,.001)
>>> d = delta_eps(t,.1)
>>> plt.plot(t,d)
>>> plt.show()
"""
d = np.zeros(len(t))
for k,tt in enumerate(t):
if abs(tt) <= eps/2.:
d[k] = 1/float(eps)
return d
def step(t):
"""
Approximation to step function signal u(t).
In this numerical version of u(t) the step turns on at t = 0.
Parameters
----------
t : ndarray of the time axis
Returns
-------
x : ndarray of the step function signal u(t)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import step
>>> t = arange(-1,5,.01)
>>> x = step(t)
>>> plt.plot(t,x)
>>> plt.ylim([-0.01, 1.01])
>>> plt.show()
To turn on at t = 1, shift t.
>>> x = step(t - 1.0)
>>> plt.ylim([-0.01, 1.01])
>>> plt.plot(t,x)
"""
x = np.zeros(len(t))
for k,tt in enumerate(t):
if tt >= 0:
x[k] = 1.0
return x
def rect(t,tau):
"""
Approximation to the rectangle pulse Pi(t/tau).
In this numerical version of Pi(t/tau) the pulse is active
over -tau/2 <= t <= tau/2.
Parameters
----------
t : ndarray of the time axis
tau : the pulse width
Returns
-------
x : ndarray of the signal Pi(t/tau)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import rect
>>> t = arange(-1,5,.01)
>>> x = rect(t,1.0)
>>> plt.plot(t,x)
>>> plt.ylim([0, 1.01])
>>> plt.show()
To turn on the pulse at t = 1 shift t.
>>> x = rect(t - 1.0,1.0)
>>> plt.plot(t,x)
>>> plt.ylim([0, 1.01])
"""
x = np.zeros(len(t))
for k,tk in enumerate(t):
if np.abs(tk) > tau/2.:
x[k] = 0
else:
x[k] = 1
return x
def tri(t,tau):
"""
Approximation to the triangle pulse Lambda(t/tau).
In this numerical version of Lambda(t/tau) the pulse is active
over -tau <= t <= tau.
Parameters
----------
t : ndarray of the time axis
tau : one half the triangle base width
Returns
-------
x : ndarray of the signal Lambda(t/tau)
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import tri
>>> t = arange(-1,5,.01)
>>> x = tri(t,1.0)
>>> plt.plot(t,x)
>>> plt.show()
To turn on at t = 1, shift t.
>>> x = tri(t - 1.0,1.0)
>>> plt.plot(t,x)
"""
x = np.zeros(len(t))
for k,tk in enumerate(t):
if np.abs(tk) > tau/1.:
x[k] = 0
else:
x[k] = 1 - np.abs(tk)/tau
return x
def dimpulse(n):
"""
Discrete impulse function delta[n].
Parameters
----------
n : ndarray of the time axis
Returns
-------
x : ndarray of the signal delta[n]
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import dimpulse
>>> n = arange(-5,5)
>>> x = dimpulse(n)
>>> plt.stem(n,x)
>>> plt.show()
Shift the delta left by 2.
>>> x = dimpulse(n+2)
>>> plt.stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn == 0:
x[k] = 1.0
return x
def dstep(n):
"""
Discrete step function u[n].
Parameters
----------
n : ndarray of the time axis
Returns
-------
x : ndarray of the signal u[n]
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import dstep
>>> n = arange(-5,5)
>>> x = dstep(n)
>>> plt.stem(n,x)
>>> plt.show()
Shift the step left by 2.
>>> x = dstep(n+2)
>>> plt.stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn >= 0:
x[k] = 1.0
return x
def drect(n,N):
"""
Discrete rectangle function of duration N samples.
The signal is active on the interval 0 <= n <= N-1. Also known
as the rectangular window function, which is available in
scipy.signal.
Parameters
----------
n : ndarray of the time axis
N : the pulse duration
Returns
-------
x : ndarray of the signal
Notes
-----
The discrete rectangle turns on at n = 0, turns off after n = N-1, and
has a duration of exactly N samples.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import drect
>>> n = arange(-5,5)
>>> x = drect(n, N=3)
>>> plt.stem(n,x)
>>> plt.show()
Shift the rect left by 2.
>>> x = drect(n+2, N=3)
>>> plt.stem(n,x)
"""
x = np.zeros(len(n))
for k,nn in enumerate(n):
if nn >= 0 and nn < N:
x[k] = 1.0
return x
def rc_imp(Ns,alpha,M=6):
"""
A truncated raised cosine pulse used in digital communications.
The pulse shaping factor :math:`0< \\alpha < 1` is required as well as the
truncation factor M which sets the pulse duration to be 2*M*Tsymbol.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : equals RC one-sided symbol truncation factor
Returns
-------
b : ndarray containing the pulse shape
Notes
-----
The pulse shape b is typically used as the FIR filter coefficients
when forming a pulse shaped digital communications waveform.
Examples
--------
Ten samples per symbol and alpha = 0.35.
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import rc_imp
>>> b = rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> plt.stem(n,b)
>>> plt.show()
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n));
a = alpha;
Ns *= 1.0
for i in range(len(n)):
if (1 - 4*(a*n[i]/Ns)**2) == 0:
b[i] = np.pi/4*np.sinc(1/(2.*a))
else:
b[i] = np.sinc(n[i]/Ns)*np.cos(np.pi*a*n[i]/Ns)/(1 - 4*(a*n[i]/Ns)**2)
return b
def sqrt_rc_imp(Ns,alpha,M=6):
"""
A truncated square root raised cosine pulse used in digital communications.
The pulse shaping factor 0< alpha < 1 is required as well as the
truncation factor M which sets the pulse duration to be 2*M*Tsymbol.
Parameters
----------
Ns : number of samples per symbol
alpha : excess bandwidth factor on (0, 1), e.g., 0.35
M : equals RC one-sided symbol truncation factor
Returns
-------
b : ndarray containing the pulse shape
Notes
-----
The pulse shape b is typically used as the FIR filter coefficients
when forming a pulse shaped digital communications waveform. When the
square root raised cosine (SRC) pulse is used to generate the Tx signal and
is also used at the receiver as a matched filter (receiver FIR filter), the
received signal is raised cosine shaped, thus having zero
intersymbol interference and optimum removal of additive white
noise, if present, at the receiver input.
Examples
--------
>>> # ten samples per symbol and alpha = 0.35
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import sqrt_rc_imp
>>> b = sqrt_rc_imp(10,0.35)
>>> n = arange(-10*6,10*6+1)
>>> plt.stem(n,b)
>>> plt.show()
"""
# Design the filter
n = np.arange(-M*Ns,M*Ns+1)
b = np.zeros(len(n))
Ns *= 1.0
a = alpha
for i in range(len(n)):
if abs(1 - 16*a**2*(n[i]/Ns)**2) <= np.finfo(float).eps/2:
b[i] = 1/2.*((1+a)*np.sin((1+a)*np.pi/(4.*a))-(1-a)*np.cos((1-a)*np.pi/(4.*a))+(4*a)/np.pi*np.sin((1-a)*np.pi/(4.*a)))
else:
b[i] = 4*a/(np.pi*(1 - 16*a**2*(n[i]/Ns)**2))
b[i] = b[i]*(np.cos((1+a)*np.pi*n[i]/Ns) + np.sinc((1-a)*n[i]/Ns)*(1-a)*np.pi/(4.*a))
return b
def pn_gen(n_bits, m=5):
"""
Maximal length sequence signal generator.
Generates a sequence of 0/1 bits of n_bits duration. The bits themselves
are obtained from an m-sequence of period 2**m - 1 generated by m shift
registers. Available m-sequences (PN generators) include m = 2,3,...,12, & 16.
Parameters
----------
n_bits : the number of bits to generate
m : the number of shift registers. 2,3, .., 12, & 16
Returns
-------
PN : ndarray of the generator output over N_bits
Notes
-----
The sequence is periodic having period 2**m - 1 (2^m - 1).
Examples
--------
>>> # A 15 bit period signal over 50 bits
>>> PN = pn_gen(50,4)
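A sketch of a period check (m = 4 and 30 bits are illustrative values): the
first 15 bits should repeat, since the period is 2**4 - 1 = 15.
>>> import numpy as np
>>> PN = pn_gen(30,4)
>>> bool(np.all(PN[:15] == PN[15:30]))
True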
"""
c = m_seq(m)
Q = len(c)
max_periods = int(np.ceil(n_bits / float(Q)))
PN = np.zeros(max_periods*Q)
for k in range(max_periods):
PN[k*Q:(k+1)*Q] = c
PN = np.resize(PN, (1, n_bits))
return PN.flatten()
def m_seq(m):
"""
Generate an m-sequence ndarray using an all-ones initialization.
Available m-sequence (PN generators) include m = 2,3,...,12, & 16.
Parameters
----------
m : the number of shift registers. 2,3, .., 12, & 16
Returns
-------
c : ndarray of one period of the m-sequence
Notes
-----
The sequence period is 2**m - 1 (2^m - 1).
Examples
--------
>>> c = m_seq(5)
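A quick property check, as a sketch (m = 5 is illustrative and assumes the
built-in taps are maximal length, as intended): the period is 2**5 - 1 = 31
and the ones outnumber the zeros by exactly one.
>>> import numpy as np
>>> c = m_seq(5)
>>> (len(c), int(np.sum(c)))
(31, 16)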
"""
if m == 2:
taps = np.array([1, 1, 1])
elif m == 3:
taps = np.array([1, 0, 1, 1])
elif m == 4:
taps = np.array([1, 0, 0, 1, 1])
elif m == 5:
taps = np.array([1, 0, 0, 1, 0, 1])
elif m == 6:
taps = np.array([1, 0, 0, 0, 0, 1, 1])
elif m == 7:
taps = np.array([1, 0, 0, 0, 1, 0, 0, 1])
elif m == 8:
taps = np.array([1, 0, 0, 0, 1, 1, 1, 0, 1])
elif m == 9:
taps = np.array([1, 0, 0, 0, 0, 1, 0, 0, 0, 1])
elif m == 10:
taps = np.array([1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1])
elif m == 11:
taps = np.array([1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1])
elif m == 12:
taps = np.array([1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1])
elif m == 16:
taps = np.array([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1])
else:
raise ValueError('Invalid length specified')
# Load shift register with all ones to start
sr = np.ones(m)
# m-sequence length is:
Q = 2**m - 1
c = np.zeros(Q)
for n in range(Q):
tap_xor = 0
c[n] = sr[-1]
for k in range(1,m):
if taps[k] == 1:
tap_xor = np.bitwise_xor(tap_xor,np.bitwise_xor(int(sr[-1]),int(sr[m-1-k])))
sr[1:] = sr[:-1]
sr[0] = tap_xor
return c
def bpsk_tx(N_bits, Ns, ach_fc=2.0, ach_lvl_dB=-100, pulse='rect', alpha = 0.25, M=6):
"""
Generates a binary phase shift keyed (BPSK) transmit signal with adjacent channel interference.
Generates three BPSK signals with rectangular or square root raised cosine (SRC)
pulse shaping of duration N_bits and Ns samples per bit. The desired signal is
centered on f = 0, while the adjacent channel signals to the left and right
are generated at the level ach_lvl_dB relative to the desired signal. Used in the
digital communications Case Study supplement.
Parameters
----------
N_bits : the number of bits to simulate
Ns : the number of samples per bit
ach_fc : the frequency offset of the adjacent channel signals (default 2.0)
ach_lvl_dB : the level of the adjacent channel signals in dB (default -100)
pulse : the pulse shape 'rect' or 'src'
alpha : square root raised cosine pulse shape factor (default = 0.25)
M : square root raised cosine pulse truncation factor (default = 6)
Returns
-------
x : ndarray of the composite signal x0 + ach_lvl*(x1p + x1m)
b : the transmit pulse shape
data0 : the data bits used to form the desired signal; used for error checking
Notes
-----
Examples
--------
>>> x,b,data0 = bpsk_tx(1000,10,pulse='src')
"""
pulse_types = ['rect', 'src']
if pulse not in pulse_types:
raise ValueError("Pulse shape must be 'rect' or 'src'")
x0,b,data0 = nrz_bits(N_bits, Ns, pulse, alpha, M)
x1p,b,data1p = nrz_bits(N_bits, Ns, pulse, alpha, M)
x1m,b,data1m = nrz_bits(N_bits, Ns, pulse, alpha, M)
n = np.arange(len(x0))
x1p = x1p*np.exp(1j*2*np.pi*ach_fc/float(Ns)*n)
x1m = x1m*np.exp(-1j*2*np.pi*ach_fc/float(Ns)*n)
ach_lvl = 10**(ach_lvl_dB/20.)
return x0 + ach_lvl*(x1p + x1m), b, data0
def nrz_bits(n_bits, ns, pulse='rect', alpha=0.25, m=6):
"""
Generate non-return-to-zero (NRZ) data bits with pulse shaping.
A baseband digital data signal using +/-1 amplitude signal values
and including pulse shaping.
Parameters
----------
n_bits : number of NRZ +/-1 data bits to produce
ns : the number of samples per bit,
pulse : 'rect', 'rc', 'src' (default 'rect')
alpha : excess bandwidth factor(default 0.25)
m : single sided pulse duration (default = 6)
Returns
-------
x : ndarray of the NRZ signal values
b : ndarray of the pulse shape
data : ndarray of the underlying data bits
Notes
-----
Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine),
'src' (root raised cosine). The actual pulse length is 2*M+1 samples.
This function is used by BPSK_tx in the Case Study article.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm.sigsys import nrz_bits
>>> from numpy import arange
>>> x,b,data = nrz_bits(100, 10)
>>> t = arange(len(x))
>>> plt.plot(t, x)
>>> plt.ylim([-1.01, 1.01])
>>> plt.show()
"""
data = np.random.randint(0, 2, n_bits)
n_zeros = np.zeros((n_bits, int(ns) - 1))
x = np.hstack((2 * data.reshape(n_bits, 1) - 1, n_zeros))
x = x.flatten()
if pulse.lower() == 'rect':
b = np.ones(int(ns))
elif pulse.lower() == 'rc':
b = rc_imp(ns, alpha, m)
elif pulse.lower() == 'src':
b = sqrt_rc_imp(ns, alpha, m)
else:
raise ValueError('pulse type must be rect, rc, or src')
x = signal.lfilter(b,1,x)
return x, b / float(ns), data
def nrz_bits2(data, Ns, pulse='rect', alpha = 0.25, M=6):
"""
Generate non-return-to-zero (NRZ) data bits with pulse shaping with user data
A baseband digital data signal using +/-1 amplitude signal values
and including pulse shaping. The data sequence is user supplied.
Parameters
----------
data : ndarray of the data bits as 0/1 values
Ns : the number of samples per bit,
pulse : 'rect', 'rc', 'src' (default 'rect')
alpha : excess bandwidth factor(default 0.25)
M : single sided pulse duration (default = 6)
Returns
-------
x : ndarray of the NRZ signal values
b : ndarray of the pulse shape
Notes
-----
Pulse shapes include 'rect' (rectangular), 'rc' (raised cosine),
'src' (root raised cosine). The actual pulse length is 2*M+1 samples.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm.sigsys import nrz_bits2
>>> from sk_dsp_comm.sigsys import m_seq
>>> from numpy import arange
>>> x,b = nrz_bits2(m_seq(5),10)
>>> t = arange(len(x))
>>> plt.ylim([-1.01, 1.01])
>>> plt.plot(t,x)
"""
N_bits = len(data)
n_zeros = np.zeros((N_bits,int(Ns)-1))
x = np.hstack((2*data.reshape(N_bits,1)-1,n_zeros))
x = x.flatten()
if pulse.lower() == 'rect':
b = np.ones(int(Ns))
elif pulse.lower() == 'rc':
b = rc_imp(Ns,alpha,M)
elif pulse.lower() == 'src':
b = sqrt_rc_imp(Ns,alpha,M)
else:
raise ValueError('pulse type must be rect, rc, or src')
x = signal.lfilter(b,1,x)
return x,b/float(Ns)
def eye_plot(x, l, s=0):
"""
Eye pattern plot of a baseband digital communications waveform.
The signal must be real, but can be multivalued in terms of the underlying
modulation scheme. Used for BPSK eye plots in the Case Study article.
Parameters
----------
x : ndarray of the real input data vector/array
l : display length in samples (usually two symbols)
s : start index
Returns
-------
Nothing : A plot window opens containing the eye plot
Notes
-----
Increase s to eliminate filter transients.
Examples
--------
1000 bits at 10 samples per bit with 'rc' shaping.
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> x,b, data = ss.nrz_bits(1000,10,'rc')
>>> ss.eye_plot(x,20,60)
"""
plt.figure(figsize=(6,4))
idx = np.arange(0, l + 1)
plt.plot(idx, x[s:s + l + 1], 'b')
k_max = int((len(x) - s) / l) - 1
for k in range(1,k_max):
plt.plot(idx, x[s + k * l:s + l + 1 + k * l], 'b')
plt.grid()
plt.xlabel('Time Index - n')
plt.ylabel('Amplitude')
plt.title('Eye Plot')
return 0
def scatter(x, ns, start):
"""
Sample a baseband digital communications waveform at the symbol spacing.
Parameters
----------
x : ndarray of the input digital comm signal
ns : number of samples per symbol (bit)
start : the array index to start the sampling
Returns
-------
xI : ndarray of the real part of x following sampling
xQ : ndarray of the imaginary part of x following sampling
Notes
-----
Normally the signal is complex, so the scatter plot contains
clusters at points in the complex plane. For a binary signal
such as BPSK, the point centers are nominally +/-1 on the real
axis. Start is used to eliminate transients from the FIR
pulse shaping filters from appearing in the scatter plot.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import sigsys as ss
>>> x,b, data = ss.nrz_bits(1000,10,'rc')
>>> # Add some noise so points are now scattered about +/-1
>>> y = ss.cpx_awgn(x,20,10)
>>> yI,yQ = ss.scatter(y,10,60)
>>> plt.plot(yI,yQ,'.')
>>> plt.axis('equal')
>>> plt.ylabel("Quadrature")
>>> plt.xlabel("In-Phase")
>>> plt.grid()
>>> plt.show()
"""
xI = np.real(x[start::ns])
xQ = np.imag(x[start::ns])
return xI, xQ
def bit_errors(z, data, start, ns):
"""
A simple bit error counting function.
In its present form this function counts bit errors between
hard decision BPSK bits in +/-1 form and compares them with
0/1 binary data that was transmitted. Timing between the Tx
and Rx data is the responsibility of the user. An enhanced
version of this function, which features automatic synching
will be created in the future.
Parameters
----------
z : ndarray of hard decision BPSK data prior to symbol spaced sampling
data : ndarray of reference bits in 1/0 format
start : timing reference for the received signal
ns : the number of samples per symbol
Returns
-------
Pe_hat : the estimated probability of a bit error
Notes
-----
The Tx and Rx data streams are exclusive-or'd and then the bit errors
are summed, and finally divided by the number of bits observed to form an
estimate of the bit error probability. This function needs to be
enhanced to be more useful.
Examples
--------
>>> from scipy import signal
>>> x,b, data = nrz_bits(1000,10)
>>> # set Eb/N0 to 8 dB
>>> y = cpx_awgn(x,8,10)
>>> # matched filter the signal
>>> z = signal.lfilter(b,1,y)
>>> # make bit decisions at 10 and Ns multiples thereafter
>>> Pe_hat = bit_errors(z,data,10,10)
"""
Pe_hat = np.sum(data[0:len(z[start::ns])] ^ np.int64((np.sign(np.real(z[start::ns])) + 1) / 2)) / float(len(z[start::ns]))
return Pe_hat
def cpx_awgn(x, es_n0, ns):
"""
Apply white Gaussian noise to a digital communications signal.
This function represents a complex baseband white Gaussian noise
digital communications channel. The input signal array may be real
or complex.
Parameters
----------
x : ndarray noise free complex baseband input signal.
es_n0 : set the channel Es/N0 (Eb/N0 for binary) level in dB
ns : number of samples per symbol (bit)
Returns
-------
y : ndarray x with additive noise added.
Notes
-----
Set the channel energy per symbol-to-noise power spectral
density ratio (Es/N0) in dB.
Examples
--------
>>> x,b, data = nrz_bits(1000,10)
>>> # set Eb/N0 = 10 dB
>>> y = cpx_awgn(x,10,10)
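A sanity-check sketch (statistical, so the value is only approximate): the
measured Es/N0 should land near the requested 10 dB.
>>> import numpy as np
>>> es_n0_est = 10*np.log10(10*np.var(x)/np.var(y - x))  # roughly 10 dB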
"""
w = np.sqrt(ns * np.var(x) * 10 ** (-es_n0 / 10.) / 2.) * (np.random.randn(len(x)) + 1j * np.random.randn(len(x)))
return x+w
def my_psd(x,NFFT=2**10,Fs=1):
"""
A local version of matplotlib's psd() function that returns the plot arrays.
A mlab.psd wrapper function that returns two ndarrays;
makes no attempt to auto plot anything.
Parameters
----------
x : ndarray input signal
NFFT : a power of two, e.g., 2**10 = 1024
Fs : the sampling rate in Hz
Returns
-------
Px : ndarray of the power spectrum estimate
f : ndarray of frequency values
Notes
-----
This function makes it easier to overlay spectrum plots because
you have better control over the axis scaling than when using psd()
in the autoscale mode.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import log10
>>> from sk_dsp_comm import sigsys as ss
>>> x,b, data = ss.nrz_bits(10000,10)
>>> Px,f = ss.my_psd(x,2**10,10)
>>> plt.plot(f, 10*log10(Px))
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (Hz)")
>>> plt.show()
"""
Px,f = pylab.mlab.psd(x,NFFT,Fs)
return Px.flatten(), f
def am_tx(m,a_mod,fc=75e3):
"""
AM transmitter for Case Study of Chapter 17.
Assume input is sampled at 8 Ksps and upsampling
by 24 is performed to arrive at fs_out = 192 Ksps.
Parameters
----------
m : ndarray of the input message signal
a_mod : AM modulation index, between 0 and 1
fc : the carrier frequency in Hz
Returns
-------
x192 : ndarray of the upsampled by 24 and modulated carrier
t192 : ndarray of the upsampled by 24 time axis
m24 : ndarray of the upsampled by 24 message signal
Notes
-----
The sampling rate of the input signal is assumed to be 8 kHz.
Examples
--------
>>> n = arange(0,1000)
>>> # 1 kHz message signal
>>> m = cos(2*pi*1000/8000.*n)
>>> x192, t192, m24 = am_tx(m,0.8,fc=75e3)
"""
m24 = interp24(m)
t192 = np.arange(len(m24))/192.0e3
#m24 = np.cos(2*np.pi*2.0e3*t192)
m_max = np.max(np.abs(m24))
x192 = (1 + a_mod*m24/m_max)*np.cos(2*np.pi*fc*t192)
return x192, t192, m24
def am_rx(x192):
"""
AM envelope detector receiver for the Chapter 17 Case Study
The receiver bandpass filter is not included in this function.
Parameters
----------
x192 : ndarray of the AM signal at sampling rate 192 ksps
Returns
-------
m_rx8 : ndarray of the demodulated message at 8 ksps
t8 : ndarray of the time axis at 8 ksps
m_rx192 : ndarray of the demodulated output at 192 ksps
x_edet192 : ndarray of the envelope detector output at 192 ksps
Notes
-----
The bandpass filter needed at the receiver front-end can be designed
using b_bpf,a_bpf = :func:`am_rx_BPF`.
Examples
--------
>>> import numpy as np
>>> n = np.arange(0,1000)
>>> # 1 kHz message signal
>>> m = np.cos(2*np.pi*1000/8000.*n)
>>> m_rx8,t8,m_rx192,x_edet192 = am_rx(x192)
"""
x_edet192 = env_det(x192)
m_rx8 = deci24(x_edet192)
# remove DC offset from the env_det + LPF output
m_rx8 -= np.mean(m_rx8)
t8 = np.arange(len(m_rx8))/8.0e3
"""
For performance testing also filter x_edet192
at 192 ksps using a Butterworth cascade.
The filter cutoff is 5kHz, the message BW.
"""
b192,a192 = signal.butter(5,2*5.0e3/192.0e3)
m_rx192 = signal.lfilter(b192,a192,x_edet192)
m_rx192 = signal.lfilter(b192,a192,m_rx192)
m_rx192 -= np.mean(m_rx192)
return m_rx8,t8,m_rx192,x_edet192
def am_rx_bpf(n_order=7, ripple_dB=1, b=10e3, fs=192e3):
"""
Bandpass filter design for the AM receiver Case Study of Chapter 17.
Design a 7th-order Chebyshev type 1 bandpass filter to remove/reduce
adjacent channel interference at the envelope detector input.
Parameters
----------
n_order : the filter order (default = 7)
ripple_dB : the passband ripple in dB (default = 1)
b : the RF bandwidth (default = 10e3)
fs : the sampling frequency
Returns
-------
b_bpf : ndarray of the numerator filter coefficients
a_bpf : ndarray of the denominator filter coefficients
Examples
--------
>>> from scipy import signal
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> import sk_dsp_comm.sigsys as ss
>>> # Use the default values
>>> b_bpf,a_bpf = ss.am_rx_bpf()
Pole-zero plot of the filter.
>>> ss.zplane(b_bpf,a_bpf)
>>> plt.show()
Plot of the frequency response.
>>> f = np.arange(0,192/2.,.1)
>>> w, Hbpf = signal.freqz(b_bpf,a_bpf,2*np.pi*f/192)
>>> plt.plot(f*10,20*np.log10(abs(Hbpf)))
>>> plt.axis([0,1920/2.,-80,10])
>>> plt.ylabel("Power Spectral Density (dB)")
>>> plt.xlabel("Frequency (kHz)")
>>> plt.show()
"""
b_bpf,a_bpf = signal.cheby1(n_order, ripple_dB, 2 * np.array([75e3 - b / 2., 75e3 + b / 2.]) / fs, 'bandpass')
return b_bpf,a_bpf
def env_det(x):
"""
Ideal envelope detector.
This function retains the positive half cycles of the input signal.
Parameters
----------
x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Examples
--------
>>> n = arange(0,100)
>>> # 1 kHz message signal
>>> m = cos(2*pi*1000/8000.*n)
>>> x192, t192, m24 = am_tx(m,0.8,fc=75e3)
>>> y = env_det(x192)
"""
y = np.zeros(len(x))
for k,xx in enumerate(x):
if xx >= 0:
y[k] = xx
return y
def interp24(x):
"""
Interpolate by L = 24 using Butterworth filters.
The interpolation is done using three stages. Upsample by
L = 2 and lowpass filter, upsample by 3 and lowpass filter, then
upsample by L = 4 and lowpass filter. In all cases the lowpass
filter is a 10th-order Butterworth lowpass.
Parameters
----------
x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Notes
-----
The cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to
track the upsampling by 2, 3, and 4 respectively.
Examples
--------
>>> y = interp24(x)
"""
# Stage 1: L = 2
b2,a2 = signal.butter(10,1/2.)
y1 = upsample(x,2)
y1 = signal.lfilter(b2,a2,2*y1)
# Stage 2: L = 3
b3,a3 = signal.butter(10,1/3.)
y2 = upsample(y1,3)
y2 = signal.lfilter(b3,a3,3*y2)
# Stage 3: L = 4
b4,a4 = signal.butter(10,1/4.)
y3 = upsample(y2,4)
y3 = signal.lfilter(b4,a4,4*y3)
return y3
def deci24(x):
"""
Decimate by L = 24 using Butterworth filters.
The decimation is done using three stages. Downsample by
L = 2 and lowpass filter, downsample by 3 and lowpass filter, then
downsample by L = 4 and lowpass filter. In all cases the lowpass
filter is a 10th-order Butterworth lowpass.
Parameters
----------
x : ndarray of the input signal
Returns
-------
y : ndarray of the output signal
Notes
-----
The cutoff frequency of the lowpass filters is 1/2, 1/3, and 1/4 to
track the downsampling by 2, 3, and 4 respectively.
Examples
--------
>>> y = deci24(x)
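A round-trip sketch (the test tone is an illustrative choice): interpolating
by 24 and then decimating by 24 returns to the original sampling rate, apart
from the cascaded filter delay.
>>> import numpy as np
>>> n = np.arange(0,1000)
>>> x = np.cos(2*np.pi*0.05*n)
>>> y = deci24(interp24(x))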
"""
# Stage 1: M = 2
b2,a2 = signal.butter(10,1/2.)
y1 = signal.lfilter(b2,a2,x)
y1 = downsample(y1,2)
# Stage 2: M = 3
b3,a3 = signal.butter(10,1/3.)
y2 = signal.lfilter(b3,a3,y1)
y2 = downsample(y2,3)
# Stage 3: L = 4
b4,a4 = signal.butter(10,1/4.)
y3 = signal.lfilter(b4,a4,y2)
y3 = downsample(y3,4)
return y3
def upsample(x,L):
"""
Upsample by factor L
Insert L - 1 zero samples in between each input sample.
Parameters
----------
x : ndarray of input signal values
L : upsample factor
Returns
-------
y : ndarray of the output signal values
Examples
--------
>>> y = upsample(x,3)
"""
N_input = len(x)
y = np.hstack((x.reshape(N_input,1),np.zeros((N_input, int(L-1)))))
y = y.flatten()
return y
def downsample(x,M,p=0):
"""
Downsample by factor M
Keep every Mth sample of the input. The phase of the input samples
kept can be selected.
Parameters
----------
x : ndarray of input signal values
M : downsample factor
p : phase of decimated value, 0 (default), 1, ..., M-1
Returns
-------
y : ndarray of the output signal values
Examples
--------
>>> y = downsample(x,3)
>>> y = downsample(x,3,1)
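A small sketch (the array values are illustrative): with matching phase,
downsampling an upsampled signal recovers the original samples.
>>> import numpy as np
>>> x = np.arange(1,7)
>>> y = downsample(upsample(x,3),3)
>>> bool(np.all(y == x))
True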
"""
if not isinstance(M, int):
raise TypeError("M must be an int")
x = x[0:int(np.floor(len(x)/M))*M]
x = x.reshape((int(np.floor(len(x)/M)),M))
y = x[:,p]
return y
def unique_cpx_roots(rlist,tol = 0.001):
"""
The average of the root values is used when multiplicity
is greater than one.
Mark Wickert October 2016
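A small sketch (the root values are illustrative): two roots within tol of
each other are reported once, averaged, with multiplicity 2.
>>> import numpy as np
>>> uniq, mult = unique_cpx_roots(np.array([0.5, 0.5001, -0.3]), tol=0.001)
>>> # expect uniq near [0.5, -0.3] and mult of [2, 1]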
"""
uniq = [rlist[0]]
mult = [1]
for k in range(1,len(rlist)):
N_uniq = len(uniq)
for m in range(N_uniq):
if abs(rlist[k]-uniq[m]) <= tol:
mult[m] += 1
uniq[m] = (uniq[m]*(mult[m]-1) + rlist[k])/float(mult[m])
break
uniq = np.hstack((uniq,rlist[k]))
mult = np.hstack((mult,[1]))
return np.array(uniq), np.array(mult)
def zplane(b,a,auto_scale=True,size=2,detect_mult=True,tol=0.001):
"""
Create an z-plane pole-zero plot.
Create an z-plane pole-zero plot using the numerator
and denominator z-domain system function coefficient
ndarrays b and a respectively. Assume descending powers of z.
Parameters
----------
b : ndarray of the numerator coefficients
a : ndarray of the denominator coefficients
auto_scale : bool (default True)
size : plot radius maximum when scale = False
Returns
-------
(M,N) : tuple of zero and pole counts + plot window
Notes
-----
This function tries to identify repeated poles and zeros and will
place the multiplicity number above and to the right of the pole or zero.
The difficulty is setting the tolerance for this detection. Currently it
is set at 1e-3 via the tol argument passed to the helper unique_cpx_roots.
Examples
--------
>>> # Here the plot is generated using auto_scale
>>> zplane(b,a)
>>> # Here the plot is generated using manual scaling
>>> zplane(b,a,False,1.5)
"""
if (isinstance(a,int) or isinstance(a,float)):
a = [a]
if (isinstance(b,int) or isinstance(b,float)):
b = [b]
M = len(b) - 1
N = len(a) - 1
# Plot labels if multiplicity greater than 1
x_scale = 1.5*size
y_scale = 1.5*size
x_off = 0.02
y_off = 0.01
#N_roots = np.array([1.0])
if M > 0:
N_roots = np.roots(b)
#D_roots = np.array([1.0])
if N > 0:
D_roots = np.roots(a)
if auto_scale:
if M > 0 and N > 0:
size = max(np.max(np.abs(N_roots)),np.max(np.abs(D_roots)))+.1
elif M > 0:
size = max(np.max(np.abs(N_roots)),1.0)+.1
elif N > 0:
size = max(1.0,np.max(np.abs(D_roots)))+.1
else:
size = 1.1
plt.figure(figsize=(5,5))
plt.axis('equal')
r = np.linspace(0,2*np.pi,200)
plt.plot(np.cos(r),np.sin(r),'r--')
plt.plot([-size,size],[0,0],'k-.')
plt.plot([0,0],[-size,size],'k-.')
if M > 0:
if detect_mult == True:
N_uniq, N_mult = unique_cpx_roots(N_roots,tol=tol)
plt.plot(np.real(N_uniq),np.imag(N_uniq),'ko',mfc='None',ms=8)
idx_N_mult = np.nonzero(np.ravel(N_mult>1))[0]
for k in range(len(idx_N_mult)):
x_loc = np.real(N_uniq[idx_N_mult[k]]) + x_off*x_scale
y_loc =np.imag(N_uniq[idx_N_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(N_mult[idx_N_mult[k]]),ha='center',va='bottom',fontsize=10)
else:
plt.plot(np.real(N_roots),np.imag(N_roots),'ko',mfc='None',ms=8)
if N > 0:
if detect_mult == True:
D_uniq, D_mult=unique_cpx_roots(D_roots,tol=tol)
plt.plot(np.real(D_uniq),np.imag(D_uniq),'kx',ms=8)
idx_D_mult = np.nonzero(np.ravel(D_mult>1))[0]
for k in range(len(idx_D_mult)):
x_loc = np.real(D_uniq[idx_D_mult[k]]) + x_off*x_scale
y_loc =np.imag(D_uniq[idx_D_mult[k]]) + y_off*y_scale
plt.text(x_loc,y_loc,str(D_mult[idx_D_mult[k]]),ha='center',va='bottom',fontsize=10)
else:
plt.plot(np.real(D_roots),np.imag(D_roots),'kx',ms=8)
if M - N < 0:
plt.plot(0.0,0.0,'bo',mfc='None',ms=8)
elif M - N > 0:
plt.plot(0.0,0.0,'kx',ms=8)
if abs(M - N) > 1:
plt.text(x_off*x_scale,y_off*y_scale,str(abs(M-N)),ha='center',va='bottom',fontsize=10)
plt.xlabel('Real Part')
plt.ylabel('Imaginary Part')
plt.title('Pole-Zero Plot')
#plt.grid()
plt.axis([-size,size,-size,size])
return M,N
def rect_conv(n, n_len):
"""
The theoretical result of convolving two rectangle sequences.
The result is a triangle. The solution is
based on pure analysis. Simply coded as opposed
to efficiently coded.
Parameters
----------
n : ndarray of time axis
n_len : rectangle pulse duration
Returns
-------
y : ndarray of the output signal
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy import arange
>>> from sk_dsp_comm.sigsys import rect_conv
>>> n = arange(-5,20)
>>> y = rect_conv(n,6)
>>> plt.plot(n, y)
>>> plt.show()
"""
y = np.zeros(len(n))
for k in range(len(n)):
if n[k] >= 0 and n[k] < n_len-1:
y[k] = n[k] + 1
elif n[k] >= n_len-1 and n[k] <= 2*n_len-2:
y[k] = 2 * n_len - 1 - n[k]
return y
def biquad2(w_num, r_num, w_den, r_den):
"""
A biquadratic filter in terms of conjugate pole and zero pairs.
Parameters
----------
w_num : zero frequency (angle) in rad/sample
r_num : conjugate zeros radius
w_den : pole frequency (angle) in rad/sample
r_den : conjugate poles radius; less than 1 for stability
Returns
-------
b : ndarray of numerator coefficients
a : ndarray of denominator coefficients
Examples
--------
>>> b,a = biquad2(pi/4., 1, pi/4., 0.95)
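A follow-on sketch (the angles and radii above are illustrative): view the
conjugate zero/pole pairs with the zplane() function from this module.
>>> import numpy as np
>>> b,a = biquad2(np.pi/4., 1, np.pi/4., 0.95)
>>> zplane(b,a)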
"""
b = np.array([1, -2*r_num*np.cos(w_num), r_num**2])
a = np.array([1, -2*r_den*np.cos(w_den), r_den**2])
return b, a
def plot_na(x, y, mode='stem'):
pylab.figure(figsize=(5,2))
frame1 = pylab.gca()
if mode.lower() == 'stem':
pylab.stem(x,y)
else:
pylab.plot(x,y)
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
pylab.show()
def from_wav(filename):
"""
Read a wave file.
A wrapper function for scipy.io.wavfile.read
that also includes int16 to float [-1,1] scaling.
Parameters
----------
filename : file name string
Returns
-------
fs : sampling frequency in Hz
x : ndarray of normalized to 1 signal samples
Examples
--------
>>> fs,x = from_wav('test_file.wav')
"""
fs, x = wavfile.read(filename)
return fs, x/32767.
def to_wav(filename, rate, x):
"""
Write a wave file.
A wrapper function for scipy.io.wavfile.write
that also includes int16 scaling and conversion.
Assume input x is [-1,1] values.
Parameters
----------
filename : file name string
rate : sampling frequency in Hz
Returns
-------
Nothing : writes only the *.wav file
Examples
--------
>>> to_wav('test_file.wav', 8000, x)
"""
x16 = np.int16(x*32767)
wavfile.write(filename, rate, x16)
def bin_num(n, n_bits):
"""
Produce a signed representation of the number n using n_bits.
:param n: Number n
:param n_bits: Number of bits
:return:
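
Example (a sketch; the values are illustrative): the representation is two's
complement, so -1 maps to all ones.
>>> bin_num(5, 8)
'00000101'
>>> bin_num(-1, 8)
'11111111'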
"""
mask = (2 << n_bits - 1) - 1
num = int(n) & mask
f_str = '{:0' + str(n_bits) + 'b}'
f_res = f_str.format(int(num))
return f_res
|
mwickert/scikit-dsp-comm
|
sk_dsp_comm/sigsys.py
|
Python
|
bsd-2-clause
| 91,282
|
[
"Gaussian"
] |
4e881d2db925b71cb10d2e921d7c3cd1bb1757c3128470b55ba288d7afb8a0c3
|
#!/usr/bin/python
#
# Copyright 2007 Google Inc.
# Licensed to PSF under a Contributor Agreement.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittest for ipaddr module."""
import unittest
import time
import ipaddr
# Compatibility function to cast str to bytes objects
if ipaddr._compat_has_real_bytes:
_cb = lambda bytestr: bytes(bytestr, 'charmap')
else:
_cb = str
class IpaddrUnitTest(unittest.TestCase):
def setUp(self):
self.ipv4 = ipaddr.IPv4Network('1.2.3.4/24')
self.ipv4_hostmask = ipaddr.IPv4Network('10.0.0.1/0.255.255.255')
self.ipv6 = ipaddr.IPv6Network('2001:658:22a:cafe:200:0:0:1/64')
def tearDown(self):
del(self.ipv4)
del(self.ipv4_hostmask)
del(self.ipv6)
del(self)
def testRepr(self):
self.assertEqual("IPv4Network('1.2.3.4/32')",
repr(ipaddr.IPv4Network('1.2.3.4')))
self.assertEqual("IPv6Network('::1/128')",
repr(ipaddr.IPv6Network('::1')))
def testInvalidStrings(self):
self.assertRaises(ValueError, ipaddr.IPNetwork, '')
self.assertRaises(ValueError, ipaddr.IPNetwork, 'www.google.com')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1.2.3')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1.2.3.4.5')
self.assertRaises(ValueError, ipaddr.IPNetwork, '301.2.2.2')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7:')
self.assertRaises(ValueError, ipaddr.IPNetwork, ':2:3:4:5:6:7:8')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7:8:9')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:7:8:')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1::3:4:5:6::8')
self.assertRaises(ValueError, ipaddr.IPNetwork, 'a:')
self.assertRaises(ValueError, ipaddr.IPNetwork, ':')
self.assertRaises(ValueError, ipaddr.IPNetwork, ':::')
self.assertRaises(ValueError, ipaddr.IPNetwork, '::a:')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1ffff::')
self.assertRaises(ValueError, ipaddr.IPNetwork, '0xa::')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:6:1a.2.3.4')
self.assertRaises(ValueError, ipaddr.IPNetwork, '1:2:3:4:5:1.2.3.4:8')
self.assertRaises(ipaddr.IPv4IpValidationError, ipaddr.IPv4Network, '')
self.assertRaises(ipaddr.IPv4IpValidationError, ipaddr.IPv4Network,
'google.com')
self.assertRaises(ipaddr.IPv4IpValidationError, ipaddr.IPv4Network,
'::1.2.3.4')
self.assertRaises(ipaddr.IPv6IpValidationError, ipaddr.IPv6Network, '')
self.assertRaises(ipaddr.IPv6IpValidationError, ipaddr.IPv6Network,
'google.com')
self.assertRaises(ipaddr.IPv6IpValidationError, ipaddr.IPv6Network,
'1.2.3.4')
self.assertRaises(ipaddr.IPv6IpValidationError, ipaddr.IPv6Network,
'1234:axy::b')
self.assertRaises(ipaddr.IPv6IpValidationError, ipaddr.IPv6Address,
'1234:axy::b')
def testGetNetwork(self):
self.assertEqual(int(self.ipv4.network), 16909056)
self.assertEqual(str(self.ipv4.network), '1.2.3.0')
self.assertEqual(str(self.ipv4_hostmask.network), '10.0.0.0')
self.assertEqual(int(self.ipv6.network),
42540616829182469433403647294022090752)
self.assertEqual(str(self.ipv6.network),
'2001:658:22a:cafe::')
self.assertEqual(str(self.ipv6.hostmask),
'::ffff:ffff:ffff:ffff')
def testHash(self):
self.assertEqual(hash(self.ipv4.network), 16909056)
self.assertNotEqual(hash(self.ipv4.network),
hash(self.ipv4.broadcast))
self.assertEqual(hash(self.ipv6.network),
2306131459253652222)
self.assertNotEqual(hash(self.ipv6.network),
hash(self.ipv6.broadcast))
def testIpFromInt(self):
self.assertEqual(self.ipv4.ip, ipaddr.IPv4Network(16909060).ip)
self.assertRaises(ipaddr.IPv4IpValidationError,
ipaddr.IPv4Network, 2**32)
self.assertRaises(ipaddr.IPv4IpValidationError,
ipaddr.IPv4Network, -1)
ipv4 = ipaddr.IPNetwork('1.2.3.4')
ipv6 = ipaddr.IPNetwork('2001:658:22a:cafe:200:0:0:1')
self.assertEqual(ipv4, ipaddr.IPNetwork(int(ipv4)))
self.assertEqual(ipv6, ipaddr.IPNetwork(int(ipv6)))
v6_int = 42540616829182469433547762482097946625
self.assertEqual(self.ipv6.ip, ipaddr.IPv6Network(v6_int).ip)
self.assertRaises(ipaddr.IPv6IpValidationError,
ipaddr.IPv6Network, 2**128)
self.assertRaises(ipaddr.IPv6IpValidationError,
ipaddr.IPv6Network, -1)
self.assertEqual(ipaddr.IPNetwork(self.ipv4.ip).version, 4)
self.assertEqual(ipaddr.IPNetwork(self.ipv6.ip).version, 6)
if ipaddr._compat_has_real_bytes: # on python3+
def testIpFromPacked(self):
ip = ipaddr.IP
self.assertEqual(self.ipv4.ip,
ip(_cb('\x01\x02\x03\x04')).ip)
self.assertEqual(ip('255.254.253.252'),
ip(_cb('\xff\xfe\xfd\xfc')))
self.assertRaises(ValueError, ipaddr.IP, _cb('\x00' * 3))
self.assertRaises(ValueError, ipaddr.IP, _cb('\x00' * 5))
self.assertEqual(self.ipv6.ip,
ip(_cb('\x20\x01\x06\x58\x02\x2a\xca\xfe'
'\x02\x00\x00\x00\x00\x00\x00\x01')).ip)
self.assertEqual(ip('ffff:2:3:4:ffff::'),
ip(_cb('\xff\xff\x00\x02\x00\x03\x00\x04' +
'\xff\xff' + '\x00' * 6)))
self.assertEqual(ip('::'),
ip(_cb('\x00' * 16)))
self.assertRaises(ValueError, ip, _cb('\x00' * 15))
self.assertRaises(ValueError, ip, _cb('\x00' * 17))
def testGetIp(self):
self.assertEqual(int(self.ipv4.ip), 16909060)
self.assertEqual(str(self.ipv4.ip), '1.2.3.4')
self.assertEqual(str(self.ipv4_hostmask.ip), '10.0.0.1')
self.assertEqual(int(self.ipv6.ip),
42540616829182469433547762482097946625)
self.assertEqual(str(self.ipv6.ip),
'2001:658:22a:cafe:200::1')
def testGetNetmask(self):
self.assertEqual(int(self.ipv4.netmask), 4294967040L)
self.assertEqual(str(self.ipv4.netmask), '255.255.255.0')
self.assertEqual(str(self.ipv4_hostmask.netmask), '255.0.0.0')
self.assertEqual(int(self.ipv6.netmask),
340282366920938463444927863358058659840)
self.assertEqual(self.ipv6.prefixlen, 64)
def testZeroNetmask(self):
ipv4_zero_netmask = ipaddr.IPv4Network('1.2.3.4/0')
self.assertEqual(int(ipv4_zero_netmask.netmask), 0)
self.assert_(ipv4_zero_netmask._is_valid_netmask(str(0)))
ipv6_zero_netmask = ipaddr.IPv6Network('::1/0')
self.assertEqual(int(ipv6_zero_netmask.netmask), 0)
self.assert_(ipv6_zero_netmask._is_valid_netmask(str(0)))
def testGetBroadcast(self):
self.assertEqual(int(self.ipv4.broadcast), 16909311L)
self.assertEqual(str(self.ipv4.broadcast), '1.2.3.255')
self.assertEqual(int(self.ipv6.broadcast),
42540616829182469451850391367731642367)
self.assertEqual(str(self.ipv6.broadcast),
'2001:658:22a:cafe:ffff:ffff:ffff:ffff')
def testGetPrefixlen(self):
self.assertEqual(self.ipv4.prefixlen, 24)
self.assertEqual(self.ipv6.prefixlen, 64)
def testGetSupernet(self):
self.assertEqual(self.ipv4.supernet().prefixlen, 23)
self.assertEqual(str(self.ipv4.supernet().network), '1.2.2.0')
self.assertEqual(ipaddr.IPv4Network('0.0.0.0/0').supernet(),
ipaddr.IPv4Network('0.0.0.0/0'))
self.assertEqual(self.ipv6.supernet().prefixlen, 63)
self.assertEqual(str(self.ipv6.supernet().network),
'2001:658:22a:cafe::')
self.assertEqual(ipaddr.IPv6Network('::0/0').supernet(),
ipaddr.IPv6Network('::0/0'))
def testGetSupernet3(self):
self.assertEqual(self.ipv4.supernet(3).prefixlen, 21)
self.assertEqual(str(self.ipv4.supernet(3).network), '1.2.0.0')
self.assertEqual(self.ipv6.supernet(3).prefixlen, 61)
self.assertEqual(str(self.ipv6.supernet(3).network),
'2001:658:22a:caf8::')
def testGetSupernet4(self):
self.assertRaises(ValueError, self.ipv4.supernet, prefixlen_diff=2,
new_prefix=1)
self.assertRaises(ValueError, self.ipv4.supernet, new_prefix=25)
self.assertEqual(self.ipv4.supernet(prefixlen_diff=2),
self.ipv4.supernet(new_prefix=22))
self.assertRaises(ValueError, self.ipv6.supernet, prefixlen_diff=2,
new_prefix=1)
self.assertRaises(ValueError, self.ipv6.supernet, new_prefix=65)
self.assertEqual(self.ipv6.supernet(prefixlen_diff=2),
self.ipv6.supernet(new_prefix=62))
def testFancySubnetting(self):
self.assertEqual(sorted(self.ipv4.subnet(prefixlen_diff=3)),
sorted(self.ipv4.subnet(new_prefix=27)))
self.assertRaises(ValueError, self.ipv4.subnet, new_prefix=23)
self.assertRaises(ValueError, self.ipv4.subnet,
prefixlen_diff=3, new_prefix=27)
self.assertEqual(sorted(self.ipv6.subnet(prefixlen_diff=4)),
sorted(self.ipv6.subnet(new_prefix=68)))
self.assertRaises(ValueError, self.ipv6.subnet, new_prefix=63)
self.assertRaises(ValueError, self.ipv6.subnet,
prefixlen_diff=4, new_prefix=68)
def testGetSubnet(self):
self.assertEqual(self.ipv4.subnet()[0].prefixlen, 25)
self.assertEqual(str(self.ipv4.subnet()[0].network), '1.2.3.0')
self.assertEqual(str(self.ipv4.subnet()[1].network), '1.2.3.128')
self.assertEqual(self.ipv6.subnet()[0].prefixlen, 65)
def testGetSubnetForSingle32(self):
ip = ipaddr.IPv4Network('1.2.3.4/32')
subnets1 = [str(x) for x in ip.subnet()]
subnets2 = [str(x) for x in ip.subnet(2)]
self.assertEqual(subnets1, ['1.2.3.4/32'])
self.assertEqual(subnets1, subnets2)
def testGetSubnetForSingle128(self):
ip = ipaddr.IPv6Network('::1/128')
subnets1 = [str(x) for x in ip.subnet()]
subnets2 = [str(x) for x in ip.subnet(2)]
self.assertEqual(subnets1, ['::1/128'])
self.assertEqual(subnets1, subnets2)
def testSubnet2(self):
ips = [str(x) for x in self.ipv4.subnet(2)]
self.assertEqual(
ips,
['1.2.3.0/26', '1.2.3.64/26', '1.2.3.128/26', '1.2.3.192/26'])
ipsv6 = [str(x) for x in self.ipv6.subnet(2)]
self.assertEqual(
ipsv6,
['2001:658:22a:cafe::/66',
'2001:658:22a:cafe:4000::/66',
'2001:658:22a:cafe:8000::/66',
'2001:658:22a:cafe:c000::/66'])
def testSubnetFailsForLargeCidrDiff(self):
self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv4.subnet, 9)
self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv6.subnet,
65)
def testSupernetFailsForLargeCidrDiff(self):
self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv4.supernet,
25)
self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv6.supernet,
65)
def testSubnetFailsForNegativeCidrDiff(self):
self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv4.subnet,
-1)
self.assertRaises(ipaddr.PrefixlenDiffInvalidError, self.ipv6.subnet,
-1)
def testGetNumHosts(self):
self.assertEqual(self.ipv4.numhosts, 256)
self.assertEqual(self.ipv4.subnet()[0].numhosts, 128)
self.assertEqual(self.ipv4.supernet().numhosts, 512)
self.assertEqual(self.ipv6.numhosts, 18446744073709551616)
self.assertEqual(self.ipv6.subnet()[0].numhosts, 9223372036854775808)
self.assertEqual(self.ipv6.supernet().numhosts, 36893488147419103232)
def testContains(self):
self.assertTrue(ipaddr.IPv4Network('1.2.3.128/25') in self.ipv4)
self.assertFalse(ipaddr.IPv4Network('1.2.4.1/24') in self.ipv4)
self.assertFalse(self.ipv4 in self.ipv6)
self.assertFalse(self.ipv6 in self.ipv4)
self.assertTrue(self.ipv4 in self.ipv4)
self.assertTrue(self.ipv6 in self.ipv6)
# We can test addresses and strings as well.
addr1str = '1.2.3.37'
addr1 = ipaddr.IPv4Address(addr1str)
self.assertTrue(addr1 in self.ipv4)
self.assertTrue(int(addr1) in self.ipv4)
self.assertTrue(addr1str in self.ipv4)
def testBadAddress(self):
self.assertRaises(ipaddr.IPv4IpValidationError, ipaddr.IPv4Network,
'poop')
self.assertRaises(ipaddr.IPv4IpValidationError,
ipaddr.IPv4Network, '1.2.3.256')
self.assertRaises(ipaddr.IPv6IpValidationError, ipaddr.IPv6Network,
'poopv6')
self.assertRaises(ipaddr.IPv4IpValidationError,
ipaddr.IPv4Network, '1.2.3.4/32/24')
self.assertRaises(ipaddr.IPv4IpValidationError,
ipaddr.IPv4Network, '10/8')
self.assertRaises(ipaddr.IPv6IpValidationError,
ipaddr.IPv6Network, '10/8')
def testBadNetMask(self):
self.assertRaises(ipaddr.IPv4NetmaskValidationError,
ipaddr.IPv4Network, '1.2.3.4/')
self.assertRaises(ipaddr.IPv4NetmaskValidationError,
ipaddr.IPv4Network, '1.2.3.4/33')
self.assertRaises(ipaddr.IPv4NetmaskValidationError,
ipaddr.IPv4Network, '1.2.3.4/254.254.255.256')
self.assertRaises(ipaddr.IPv6NetmaskValidationError,
ipaddr.IPv6Network, '::1/')
self.assertRaises(ipaddr.IPv6NetmaskValidationError,
ipaddr.IPv6Network, '::1/129')
def testNth(self):
self.assertEqual(str(self.ipv4[5]), '1.2.3.5')
self.assertRaises(IndexError, self.ipv4.__getitem__, 256)
self.assertEqual(str(self.ipv6[5]),
'2001:658:22a:cafe::5')
def testGetitem(self):
# http://code.google.com/p/ipaddr-py/issues/detail?id=15
addr = ipaddr.IPv4Network('172.31.255.128/255.255.255.240')
self.assertEqual(28, addr.prefixlen)
addr_list = list(addr)
self.assertEqual('172.31.255.128', str(addr_list[0]))
self.assertEqual('172.31.255.128', str(addr[0]))
self.assertEqual('172.31.255.143', str(addr_list[-1]))
self.assertEqual('172.31.255.143', str(addr[-1]))
self.assertEqual(addr_list[-1], addr[-1])
def testEquals(self):
self.assertTrue(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/24'))
self.assertFalse(self.ipv4 == ipaddr.IPv4Network('1.2.3.4/23'))
self.assertFalse(self.ipv4 == ipaddr.IPv4Network('1.2.3.5/24'))
self.assertFalse(self.ipv4 == ipaddr.IPv6Network('::1.2.3.4/24'))
self.assertFalse(self.ipv4 == '')
self.assertFalse(self.ipv4 == [])
self.assertFalse(self.ipv4 == 2)
self.assertTrue(self.ipv6 ==
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64'))
self.assertFalse(self.ipv6 ==
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63'))
self.assertFalse(self.ipv6 ==
ipaddr.IPv6Network('2001:658:22a:cafe:200::2/64'))
self.assertFalse(self.ipv6 == ipaddr.IPv4Network('1.2.3.4/23'))
self.assertFalse(self.ipv6 == '')
self.assertFalse(self.ipv6 == [])
self.assertFalse(self.ipv6 == 2)
def testNotEquals(self):
self.assertFalse(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/24'))
self.assertTrue(self.ipv4 != ipaddr.IPv4Network('1.2.3.4/23'))
self.assertTrue(self.ipv4 != ipaddr.IPv4Network('1.2.3.5/24'))
self.assertTrue(self.ipv4 != ipaddr.IPv6Network('::1.2.3.4/24'))
self.assertTrue(self.ipv4 != '')
self.assertTrue(self.ipv4 != [])
self.assertTrue(self.ipv4 != 2)
self.assertFalse(self.ipv6 !=
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/64'))
self.assertTrue(self.ipv6 !=
ipaddr.IPv6Network('2001:658:22a:cafe:200::1/63'))
self.assertTrue(self.ipv6 !=
ipaddr.IPv6Network('2001:658:22a:cafe:200::2/64'))
self.assertTrue(self.ipv6 != ipaddr.IPv4Network('1.2.3.4/23'))
self.assertTrue(self.ipv6 != '')
self.assertTrue(self.ipv6 != [])
self.assertTrue(self.ipv6 != 2)
def testSlash32Constructor(self):
self.assertEquals(str(ipaddr.IPv4Network('1.2.3.4/255.255.255.255')),
'1.2.3.4/32')
def testSlash128Constructor(self):
self.assertEquals(str(ipaddr.IPv6Network('::1/128')),
'::1/128')
def testSlash0Constructor(self):
self.assertEquals(str(ipaddr.IPv4Network('1.2.3.4/0.0.0.0')),
'1.2.3.4/0')
def testCollapsing(self):
# test only IP addresses including some duplicates
ip1 = ipaddr.IPv4Address('1.1.1.0')
ip2 = ipaddr.IPv4Address('1.1.1.1')
ip3 = ipaddr.IPv4Address('1.1.1.2')
ip4 = ipaddr.IPv4Address('1.1.1.3')
ip5 = ipaddr.IPv4Address('1.1.1.4')
ip6 = ipaddr.IPv4Address('1.1.1.0')
# check that addresses are subsumed properly.
collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6])
self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/30'),
ipaddr.IPv4Network('1.1.1.4/32')])
# test a mix of IP addresses and networks including some duplicates
ip1 = ipaddr.IPv4Address('1.1.1.0')
ip2 = ipaddr.IPv4Address('1.1.1.1')
ip3 = ipaddr.IPv4Address('1.1.1.2')
ip4 = ipaddr.IPv4Address('1.1.1.3')
ip5 = ipaddr.IPv4Network('1.1.1.4/30')
ip6 = ipaddr.IPv4Network('1.1.1.4/30')
# check that addresses are subsumed properly.
collapsed = ipaddr.collapse_address_list([ip5, ip1, ip2, ip3, ip4, ip6])
self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.1.0/29')])
# test only IP networks
ip1 = ipaddr.IPv4Network('1.1.0.0/24')
ip2 = ipaddr.IPv4Network('1.1.1.0/24')
ip3 = ipaddr.IPv4Network('1.1.2.0/24')
ip4 = ipaddr.IPv4Network('1.1.3.0/24')
ip5 = ipaddr.IPv4Network('1.1.4.0/24')
# stored in no particular order b/c we want CollapseAddr to call [].sort
ip6 = ipaddr.IPv4Network('1.1.0.0/22')
# check that addresses are subsumed properly.
collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3, ip4, ip5, ip6])
self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/22'),
ipaddr.IPv4Network('1.1.4.0/24')])
# test that two addresses are supernet'ed properly
collapsed = ipaddr.collapse_address_list([ip1, ip2])
self.assertEqual(collapsed, [ipaddr.IPv4Network('1.1.0.0/23')])
# test same IP networks
ip_same1 = ip_same2 = ipaddr.IPv4Network('1.1.1.1/32')
self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]),
[ip_same1])
# test same IP addresses
ip_same1 = ip_same2 = ipaddr.IPv4Address('1.1.1.1')
self.assertEqual(ipaddr.collapse_address_list([ip_same1, ip_same2]),
[ip_same1])
ip1 = ipaddr.IPv6Network('::2001:1/100')
ip2 = ipaddr.IPv6Network('::2002:1/120')
ip3 = ipaddr.IPv6Network('::2001:1/96')
# test that ipv6 addresses are subsumed properly.
collapsed = ipaddr.collapse_address_list([ip1, ip2, ip3])
self.assertEqual(collapsed, [ip3])
# the toejam test
ip1 = ipaddr.IPAddress('1.1.1.1')
ip2 = ipaddr.IPAddress('::1')
self.assertRaises(ipaddr.IPTypeError, ipaddr.collapse_address_list,
[ip1, ip2])
def testSummarizing(self):
#ip = ipaddr.IPAddress
#ipnet = ipaddr.IPNetwork
summarize = ipaddr.summarize_address_range
ip1 = ipaddr.IPAddress('1.1.1.0')
ip2 = ipaddr.IPAddress('1.1.1.255')
# test a /24 is summarized properly
self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1.1.1.0/24'))
# test an IPv4 range that isn't on a network byte boundary
ip2 = ipaddr.IPAddress('1.1.1.8')
self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1.1.1.0/29'),
ipaddr.IPNetwork('1.1.1.8')])
ip1 = ipaddr.IPAddress('1::')
ip2 = ipaddr.IPAddress('1:ffff:ffff:ffff:ffff:ffff:ffff:ffff')
# test an IPv6 range is summarized properly
self.assertEqual(summarize(ip1, ip2)[0], ipaddr.IPNetwork('1::/16'))
# test an IPv6 range that isn't on a network byte boundary
ip2 = ipaddr.IPAddress('2::')
self.assertEqual(summarize(ip1, ip2), [ipaddr.IPNetwork('1::/16'),
ipaddr.IPNetwork('2::/128')])
# test exception raised when first is greater than last
self.assertRaises(ValueError, summarize, ipaddr.IPAddress('1.1.1.0'),
ipaddr.IPAddress('1.1.0.0'))
# test exception raised when first and last aren't IP addresses
self.assertRaises(ipaddr.IPTypeError, summarize,
ipaddr.IPNetwork('1.1.1.0'),
ipaddr.IPNetwork('1.1.0.0'))
self.assertRaises(ipaddr.IPTypeError, summarize,
ipaddr.IPNetwork('1.1.1.0'), ipaddr.IPNetwork('1.1.0.0'))
# test exception raised when first and last are not same version
self.assertRaises(ipaddr.IPTypeError, summarize, ipaddr.IPAddress('::'),
ipaddr.IPNetwork('1.1.0.0'))
def testAddressComparison(self):
self.assertTrue(ipaddr.IPAddress('1.1.1.1') <=
ipaddr.IPAddress('1.1.1.1'))
self.assertTrue(ipaddr.IPAddress('1.1.1.1') <=
ipaddr.IPAddress('1.1.1.2'))
self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::1'))
self.assertTrue(ipaddr.IPAddress('::1') <= ipaddr.IPAddress('::2'))
def testNetworkComparison(self):
# ip1 and ip2 have the same network address
ip1 = ipaddr.IPv4Network('1.1.1.0/24')
ip2 = ipaddr.IPv4Network('1.1.1.1/24')
ip3 = ipaddr.IPv4Network('1.1.2.0/24')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEquals(ip1.compare_networks(ip2), 0)
self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key())
self.assertEquals(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
ip1 = ipaddr.IPv6Network('2001::2000/96')
ip2 = ipaddr.IPv6Network('2001::2001/96')
ip3 = ipaddr.IPv6Network('2001:ffff::2000/96')
self.assertTrue(ip1 < ip3)
self.assertTrue(ip3 > ip2)
self.assertEquals(ip1.compare_networks(ip2), 0)
self.assertTrue(ip1._get_networks_key() == ip2._get_networks_key())
self.assertEquals(ip1.compare_networks(ip3), -1)
self.assertTrue(ip1._get_networks_key() < ip3._get_networks_key())
# Test comparing different protocols
ipv6 = ipaddr.IPv6Network('::/0')
ipv4 = ipaddr.IPv4Network('0.0.0.0/0')
self.assertTrue(ipv6 > ipv4)
self.assertTrue(ipv4 < ipv6)
# Regression test for issue 19.
ip1 = ipaddr.IPNetwork('10.1.2.128/25')
self.assertFalse(ip1 < ip1)
self.assertFalse(ip1 > ip1)
ip2 = ipaddr.IPNetwork('10.1.3.0/24')
self.assertTrue(ip1 < ip2)
self.assertFalse(ip2 < ip1)
self.assertFalse(ip1 > ip2)
self.assertTrue(ip2 > ip1)
ip3 = ipaddr.IPNetwork('10.1.3.0/25')
self.assertTrue(ip2 < ip3)
self.assertFalse(ip3 < ip2)
self.assertFalse(ip2 > ip3)
self.assertTrue(ip3 > ip2)
# <=, >=
self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <=
ipaddr.IPNetwork('1.1.1.1'))
self.assertTrue(ipaddr.IPNetwork('1.1.1.1') <=
ipaddr.IPNetwork('1.1.1.2'))
self.assertFalse(ipaddr.IPNetwork('1.1.1.2') <=
ipaddr.IPNetwork('1.1.1.1'))
self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::1'))
self.assertTrue(ipaddr.IPNetwork('::1') <= ipaddr.IPNetwork('::2'))
self.assertFalse(ipaddr.IPNetwork('::2') <= ipaddr.IPNetwork('::1'))
def testEmbeddedIpv4(self):
ipv4_string = '192.168.0.1'
ipv4 = ipaddr.IPv4Network(ipv4_string)
v4compat_ipv6 = ipaddr.IPv6Network('::%s' % ipv4_string)
self.assertEquals(int(v4compat_ipv6.ip), int(ipv4.ip))
v4mapped_ipv6 = ipaddr.IPv6Network('::ffff:%s' % ipv4_string)
self.assertNotEquals(v4mapped_ipv6.ip, ipv4.ip)
self.assertRaises(ipaddr.IPv6IpValidationError, ipaddr.IPv6Network,
'2001:1.1.1.1:1.1.1.1')
def testIPVersion(self):
self.assertEqual(self.ipv4.version, 4)
self.assertEqual(self.ipv6.version, 6)
def testPacked(self):
self.assertEqual(self.ipv4.packed,
_cb('\x01\x02\x03\x04'))
self.assertEqual(ipaddr.IPv4Network('255.254.253.252').packed,
_cb('\xff\xfe\xfd\xfc'))
self.assertEqual(self.ipv6.packed,
_cb('\x20\x01\x06\x58\x02\x2a\xca\xfe'
'\x02\x00\x00\x00\x00\x00\x00\x01'))
self.assertEqual(ipaddr.IPv6Network('ffff:2:3:4:ffff::').packed,
_cb('\xff\xff\x00\x02\x00\x03\x00\x04\xff\xff'
+ '\x00' * 6))
self.assertEqual(ipaddr.IPv6Network('::1:0:0:0:0').packed,
_cb('\x00' * 6 + '\x00\x01' + '\x00' * 8))
def testIpStrFromPrefixlen(self):
ipv4 = ipaddr.IPv4Network('1.2.3.4/24')
self.assertEquals(ipv4._ip_string_from_prefix(), '255.255.255.0')
self.assertEquals(ipv4._ip_string_from_prefix(28), '255.255.255.240')
def testIpType(self):
ipv4net = ipaddr.IPNetwork('1.2.3.4')
ipv4addr = ipaddr.IPAddress('1.2.3.4')
ipv6net = ipaddr.IPNetwork('::1.2.3.4')
ipv6addr = ipaddr.IPAddress('::1.2.3.4')
self.assertEquals(ipaddr.IPv4Network, type(ipv4net))
self.assertEquals(ipaddr.IPv4Address, type(ipv4addr))
self.assertEquals(ipaddr.IPv6Network, type(ipv6net))
self.assertEquals(ipaddr.IPv6Address, type(ipv6addr))
def testReservedIpv4(self):
# test networks
self.assertEquals(True, ipaddr.IPNetwork('224.1.1.1/31').is_multicast)
self.assertEquals(False, ipaddr.IPNetwork('240.0.0.0').is_multicast)
self.assertEquals(True, ipaddr.IPNetwork('192.168.1.1/17').is_private)
self.assertEquals(False, ipaddr.IPNetwork('192.169.0.0').is_private)
self.assertEquals(True, ipaddr.IPNetwork('10.255.255.255').is_private)
self.assertEquals(False, ipaddr.IPNetwork('11.0.0.0').is_private)
self.assertEquals(True, ipaddr.IPNetwork('172.31.255.255').is_private)
self.assertEquals(False, ipaddr.IPNetwork('172.32.0.0').is_private)
self.assertEquals(True,
ipaddr.IPNetwork('169.254.100.200/24').is_link_local)
self.assertEquals(False,
ipaddr.IPNetwork('169.255.100.200/24').is_link_local)
self.assertEquals(True,
ipaddr.IPNetwork('127.100.200.254/32').is_loopback)
self.assertEquals(True, ipaddr.IPNetwork('127.42.0.0/16').is_loopback)
self.assertEquals(False, ipaddr.IPNetwork('128.0.0.0').is_loopback)
# test addresses
self.assertEquals(True, ipaddr.IPAddress('224.1.1.1').is_multicast)
self.assertEquals(False, ipaddr.IPAddress('240.0.0.0').is_multicast)
self.assertEquals(True, ipaddr.IPAddress('192.168.1.1').is_private)
self.assertEquals(False, ipaddr.IPAddress('192.169.0.0').is_private)
self.assertEquals(True, ipaddr.IPAddress('10.255.255.255').is_private)
self.assertEquals(False, ipaddr.IPAddress('11.0.0.0').is_private)
self.assertEquals(True, ipaddr.IPAddress('172.31.255.255').is_private)
self.assertEquals(False, ipaddr.IPAddress('172.32.0.0').is_private)
self.assertEquals(True,
ipaddr.IPAddress('169.254.100.200').is_link_local)
self.assertEquals(False,
ipaddr.IPAddress('169.255.100.200').is_link_local)
self.assertEquals(True,
ipaddr.IPAddress('127.100.200.254').is_loopback)
self.assertEquals(True, ipaddr.IPAddress('127.42.0.0').is_loopback)
self.assertEquals(False, ipaddr.IPAddress('128.0.0.0').is_loopback)
def testReservedIpv6(self):
self.assertEquals(True, ipaddr.IPNetwork('ffff::').is_multicast)
self.assertEquals(True, ipaddr.IPNetwork(2**128-1).is_multicast)
self.assertEquals(True, ipaddr.IPNetwork('ff00::').is_multicast)
self.assertEquals(False, ipaddr.IPNetwork('fdff::').is_multicast)
self.assertEquals(True, ipaddr.IPNetwork('fecf::').is_site_local)
self.assertEquals(True, ipaddr.IPNetwork(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEquals(False, ipaddr.IPNetwork('fbf:ffff::').is_site_local)
self.assertEquals(False, ipaddr.IPNetwork('ff00::').is_site_local)
self.assertEquals(True, ipaddr.IPNetwork('fc00::').is_private)
self.assertEquals(True, ipaddr.IPNetwork(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEquals(False, ipaddr.IPNetwork('fbff:ffff::').is_private)
self.assertEquals(False, ipaddr.IPNetwork('fe00::').is_private)
self.assertEquals(True, ipaddr.IPNetwork('fea0::').is_link_local)
self.assertEquals(True, ipaddr.IPNetwork('febf:ffff::').is_link_local)
self.assertEquals(False, ipaddr.IPNetwork('fe7f:ffff::').is_link_local)
self.assertEquals(False, ipaddr.IPNetwork('fec0::').is_link_local)
self.assertEquals(True, ipaddr.IPNetwork('0:0::0:01').is_loopback)
self.assertEquals(False, ipaddr.IPNetwork('::1/127').is_loopback)
self.assertEquals(False, ipaddr.IPNetwork('::').is_loopback)
self.assertEquals(False, ipaddr.IPNetwork('::2').is_loopback)
self.assertEquals(True, ipaddr.IPNetwork('0::0').is_unspecified)
self.assertEquals(False, ipaddr.IPNetwork('::1').is_unspecified)
self.assertEquals(False, ipaddr.IPNetwork('::/127').is_unspecified)
# test addresses
self.assertEquals(True, ipaddr.IPAddress('ffff::').is_multicast)
self.assertEquals(True, ipaddr.IPAddress(2**128-1).is_multicast)
self.assertEquals(True, ipaddr.IPAddress('ff00::').is_multicast)
self.assertEquals(False, ipaddr.IPAddress('fdff::').is_multicast)
self.assertEquals(True, ipaddr.IPAddress('fecf::').is_site_local)
self.assertEquals(True, ipaddr.IPAddress(
'feff:ffff:ffff:ffff::').is_site_local)
self.assertEquals(False, ipaddr.IPAddress('fbf:ffff::').is_site_local)
self.assertEquals(False, ipaddr.IPAddress('ff00::').is_site_local)
self.assertEquals(True, ipaddr.IPAddress('fc00::').is_private)
self.assertEquals(True, ipaddr.IPAddress(
'fc00:ffff:ffff:ffff::').is_private)
self.assertEquals(False, ipaddr.IPAddress('fbff:ffff::').is_private)
self.assertEquals(False, ipaddr.IPAddress('fe00::').is_private)
self.assertEquals(True, ipaddr.IPAddress('fea0::').is_link_local)
self.assertEquals(True, ipaddr.IPAddress('febf:ffff::').is_link_local)
self.assertEquals(False, ipaddr.IPAddress('fe7f:ffff::').is_link_local)
self.assertEquals(False, ipaddr.IPAddress('fec0::').is_link_local)
self.assertEquals(True, ipaddr.IPAddress('0:0::0:01').is_loopback)
self.assertEquals(True, ipaddr.IPAddress('::1').is_loopback)
self.assertEquals(False, ipaddr.IPAddress('::2').is_loopback)
self.assertEquals(True, ipaddr.IPAddress('0::0').is_unspecified)
self.assertEquals(False, ipaddr.IPAddress('::1').is_unspecified)
# some generic IETF reserved addresses
self.assertEquals(True, ipaddr.IPAddress('100::').is_reserved)
self.assertEquals(True, ipaddr.IPNetwork('4000::1/128').is_reserved)
def testIpv4Mapped(self):
self.assertEqual(ipaddr.IPAddress('::ffff:192.168.1.1').ipv4_mapped,
ipaddr.IPAddress('192.168.1.1'))
self.assertEqual(ipaddr.IPAddress('::c0a8:101').ipv4_mapped, None)
self.assertEqual(ipaddr.IPAddress('::ffff:c0a8:101').ipv4_mapped,
ipaddr.IPAddress('192.168.1.1'))
def testAddrExclude(self):
addr1 = ipaddr.IPNetwork('10.1.1.0/24')
addr2 = ipaddr.IPNetwork('10.1.1.0/26')
addr3 = ipaddr.IPNetwork('10.2.1.0/24')
self.assertEqual(addr1.address_exclude(addr2),
[ipaddr.IPNetwork('10.1.1.64/26'),
ipaddr.IPNetwork('10.1.1.128/25')])
self.assertRaises(ValueError, addr1.address_exclude, addr3)
def testHash(self):
self.assertEquals(hash(ipaddr.IPNetwork('10.1.1.0/24')),
hash(ipaddr.IPNetwork('10.1.1.0/24')))
self.assertEquals(hash(ipaddr.IPAddress('10.1.1.0')),
hash(ipaddr.IPAddress('10.1.1.0')))
ip1 = ipaddr.IPAddress('10.1.1.0')
ip2 = ipaddr.IPAddress('1::')
dummy = {}
dummy[self.ipv4] = None
dummy[self.ipv6] = None
dummy[ip1] = None
dummy[ip2] = None
self.assertTrue(self.ipv4 in dummy)
self.assertTrue(ip2 in dummy)
def testCopyConstructor(self):
addr1 = ipaddr.IPNetwork('10.1.1.0/24')
addr2 = ipaddr.IPNetwork(addr1)
addr3 = ipaddr.IPNetwork('2001:658:22a:cafe:200::1/64')
addr4 = ipaddr.IPNetwork(addr3)
addr5 = ipaddr.IPv4Address('1.1.1.1')
addr6 = ipaddr.IPv6Address('2001:658:22a:cafe:200::1')
self.assertEqual(addr1, addr2)
self.assertEqual(addr3, addr4)
self.assertEqual(addr5, ipaddr.IPv4Address(addr5))
self.assertEqual(addr6, ipaddr.IPv6Address(addr6))
def testCompressIPv6Address(self):
test_addresses = {
'1:2:3:4:5:6:7:8': '1:2:3:4:5:6:7:8/128',
'2001:0:0:4:0:0:0:8': '2001:0:0:4::8/128',
'2001:0:0:4:5:6:7:8': '2001::4:5:6:7:8/128',
'2001:0:3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'2001:0::3:4:5:6:7:8': '2001:0:3:4:5:6:7:8/128',
'0:0:3:0:0:0:0:ffff': '0:0:3::ffff/128',
'0:0:0:4:0:0:0:ffff': '::4:0:0:0:ffff/128',
'0:0:0:0:5:0:0:ffff': '::5:0:0:ffff/128',
'1:0:0:4:0:0:7:8': '1::4:0:0:7:8/128',
'0:0:0:0:0:0:0:0': '::/128',
'0:0:0:0:0:0:0:0/0': '::/0',
'0:0:0:0:0:0:0:1': '::1/128',
'2001:0658:022a:cafe:0000:0000:0000:0000/66':
'2001:658:22a:cafe::/66',
}
for uncompressed, compressed in test_addresses.items():
self.assertEquals(compressed, str(ipaddr.IPv6Network(uncompressed)))
def testExplodeShortHandIpStr(self):
addr1 = ipaddr.IPv6Network('2001::1')
self.assertEqual('2001:0000:0000:0000:0000:0000:0000:0001',
addr1._explode_shorthand_ip_string(str(addr1.ip)))
def testIntRepresentation(self):
self.assertEqual(16909060, int(self.ipv4))
self.assertEqual(42540616829182469433547762482097946625, int(self.ipv6))
def testHexRepresentation(self):
self.assertEqual(hex(0x1020304),
hex(self.ipv4))
self.assertEqual(hex(0x20010658022ACAFE0200000000000001),
hex(self.ipv6))
# backwards compatibility
def testBackwardsCompability(self):
self.assertEqual(ipaddr.CollapseAddrList(
[ipaddr.IPNetwork('1.1.0.0/24'), ipaddr.IPNetwork('1.1.1.0/24')]),
[ipaddr.IPNetwork('1.1.0.0/23')])
self.assertEqual(ipaddr.IPNetwork('::42:0/112').AddressExclude(
ipaddr.IPNetwork('::42:8000/113')),
[ipaddr.IPNetwork('::42:0/113')])
self.assertTrue(ipaddr.IPNetwork('1::/8').CompareNetworks(
ipaddr.IPNetwork('2::/9')) < 0)
self.assertEqual(ipaddr.IPNetwork('1::/16').Contains(
ipaddr.IPNetwork('2::/16')), False)
self.assertEqual(ipaddr.IPNetwork('0.0.0.0/0').Subnet(),
[ipaddr.IPNetwork('0.0.0.0/1'),
ipaddr.IPNetwork('128.0.0.0/1')])
self.assertEqual(ipaddr.IPNetwork('::/127').Subnet(),
[ipaddr.IPNetwork('::/128'),
ipaddr.IPNetwork('::1/128')])
self.assertEqual(ipaddr.IPNetwork('1.0.0.0/32').Supernet(),
ipaddr.IPNetwork('1.0.0.0/31'))
self.assertEqual(ipaddr.IPNetwork('::/121').Supernet(),
ipaddr.IPNetwork('::/120'))
self.assertEqual(ipaddr.IPNetwork('10.0.0.02').IsRFC1918(), True)
self.assertEqual(ipaddr.IPNetwork('10.0.0.0').IsMulticast(), False)
self.assertEqual(ipaddr.IPNetwork('127.255.255.255').IsLoopback(), True)
self.assertEqual(ipaddr.IPNetwork('169.255.255.255').IsLinkLocal(),
False)
def testForceVersion(self):
self.assertEqual(ipaddr.IPNetwork(1).version, 4)
self.assertEqual(ipaddr.IPNetwork(1, version=6).version, 6)
def testWithStar(self):
self.assertEqual(str(self.ipv4.with_prefixlen), "1.2.3.4/24")
self.assertEqual(str(self.ipv4.with_netmask), "1.2.3.4/255.255.255.0")
self.assertEqual(str(self.ipv4.with_hostmask), "1.2.3.4/0.0.0.255")
self.assertEqual(str(self.ipv6.with_prefixlen),
'2001:658:22a:cafe:200::1/64')
# these two probably don't make much sense, but they're included for
# compatibility with ipv4
self.assertEqual(str(self.ipv6.with_netmask),
'2001:658:22a:cafe:200::1/ffff:ffff:ffff:ffff::')
self.assertEqual(str(self.ipv6.with_hostmask),
'2001:658:22a:cafe:200::1/::ffff:ffff:ffff:ffff')
def testNetworkElementCaching(self):
# V4 - make sure we're empty
self.assertFalse(self.ipv4._cache.has_key('network'))
self.assertFalse(self.ipv4._cache.has_key('broadcast'))
self.assertFalse(self.ipv4._cache.has_key('hostmask'))
# V4 - populate and test
self.assertEqual(self.ipv4.network, ipaddr.IPv4Address('1.2.3.0'))
self.assertEqual(self.ipv4.broadcast, ipaddr.IPv4Address('1.2.3.255'))
self.assertEqual(self.ipv4.hostmask, ipaddr.IPv4Address('0.0.0.255'))
# V4 - check we're cached
self.assertTrue(self.ipv4._cache.has_key('network'))
self.assertTrue(self.ipv4._cache.has_key('broadcast'))
self.assertTrue(self.ipv4._cache.has_key('hostmask'))
# V6 - make sure we're empty
self.assertFalse(self.ipv6._cache.has_key('network'))
self.assertFalse(self.ipv6._cache.has_key('broadcast'))
self.assertFalse(self.ipv6._cache.has_key('hostmask'))
# V6 - populate and test
self.assertEqual(self.ipv6.network,
ipaddr.IPv6Address('2001:658:22a:cafe::'))
self.assertEqual(self.ipv6.broadcast, ipaddr.IPv6Address(
'2001:658:22a:cafe:ffff:ffff:ffff:ffff'))
self.assertEqual(self.ipv6.hostmask,
ipaddr.IPv6Address('::ffff:ffff:ffff:ffff'))
# V6 - check we're cached
self.assertTrue(self.ipv6._cache.has_key('network'))
self.assertTrue(self.ipv6._cache.has_key('broadcast'))
self.assertTrue(self.ipv6._cache.has_key('hostmask'))
if __name__ == '__main__':
unittest.main()
|
nouiz/fredericbastien-ipaddr-py-speed-up
|
branches/2.0.x/ipaddr_test.py
|
Python
|
apache-2.0
| 41,619
|
[
"FEFF"
] |
b04df72a6b7c9fa042e690bdf3fd79f64812c64c3b9dd95ebc707ac0149702e1
|
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 5 12:30:26 2013
@author: Daniel Struck
create blast nucleotide database from the COMET training set with the following command:
makeblastdb -in training_subtype_03.fasta -dbtype nucl
run blast with the following command:
(blast version: 2.2.29)
blastn -num_threads 12 -db training_subtype_03.fasta -query 2014-01-09_noise.fasta -num_descriptions 1 -num_alignments 0 > blast_results.txt
"""
from __future__ import division
from collections import Counter
n = 0
window_n = Counter()
noise_n = Counter()
pos = 0
window_pos = Counter()
noise_pos = Counter()
query = ""
with open("blast_results.txt") as f:
for line in f:
if line.startswith("Query"):
query = line.split(".")[1]
token = line.split("_")
noise = float(token[-4][1:])
window_size = int(token[-3][1:])
window_n[window_size] += 1
noise_n[noise] += 1
n += 1
for _ in range(6): line = next(f) # forward to result
result = line.split(".")[0].lstrip()
if query == result:
pos += 1
window_pos[window_size] += 1
noise_pos[noise] += 1
print "\tdetected\tn\tsensitivity"
print "total:\t{}\t{}\t{}".format(pos, n, pos / n * 100)
print
for noise in sorted(noise_n.keys()):
print "noise {}:\t{}\t{}\t{}".format(noise, noise_pos[noise], noise_n[noise], noise_pos[noise] / noise_n[noise])
print
for window in sorted(window_n.keys()):
print "window {}:\t{}\t{}\t{}".format(window, window_pos[window], window_n[window], window_pos[window] / window_n[window])
|
dstruck/comet-paper-scripts
|
compare_blast_comet/analyze_blast.py
|
Python
|
gpl-2.0
| 1,720
|
[
"BLAST"
] |
37f385d61c1fb46368c69c6b697241c7922cb72cbe2a7f353d29c6d8ef472487
|
# EXPERIMENTAL. DO NOT USE UNLESS YOU ARE JORIK. (OR JORIS)
import vtk
className = obj.__class__.__name__
if className == 'slice3dVWR':
ipw = obj.sliceDirections._sliceDirectionsDict.values()[0]._ipws[0]
if ipw.GetInput():
mins, maxs = ipw.GetInput().GetScalarRange()
else:
mins, maxs = 1, 1000000
# build a lookup table spanning the slice's scalar range
lut = vtk.vtkLookupTable()
lut.SetTableRange(mins, maxs)
lut.SetHueRange(0.1, 1.0)
lut.SetSaturationRange(1.0, 1.0)
lut.SetValueRange(1.0, 1.0)
lut.SetAlphaRange(1.0, 1.0)
lut.Build()
# hand the custom LUT over to the image plane widget
ipw.SetUserControlledLookupTable(1)
ipw.SetLookupTable(lut)
else:
print "You have to mark a slice3dVWR module!"
|
nagyistoce/devide
|
snippets/changeSVLut.py
|
Python
|
bsd-3-clause
| 694
|
[
"VTK"
] |
fd581cae87b4b2e1fb8a49eacd65175ad937308bf9fa437424d2ac5093d7b423
|
# -*- coding: utf-8 -*-
# This code is part of Amoco
# Copyright (C) 2018 Axel Tillequin (bdcht3@gmail.com)
# published under GPLv2 license
# spec_xxx files are providers for instruction objects.
# These objects are wrapped and created by disasm.py.
from amoco.logger import Log
logger = Log(__name__)
logger.debug("loading module")
from amoco.arch.core import *
from amoco.arch.v850 import env
ISPECS = []
def _chk_reg_null(obj, r):
if r == 0:
raise InstructionError(obj)
# reserved instructions:
@ispec("32<[0000000000000000 ----- 111111 1----]", mnemonic="RIE")
@ispec("16<[0000000001000000]", mnemonic="RIE")
@ispec("16<[0000000000000000]", mnemonic="NOP")
@ispec("16<[0000000000011101]", mnemonic="SYNCE")
@ispec("16<[0000000000011110]", mnemonic="SYNCM")
@ispec("16<[0000000000011111]", mnemonic="SYNCP")
def v850_rie(obj):
obj.operands = []
obj.type = type_system
# format I
@ispec("16<[ reg2(5) 001110 reg1(5) ]", mnemonic="ADD")
@ispec("16<[ reg2(5) 001010 reg1(5) ]", mnemonic="AND")
@ispec("16<[ reg2(5) 001111 reg1(5) ]", mnemonic="CMP")
@ispec("16<[ reg2(5) 000010 reg1(5) ]", mnemonic="DIVH", _chk=1 & 2)
@ispec("16<[ reg2(5) 000000 reg1(5) ]", mnemonic="MOV", _chk=2)
@ispec("16<[ reg2(5) 000111 reg1(5) ]", mnemonic="MULH", _chk=2)
@ispec("16<[ reg2(5) 000001 reg1(5) ]", mnemonic="NOT")
@ispec("16<[ reg2(5) 001000 reg1(5) ]", mnemonic="OR")
@ispec("16<[ reg2(5) 000110 reg1(5) ]", mnemonic="SATADD", _chk=2)
@ispec("16<[ reg2(5) 000101 reg1(5) ]", mnemonic="SATSUB", _chk=2)
@ispec("16<[ reg2(5) 000100 reg1(5) ]", mnemonic="SATSUBR", _chk=2)
@ispec("16<[ reg2(5) 001101 reg1(5) ]", mnemonic="SUB")
@ispec("16<[ reg2(5) 001100 reg1(5) ]", mnemonic="SUBR")
@ispec("16<[ reg2(5) 001011 reg1(5) ]", mnemonic="TST")
@ispec("16<[ reg2(5) 001001 reg1(5) ]", mnemonic="XOR")
def v850_reg_reg(obj, reg1, reg2, _chk=0):
if _chk & 1:
_chk_reg_null(obj, reg1)
if _chk & 2:
_chk_reg_null(obj, reg2)
dst, src = env.R[reg2], env.R[reg1]
obj.operands = [src, dst]
obj.type = type_data_processing
@ispec("16<[ 0 v(4) 000010 00000 ]", mnemonic="FETRAP")
def v850_fetrap(obj, v):
if v == 0:
raise InstructionError(obj)
vector = env.cst(v, 4)
obj.operands = [vector]
obj.type = type_system
@ispec("16<[ 00000 000011 reg1(5) ]", mnemonic="JMP")
@ispec("16<[ 00000 000010 reg1(5) ]", mnemonic="SWITCH")
def v850_jmp(obj, reg1):
dst = env.R[reg1]
obj.operands = [dst]
obj.type = type_control_flow
@ispec("16<[ 00000 000101 reg1(5) ]", mnemonic="SXB")
@ispec("16<[ 00000 000111 reg1(5) ]", mnemonic="SXH")
@ispec("16<[ 00000 000100 reg1(5) ]", mnemonic="ZXB")
@ispec("16<[ 00000 000110 reg1(5) ]", mnemonic="ZXH")
def v850_extend(obj, reg1):
dst = env.R[reg1]
obj.operands = [dst]
obj.type = type_data_processing
# format II
@ispec("16<[ reg2(5) 010010 imm(5) ]", mnemonic="ADD")
@ispec("16<[ reg2(5) 010011 imm(5) ]", mnemonic="CMP")
@ispec("16<[ reg2(5) 010000 imm(5) ]", mnemonic="MOV", _chk=2)
@ispec("16<[ reg2(5) 010111 imm(5) ]", mnemonic="MULH", _chk=2)
@ispec("16<[ reg2(5) 010101 imm(5) ]", mnemonic="SAR")
@ispec("16<[ reg2(5) 010001 imm(5) ]", mnemonic="SATADD", _chk=2)
@ispec("16<[ reg2(5) 010110 imm(5) ]", mnemonic="SHL")
@ispec("16<[ reg2(5) 010100 imm(5) ]", mnemonic="SHR")
def v850_imm_reg(obj, imm, reg2, _chk=0):
if _chk & 2:
_chk_reg_null(obj, reg2)
dst = env.R[reg2]
imm5 = env.cst(imm, 5).signextend(32)
obj.operands = [imm5, dst]
obj.type = type_data_processing
@ispec("16<[ 000000 1000 imm(6) ]", mnemonic="CALLT")
def v850_imm_reg(obj, imm):
imm = env.cst(imm << 1, 32)
obj.operands = [imm]
obj.type = type_data_processing
# format III
@ispec("16<[ ~dhi(5) 0000 ~dlo(3) .cond(4) ]", mnemonic="B")
def v850_br_cond(obj, dhi, dlo):
disp = (dlo // dhi).int()
disp = env.cst(disp << 1, 9).signextend(32)
obj.operands = [disp]
obj.type = type_control_flow
# format IV
@ispec("16<[ reg2(5) 0110 disp(6) d ]", mnemonic="SLDB", _size=8)
@ispec("16<[ reg2(5) 1000 disp(6) d ]", mnemonic="SLDH", _size=16)
@ispec("16<[ reg2(5) 1000 disp(6) 0=d ]", mnemonic="SLDW", _size=32)
@ispec("16<[ reg2(5) 0111 disp(6) d ]", mnemonic="SSTB", _size=8)
@ispec("16<[ reg2(5) 1001 disp(6) d ]", mnemonic="SSTH", _size=16)
@ispec("16<[ reg2(5) 1010 disp(6) 1=d ]", mnemonic="SSTW", _size=32)
def v850_ld_st(obj, reg2, disp, d, _size):
disp = disp << 1
if _size < 32:
disp += d
if _size > 8:
disp = disp << 1
dst, src = env.R[reg2], env.mem(env.ep, _size, disp=disp)
if obj.mnemonic.startswith("SST"):
src, dst = dst, src
obj.operands = [src, dst]
obj.type = type_data_processing
@ispec("16<[ reg2(5) 0000110 disp(4) ]", mnemonic="SLDBU", _size=8)
@ispec("16<[ reg2(5) 0000111 disp(4) ]", mnemonic="SLDHU", _size=16)
def v850_ld_st(obj, reg2, disp, _size):
if reg2 == 0:
raise InstructionError(obj)
if _size > 8:
disp = disp << 1
dst, src = env.R[reg2], env.mem(env.ep, _size, disp=disp)
obj.operands = [dst, src]
obj.type = type_data_processing
# format V
@ispec("32<[ ~dlo(15) 0 reg2(5) 11110 ~dhi(6) ]", mnemonic="JARL")
def v850_jmp(obj, dhi, reg2, dlo):
disp = env.cst((dlo // dhi).int(), 22).signextend(32)
obj.operands = [disp]
if reg2 == 0:
obj.mnemonic = "JR"
else:
dst = env.R[reg2]
obj.operands.append(dst)
obj.type = type_control_flow
# format VI
@ispec("32<[ imm(16) reg2(5) 110000 reg1(5) ]", mnemonic="ADDI")
@ispec("32<[ imm(16) reg2(5) 110110 reg1(5) ]", mnemonic="ANDI")
@ispec("32<[ imm(16) reg2(5) 110001 reg1(5) ]", mnemonic="MOVEA", _chk=2)
@ispec("32<[ imm(16) reg2(5) 110010 reg1(5) ]", mnemonic="MOVHI", _chk=2)
@ispec("32<[ imm(16) reg2(5) 110111 reg1(5) ]", mnemonic="MULHI", _chk=2)
@ispec("32<[ imm(16) reg2(5) 110100 reg1(5) ]", mnemonic="ORI")
@ispec("32<[ imm(16) reg2(5) 110011 reg1(5) ]", mnemonic="SATSUBI", _chk=2)
@ispec("32<[ imm(16) reg2(5) 110101 reg1(5) ]", mnemonic="XORI")
def v850_3ops(obj, imm, reg2, reg1, _chk=0):
if _chk & 2:
_chk_reg_null(obj, reg2)
dst, src = env.R[reg2], env.R[reg1]
imm16 = env.cst(imm, 16)
obj.operands = [imm16, src, dst]
obj.type = type_data_processing
@ispec("48<[ disp(31) 0 00000 010111 reg1(5) ]", mnemonic="JARL")
@ispec("48<[ disp(31) 0 00000 110111 reg1(5) ]", mnemonic="JMP")
def v850_3ops(obj, disp, reg1):
dst = env.R[reg1]
disp32 = env.cst(disp << 1, 32)
obj.operands = [disp32, dst]
if obj.mnemonic == "JARL" and reg1 == 0:
obj.mnemonic = "JR"
obj.operands.pop()
obj.type = type_control_flow
@ispec("48<[ disp(32) 00000 110001 reg1(5) ]", mnemonic="MOV")
def v850_mov(obj, disp, reg1):
dst = env.R[reg1]
disp32 = env.cst(disp, 32)
obj.operands = [disp32, dst]
obj.type = type_data_processing
# format VII
@ispec("32<[ ~disp(15) d reg2(5) 111000 reg1(5) ]", mnemonic="LDB", _size=8)
@ispec("32<[ ~disp(15) 0=d reg2(5) 111001 reg1(5) ]", mnemonic="LDH", _size=16)
@ispec("32<[ ~disp(15) 1=d reg2(5) 111111 reg1(5) ]", mnemonic="LDHU", _size=16)
@ispec("32<[ ~disp(15) 1=d reg2(5) 111001 reg1(5) ]", mnemonic="LDW", _size=32)
@ispec("32<[ ~disp(15) d reg2(5) 111010 reg1(5) ]", mnemonic="STB", _size=8)
@ispec("32<[ ~disp(15) 0=d reg2(5) 111011 reg1(5) ]", mnemonic="STH", _size=16)
@ispec("32<[ ~disp(15) 1=d reg2(5) 111011 reg1(5) ]", mnemonic="STW", _size=32)
def v850_ld_st(obj, disp, d, reg2, reg1, _size):
disp = disp.int(-1) << 1
if _size == 8:
disp |= d
src = env.mem(env.R[reg1], _size, disp=disp)
dst = env.R[reg2]
if obj.mnemonic.startswith("ST"):
src, dst = dst, src
obj.operands = [src, dst]
obj.type = type_data_processing
@ispec("32<[ ~d(15) 1 reg2(5) 11110 ~b reg1(5) ]", mnemonic="LDBU", _size=8)
def v850_ld_st(obj, d, reg2, b, reg1, _size):
if reg2 == 0:
raise InstructionError(obj)
disp = (b // d).int(-1)
src = env.mem(env.R[reg1], _size, disp=disp)
dst = env.R[reg2]
obj.operands = [src, dst]
obj.type = type_data_processing
# format VIII
@ispec("32<[ ~disp(16) 10 ~bnum(3) 111110 reg1(5) ]", mnemonic="CLR1")
@ispec("32<[ ~disp(16) 01 ~bnum(3) 111110 reg1(5) ]", mnemonic="NOT1")
@ispec("32<[ ~disp(16) 00 ~bnum(3) 111110 reg1(5) ]", mnemonic="SET1")
@ispec("32<[ ~disp(16) 11 ~bnum(3) 111110 reg1(5) ]", mnemonic="TST1")
def v850_bitwise(obj, disp, bnum, reg1):
src = env.mem(env.R[reg1], 8, disp=disp.int(-1))
obj.operands = [env.cst(bnum.int(), 3), src]
obj.type = type_data_processing
# format IX
@ispec("32<[ 000000001110010 0 reg2(5) 111111 reg1(5) ]", mnemonic="CLR1")
@ispec("32<[ 000000001110001 0 reg2(5) 111111 reg1(5) ]", mnemonic="NOT1")
@ispec("32<[ 000000001110000 0 reg2(5) 111111 reg1(5) ]", mnemonic="SET1")
@ispec("32<[ 000000001110011 0 reg2(5) 111111 reg1(5) ]", mnemonic="TST1")
def v850_ext1(obj, reg2, reg1):
src = env.mem(env.R[reg1], 8)
r2 = env.R[reg2]
obj.operands = [r2, src]
obj.type = type_data_processing
@ispec("32<[ 000000000010000 0 reg2(5) 111111 reg1(5) ]", mnemonic="LDSR")
@ispec("32<[ 000000001010000 0 reg2(5) 111111 reg1(5) ]", mnemonic="SAR")
@ispec("32<[ 000000001100000 0 reg2(5) 111111 reg1(5) ]", mnemonic="SHL")
@ispec("32<[ 000000001000000 0 reg2(5) 111111 reg1(5) ]", mnemonic="SHR")
@ispec("32<[ 000000000100000 0 reg2(5) 111111 reg1(5) ]", mnemonic="STSR")
def v850_ldsr(obj, reg2, reg1):
# src,dst are inverted for LDSR:
src, dst = env.R[reg2], env.R[reg1]
if obj.mnemonic.startswith("ST"):
src, dst = dst, src
obj.operands = [src, dst]
obj.type = type_data_processing
@ispec("32<[ 000000100000000 0 reg2(5) 111111 0 cond(4) ]", mnemonic="SASF")
@ispec("32<[ 000000000000000 0 reg2(5) 111111 0 cond(4) ]", mnemonic="SETF")
def v850_cccc(obj, reg2, cond):
c, dst = cond, env.R[reg2]
obj.operands = [c, dst]
obj.type = type_data_processing
# format X
@ispec("32<[ 000000010100010 0 0000011111100000 ]", mnemonic="CTRET")
@ispec("32<[ 000000010110000 0 0000011111100000 ]", mnemonic="DI")
@ispec("32<[ 000000010110000 0 1000011111100000 ]", mnemonic="EI")
@ispec("32<[ 000001111110000 0 0000000101001000 ]", mnemonic="EIRET")
@ispec("32<[ 000000010100101 0 0000011111100000 ]", mnemonic="FERET")
@ispec("32<[ 000000010010000 0 0000011111100000 ]", mnemonic="HALT")
@ispec("32<[ 000000010100000 0 0000011111100000 ]", mnemonic="RETI")
def v850_ext2(obj):
obj.operands = []
obj.type = type_data_processing
@ispec("32<[ 00 ~V(3) 001011 00000 11010 111111 ~v(5) ]", mnemonic="SYSCALL")
def v850_syscall(obj, V, v):
obj.operands = [env.cst((v // V).int(), 8)]
obj.type = type_control_flow
@ispec("32<[ 0000000100000000 00000 111111 v(5) ]", mnemonic="TRAP")
def v850_trap(obj, v):
obj.operands = [env.cst(v, 5)]
obj.type = type_control_flow
# format XI
@ispec("32<[ reg3(5) 011101 cond(4) 0 reg2(5) 111111 reg1(5) ]", mnemonic="ADF")
@ispec("32<[ reg3(5) 011001 cond(4) 0 reg2(5) 111111 reg1(5) ]", mnemonic="CMOV")
@ispec("32<[ reg3(5) 011100 cond(4) 0 reg2(5) 111111 reg1(5) ]", mnemonic="SBF")
def v850_cccc(obj, reg3, cond, reg2, reg1):
if cond == env.CONDITION_SA and obj.mnemonic in ("ADF", "SBF"):
raise InstructionError(obj)
dst, src2, src1 = env.R[reg3], env.R[reg2], env.R[reg1]
obj.operands = [cond, src1, src2, dst]
obj.type = type_data_processing
@ispec("32<[ reg3(5) 000111 0111 0 reg2(5) 111111 reg1(5) ]", mnemonic="CAXI")
def v850_ext3(obj, reg3, reg2, reg1):
dst, src2, src1 = env.R[reg3], env.R[reg2], env.R[reg1]
adr = src1 & 0xFFFFFFFC
obj.operands = [env.mem(adr, 32), src2, dst]
obj.type = type_data_processing
@ispec("32<[ reg4(4) 0011110 reg3(4) 0 reg2(5) 111111 reg1(5) ]", mnemonic="MAC")
@ispec("32<[ reg4(4) 0011111 reg3(4) 0 reg2(5) 111111 reg1(5) ]", mnemonic="MACU")
def v850_mac(obj, reg4, reg3, reg2, reg1):
dst, src3, src2, src1 = env.R[reg4 << 1], env.R[reg3 << 1], env.R[reg2], env.R[reg1]
obj.misc["reg4"] = reg4 << 1
obj.misc["reg3"] = reg3 << 1
obj.operands = [src1, src2, src3, dst]
obj.type = type_data_processing
@ispec("32<[ reg3(5) 01011 000000 reg2(5) 111111 reg1(5) ]", mnemonic="DIV")
@ispec("32<[ reg3(5) 01010 000000 reg2(5) 111111 reg1(5) ]", mnemonic="DIVH")
@ispec("32<[ reg3(5) 01011 000010 reg2(5) 111111 reg1(5) ]", mnemonic="DIVU")
@ispec("32<[ reg3(5) 01010 000010 reg2(5) 111111 reg1(5) ]", mnemonic="DIVHU")
@ispec("32<[ reg3(5) 01011 111100 reg2(5) 111111 reg1(5) ]", mnemonic="DIVQ")
@ispec("32<[ reg3(5) 01011 111110 reg2(5) 111111 reg1(5) ]", mnemonic="DIVQU")
@ispec("32<[ reg3(5) 01000 100000 reg2(5) 111111 reg1(5) ]", mnemonic="MUL")
@ispec("32<[ reg3(5) 01000 100010 reg2(5) 111111 reg1(5) ]", mnemonic="MULU")
@ispec("32<[ reg3(5) 00010 100010 reg2(5) 111111 reg1(5) ]", mnemonic="SAR")
@ispec("32<[ reg3(5) 01110 111010 reg2(5) 111111 reg1(5) ]", mnemonic="SATADD")
@ispec("32<[ reg3(5) 01110 011010 reg2(5) 111111 reg1(5) ]", mnemonic="SATSUB")
@ispec("32<[ reg3(5) 00011 000010 reg2(5) 111111 reg1(5) ]", mnemonic="SHL")
@ispec("32<[ reg3(5) 00010 000010 reg2(5) 111111 reg1(5) ]", mnemonic="SHR")
def v850_ext3(obj, reg3, reg2, reg1):
dst, src2, src1 = env.R[reg3], env.R[reg2], env.R[reg1]
obj.operands = [src1, src2, dst]
obj.type = type_data_processing
# format XII
@ispec("32<[ reg3(5) 01101000010 reg2(5) 111111 00000 ]", mnemonic="BSH")
@ispec("32<[ reg3(5) 01101000000 reg2(5) 111111 00000 ]", mnemonic="BSW")
@ispec("32<[ reg3(5) 01101000110 reg2(5) 111111 00000 ]", mnemonic="HSH")
@ispec("32<[ reg3(5) 01101000100 reg2(5) 111111 00000 ]", mnemonic="HSW")
@ispec("32<[ reg3(5) 01101100100 reg2(5) 111111 00000 ]", mnemonic="SCHOL")
@ispec("32<[ reg3(5) 01101100000 reg2(5) 111111 00000 ]", mnemonic="SCHOR")
@ispec("32<[ reg3(5) 01101100110 reg2(5) 111111 00000 ]", mnemonic="SCH1L")
@ispec("32<[ reg3(5) 01101100010 reg2(5) 111111 00000 ]", mnemonic="SCH1R")
def v850_ext4(obj, reg3, reg2):
dst, src = env.R[reg3], env.R[reg2]
obj.operands = [src, dst]
obj.type = type_data_processing
@ispec("32<[ reg3(5) 011000 cond(4) 0 reg2(5) 111111 imm5(5) ]", mnemonic="CMOV")
def v850_cccc(obj, reg3, cond, reg2, imm5):
imm = env.cst(imm5, 5).signextend(32)
dst, src = env.R[reg3], env.R[reg2]
obj.operands = [cond, imm, src, dst]
obj.type = type_data_processing
@ispec("32<[ reg3(5) 01001 ~I(4) 00 reg2(5) 111111 ~imm5(5) ]", mnemonic="MUL")
@ispec("32<[ reg3(5) 01001 ~I(4) 10 reg2(5) 111111 ~imm5(5) ]", mnemonic="MULU")
def v850_ext4(obj, reg3, I, reg2, imm5):
imm9 = env.cst((imm5 // I), 9)
dst, src = env.R[reg3], env.R[reg2]
obj.operands = [imm9, src, dst]
obj.type = type_data_processing
# format XIII
@ispec("32<[ ~l1(11) reg2(5) 00000 11001 imm(5) ~l0(1) ]", mnemonic="DISPOSE")
def v850_dispose(obj, l1, reg2, imm, l0):
imm = imm << 2
list12 = [env.R[x] for x in (30, 31, 29, 28, 23, 22, 21, 20, 27, 26, 25, 24)]
L = []
for b in (l0 // l1).bitlist():
r = list12.pop(0)
if b == 1:
L.append(r)
obj.operands = [env.mem(env.sp, 32, disp=imm), L]
if reg2 != 0:
obj.operands.append(env.R[reg2])
obj.type = type_data_processing
@ispec("32<[ ~l1(11) 00001 00000 11110 imm(5) ~l0(1) ]", mnemonic="PREPARE")
def v850_prepare(obj, l1, imm, l0):
imm = imm << 2
list12 = [env.R[x] for x in (30, 31, 29, 28, 23, 22, 21, 20, 27, 26, 25, 24)]
L = []
for b in (l0 // l1).bitlist():
r = list12.pop(0)
if b == 1:
L.append(r)
obj.operands = [L, env.cst(imm, 5)]
obj.type = type_data_processing
@ispec(
"64<[ imm32(32) ~lh(11) ff(2) 011 00000 11110 imm(5) ~lo(1) ]", mnemonic="PREPARE"
)
def v850_prepare(obj, imm32, lh, ff, imm, lo):
imm = imm << 2
list12 = [env.R[x] for x in (30, 31, 29, 28, 23, 22, 21, 20, 27, 26, 25, 24)]
L = []
for b in (lo // lh).bitlist():
r = list12.pop(0)
if b == 1:
L.append(r)
if ff == 0b00:
op3 = env.sp
elif ff == 0b01:
op3 = env.cst(imm & 0xFFFF, 16).signextend(32)
elif ff == 0b10:
op3 = env.cst((imm & 0xFFFF) << 16, 32)
elif ff == 0b11:
op3 = env.cst(imm, 32)
obj.operands = [L, env.cst(imm, 5), op3]
obj.type = type_data_processing
# format XIV
@ispec(
"48<[ ~dhi(16) reg3(5) ~dlo(7) 0101 00000 111100 reg1(5) ]", mnemonic="LDB", _size=8
)
@ispec(
"48<[ ~dhi(16) reg3(5) ~dlo(7) 0101 00000 111101 reg1(5) ]",
mnemonic="LDBU",
_size=8,
)
@ispec(
"48<[ ~dhi(16) reg3(5) ~dlo(7) 0111 00000 111100 reg1(5) ]",
mnemonic="LDH",
_size=16,
)
@ispec(
"48<[ ~dhi(16) reg3(5) ~dlo(7) 0111 00000 111101 reg1(5) ]",
mnemonic="LDHU",
_size=16,
)
@ispec(
"48<[ ~dhi(16) reg3(5) ~dlo(7) 1001 00000 111100 reg1(5) ]",
mnemonic="LDW",
_size=32,
)
@ispec(
"48<[ ~dhi(16) reg3(5) ~dlo(7) 1101 00000 111100 reg1(5) ]", mnemonic="STB", _size=8
)
@ispec(
"48<[ ~dhi(16) reg3(5) ~dlo(7) 1101 00000 111101 reg1(5) ]",
mnemonic="STH",
_size=16,
)
@ispec(
"48<[ ~dhi(16) reg3(5) ~dlo(7) 1111 00000 111100 reg1(5) ]",
mnemonic="STW",
_size=32,
)
def v850_ld_st(obj, dhi, reg3, dlo, reg1, _size):
if _size > 8 and dlo[0]:
raise InstructionError(obj)
disp23 = (dlo // dhi).int(-1)
src = env.mem(env.R[reg1], _size, disp=disp23)
dst = env.R[reg3]
if obj.mnemonic.startswith("ST"):
src, dst = dst, src
obj.operands = [src, dst]
obj.type = type_data_processing
|
LRGH/amoco
|
amoco/arch/v850/spec_v850e2s.py
|
Python
|
gpl-2.0
| 17,578
|
[
"ADF"
] |
1c8edbb996b2c00d63a6e615f14304b166f7ae8baa81b237134396a6598b37a8
|
import re
import functools
from copy import deepcopy
from flask import request
from brain.api.base import processRequest
from marvin import config
from marvin.core.exceptions import MarvinError
from marvin.utils.dap.datamodel import dap_datamodel as dm
from marvin.tools.maps import _get_bintemps
from webargs import fields, validate, ValidationError
from webargs.flaskparser import use_args, use_kwargs, parser
def plate_in_range(val):
if int(val) < 6500:
raise ValidationError('Plateid must be > 6500')
# List of global View arguments across all API routes
viewargs = {'name': fields.String(required=True, location='view_args', validate=[validate.Length(min=4),
validate.Regexp('^[0-9-]*$')]),
'galid': fields.String(required=True, location='view_args', validate=[validate.Length(min=4),
validate.Regexp('^[0-9-]*$')]),
'bintype': fields.String(required=True, location='view_args'),
'template_kin': fields.String(required=True, location='view_args'),
'property_name': fields.String(required=True, location='view_args'),
'channel': fields.String(required=True, location='view_args'),
'binid': fields.Integer(required=True, location='view_args', validate=validate.Range(min=-1, max=5800)),
'plateid': fields.String(required=True, location='view_args', validate=[validate.Length(min=4, max=5),
plate_in_range]),
'x': fields.Integer(required=True, location='view_args', validate=validate.Range(min=0, max=100)),
'y': fields.Integer(required=True, location='view_args', validate=validate.Range(min=0, max=100)),
'mangaid': fields.String(required=True, location='view_args', validate=validate.Length(min=4, max=20)),
'paramdisplay': fields.String(required=True, location='view_args', validate=validate.OneOf(['all', 'best'])),
'cube_extension': fields.String(required=True, location='view_args',
validate=validate.OneOf(['flux', 'ivar', 'mask']))
}
# List of all form parameters that are needed in all the API routes
# allow_none = True allows for the parameter to be non-existent when required=False and missing is not set
# setting missing = None by itself also works except when the parameter is also required
# (i.e. required=True + missing=None does not trigger the "required" validation error when it should)
params = {'query': {'searchfilter': fields.String(allow_none=True),
'paramdisplay': fields.String(allow_none=True, validate=validate.OneOf(['all', 'best'])),
'task': fields.String(allow_none=True, validate=validate.OneOf(['clean', 'getprocs'])),
'start': fields.Integer(allow_none=True, validate=validate.Range(min=0)),
'end': fields.Integer(allow_none=True, validate=validate.Range(min=0)),
'offset': fields.Integer(allow_none=True, validate=validate.Range(min=0)),
'limit': fields.Integer(missing=100, validate=validate.Range(max=50000)),
'sort': fields.String(allow_none=True),
'order': fields.String(missing='asc', validate=validate.OneOf(['asc', 'desc'])),
'rettype': fields.String(allow_none=True, validate=validate.OneOf(['cube', 'spaxel', 'maps', 'rss', 'modelcube'])),
'params': fields.DelimitedList(fields.String(), allow_none=True)
},
'search': {'searchbox': fields.String(required=True),
'parambox': fields.DelimitedList(fields.String(), allow_none=True)
},
'index': {'galid': fields.String(allow_none=True, validate=validate.Length(min=4)),
'mplselect': fields.String(allow_none=True, validate=validate.Regexp('MPL-[1-9]'))
},
'galaxy': {'plateifu': fields.String(allow_none=True, validate=validate.Length(min=8, max=11)),
'toggleon': fields.String(allow_none=True, validate=validate.OneOf(['true', 'false'])),
'image': fields.Url(allow_none=True),
'imheight': fields.Integer(allow_none=True, validate=validate.Range(min=0, max=1000)),
'imwidth': fields.Integer(allow_none=True, validate=validate.Range(min=0, max=1000)),
'type': fields.String(allow_none=True, validate=validate.OneOf(['optical', 'heatmap'])),
'x': fields.String(allow_none=True),
'y': fields.String(allow_none=True),
'mousecoords[]': fields.List(fields.String(), allow_none=True),
'bintemp': fields.String(allow_none=True),
'params[]': fields.List(fields.String(), allow_none=True)
}
}
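# Illustrative sketch of the allow_none/missing/required interplay described above the
# params dict (field declarations only; these are made-up examples, not used by any route):
#   fields.String(allow_none=True)              -> absent or null input passes validation
#   fields.String(missing=None)                 -> absent input is filled in with None
#   fields.String(required=True, missing=None)  -> avoid: the missing default is applied
#                                                  before the "required" check can fire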
# Add a custom Flask session location handler
@parser.location_handler('session')
def parse_session(req, name, field):
from flask import session as current_session
value = current_session.get(name, None)
return value
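# Usage sketch (illustrative only, not wired to any route here): with the 'session'
# location handler registered above, a view can pull values out of the Flask session
# alongside the usual locations, assuming the webargs release in use still accepts the
# plural `locations` keyword:
#
#   @use_kwargs({'release': fields.String()}, locations=('query', 'session'))
#   def myview(release):
#       ...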
class ArgValidator(object):
''' Web/API Argument validator '''
def __init__(self, urlmap={}):
self.release = None
self.endpoint = None
self.dapver = None
self.urlmap = urlmap
self.base_args = {'release': fields.String(required=True,
validate=validate.Regexp('MPL-[1-9]'))}
self.use_params = None
self._required = None
self._setmissing = None
self._main_kwargs = {}
self.final_args = {}
self.final_args.update(self.base_args)
self._parser = parser
self.use_args = use_args
self.use_kwargs = use_kwargs
def _reset_final_args(self):
''' Resets the final args dict '''
self.final_args = {}
self.final_args.update(self.base_args)
def _get_url(self):
''' Retrieve the URL route from the map based on the request endpoint '''
blue, end = self.endpoint.split('.', 1)
url = self.urlmap[blue][end]['url']
# if the blueprint is not api, add/remove session option location
if blue == 'api':
if 'session' in parser.locations:
pl = list(parser.locations)
pl.remove('session')
parser.locations = tuple(pl)
else:
if 'session' not in parser.locations:
parser.locations += ('session', )
return url
def _extract_view_args(self):
''' Extract any view argument parameters contained in the URL route '''
url = self._get_url()
url_viewargs = re.findall(r'{(.*?)}', url)
return url_viewargs
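    # For example, a (hypothetical) rule such as
    # '/marvin/api/cubes/{mangaid}/quantities/{paramdisplay}/' would yield
    # ['mangaid', 'paramdisplay'], which are then matched against the global
    # `viewargs` validators defined above.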
def _add_param_args(self):
''' Adds all appropriate form arguments into dictionary for validation '''
# get the url
url = self._get_url()
# check list or not
self.use_params = [self.use_params] if not isinstance(self.use_params, (list, tuple)) else self.use_params
for local_param in self.use_params:
if local_param in params:
# update param validation
if self._required or self._setmissing:
newparams = self.update_param_validation(local_param)
else:
newparams = deepcopy(params)
# add to params final args
self.final_args.update(newparams[local_param])
def _set_params_required(self, subset):
''' Set the param validation required parameter '''
# make list or not
self._required = [self._required] if not isinstance(self._required, (list, tuple)) else self._required
# update the required attribute
for req_param in self._required:
if req_param in subset.keys():
subset[req_param].required = True
subset[req_param].allow_none = False
if req_param == 'bintemp':
bintemps = self._get_bin_temps()
subset[req_param].validate = validate.OneOf(bintemps)
subset[req_param].validators.append(validate.OneOf(bintemps))
return subset
def _set_params_missing(self, subset):
''' Set the param validation missing parameter '''
for miss_field in subset.values():
miss_field.missing = None
return subset
def update_param_validation(self, name):
''' Update the validation of form params '''
# deep copy the global parameter dict
newparams = deepcopy(params)
subset = newparams[name]
# Set required params
if self._required:
subset = self._set_params_required(subset)
# Set missing params
if self._setmissing:
subset = self._set_params_missing(subset)
# return the new params
newparams[name] = subset
return newparams
def _add_view_args(self):
''' Adds all appropriate View arguments into dictionary for validation '''
local_viewargs = self._extract_view_args()
# check if any local_view args need new validation
props = ['bintype', 'template_kin', 'property_name', 'channel']
ismatch = set(local_viewargs) & set(props)
if ismatch:
self.update_view_validation()
# add only the local view args to the final arg list
if local_viewargs:
for varg in local_viewargs:
self.final_args.update({varg: viewargs[varg]})
def _update_viewarg(self, name, choices):
''' Updates the global View arguments validator '''
viewargs[name] = fields.String(required=True, location='view_args', validate=validate.OneOf(choices))
# viewargs[name].validate = validate.OneOf(choices)
def _get_bin_temps(self):
''' Gets the bintemps for a given release '''
bintemps = _get_bintemps(self.dapver)
return bintemps
def update_view_validation(self):
''' Update the validation of DAP MPL specific names based on the datamodel '''
# update all the dap datamodel specific options
bintemps = self._get_bin_temps()
bintypes = list(set([b.split('-', 1)[0] for b in bintemps]))
temps = list(set([b.split('-', 1)[1] for b in bintemps]))
properties = dm[self.dapver].list_names()
channels = list(set(sum([i.channels for i in dm[self.dapver] if i.channels is not None], []))) + ['None']
# update the global viewargs for each property
propfields = {'bintype': bintypes, 'template_kin': temps, 'property_name': properties,
'channel': channels}
for key, val in propfields.items():
self._update_viewarg(key, val)
def create_args(self):
''' Build the final argument list for webargs validation '''
# get the dapver
drpver, self.dapver = config.lookUpVersions(self.release)
# reset the final args
self._reset_final_args()
# add view args to the list
self._add_view_args()
# add form param args to the list
if self.use_params:
self._add_param_args()
def _pop_kwargs(self, **kwargs):
''' Pop all non webargs kwargs out of the main kwarg dict '''
webargs_kwargs = ['req', 'locations', 'as_kwargs', 'validate']
tempkwargs = kwargs.copy()
for key in kwargs:
if key not in webargs_kwargs:
tmp = tempkwargs.pop(key, None)
self._main_kwargs = tempkwargs
def _check_mainkwargs(self, **kwargs):
self.use_params = kwargs.pop('use_params', None)
self._required = kwargs.pop('required', None)
self._setmissing = kwargs.pop('set_missing', None)
def _get_release_endpoint(self, view):
''' get the release and endpoint if you can '''
self.release = view._release
self.endpoint = view._endpoint
def check_args(self, **mainkwargs):
''' Checks the input view and parameter arguments for validation using webargs
This is a decorator and modifies the standard webargs.flaskparser use_args decorator
'''
        # note: no state can be set out here; the decorator is invoked many times, so
        # anything stored at this level would be repeatedly overwritten
# decorator used to grab the release and endpoint of the route
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# args and kwargs here are the view function args and kwargs
self._get_release_endpoint(args[0])
# self.release = args[0]._release
# self.endpoint = args[0]._endpoint
# check the kwargs for any parameters
self._check_mainkwargs(**mainkwargs)
# create the arguments dictionary
self.create_args()
# pop all the kwargs
self._pop_kwargs(**mainkwargs)
# pass into webargs use_args (use_args is a decorator in itself)
newfunc = self.use_args(self.final_args, **self._main_kwargs)(func)
return newfunc(*args, **kwargs)
return wrapper
return decorator
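    # Hypothetical usage sketch (class and parameter names are illustrative only; the
    # decorated view is assumed to be a class-based view exposing `_release` and
    # `_endpoint`, as wrapper() expects):
    #
    #   arg_validator = ArgValidator(urlmap=urlmap)
    #
    #   class QueryView(SomeBaseView):
    #       @arg_validator.check_args(use_params='query', required='searchfilter')
    #       def post(self, args):
    #           searchfilter = args['searchfilter']
    #
    # At request time the decorator builds `final_args` from the route's view args plus
    # the requested form params and hands them to webargs' use_args.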
def check_kwargs(self, **kwargs):
kwargs['as_kwargs'] = True
return self.check_args(**kwargs)
def check_release(self, **kwargs):
''' Checks only the release '''
return self.use_kwargs(self.base_args)
def list_params(self, param_type=None):
''' List the globally defined parameters for validation
'''
total = {'viewargs': viewargs, 'params': params}
if param_type == 'viewargs':
return total['viewargs']
elif param_type == 'params':
return total['params']
else:
return total
def manual_parse(self, view, req, **mainkwargs):
''' Manually parse the args '''
# args = parser.parse(user_args, request)
self._get_release_endpoint(view)
url = self._get_url()
self._check_mainkwargs(**mainkwargs)
self.create_args()
self._pop_kwargs(**mainkwargs)
newargs = parser.parse(self.final_args, req, force_all=True, **self._main_kwargs)
# see if we make it a multidict
makemulti = mainkwargs.get('makemulti', None)
if makemulti:
from werkzeug.datastructures import ImmutableMultiDict
newargs = ImmutableMultiDict(newargs.copy())
return newargs
|
bretthandrews/marvin
|
python/marvin/api/__init__.py
|
Python
|
bsd-3-clause
| 14,656
|
[
"Galaxy"
] |
54883ed2cf2113829f853dcbf832ae5cbcfde2127936f08826ce03f54d8ba5a0
|
from diagnostic import unitroot_adf
import statsmodels.datasets.macrodata.data as macro
macrod = macro.load().data
print macro.NOTE
print macrod.dtype.names
datatrendli = [
('realgdp', 1),
('realcons', 1),
('realinv', 1),
('realgovt', 1),
('realdpi', 1),
('cpi', 1),
('m1', 1),
('tbilrate', 0),
('unemp',0),
('pop', 1),
('infl',0),
('realint', 0)
]
print '%-10s %5s %8s %8s' % ('variable', 'trend', 'adf', 'pval')
for name, torder in datatrendli:
adf_, pval = unitroot_adf(macrod[name], trendorder=torder)[:2]
print '%-10s %5d %8.4f %8.4f' % (name, torder, adf_, pval)
|
yarikoptic/pystatsmodels
|
statsmodels/sandbox/stats/ex_newtests.py
|
Python
|
bsd-3-clause
| 773
|
[
"ADF"
] |
828ca6ec94fc1eea966990a0459a9f850754ef6315361a355ebe7900b21633b5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright © 2016 Matthew Stone <mstone5@mgh.harvard.edu>
# Distributed under terms of the MIT license.
"""
"""
from __future__ import print_function
import unittest
try:
unittest.skip
except AttributeError:
import unittest2 as unittest
import vcf
import pysam
import rpc
import svcall
import svfile
class TestHelpers(unittest.TestCase):
def test_is_smaller_chrom(self):
one = '1'
two = '2'
ten = '10'
x = 'X'
y = 'Y'
self.assertTrue(rpc.is_smaller_chrom(one, two))
self.assertTrue(rpc.is_smaller_chrom(one, one))
self.assertFalse(rpc.is_smaller_chrom(one, one, le=False))
self.assertFalse(rpc.is_smaller_chrom(two, one))
self.assertTrue(rpc.is_smaller_chrom(two, ten))
self.assertTrue(rpc.is_smaller_chrom(ten, x))
self.assertTrue(rpc.is_smaller_chrom(x, y))
self.assertFalse(rpc.is_smaller_chrom(y, x))
self.assertTrue(rpc.is_smaller_chrom(x, x))
self.assertFalse(rpc.is_smaller_chrom(x, x, le=False))
class TestSVCall(unittest.TestCase):
def test_delly(self):
reader = vcf.Reader(filename='example.delly.vcf')
record = next(reader)
call = svcall.DellyCall(record)
self.assertEqual(call.chrA, '1')
self.assertEqual(call.posA, 869478)
self.assertEqual(call.chrB, '1')
self.assertEqual(call.posB, 870222)
self.assertEqual(call.name, 'DEL00000001')
self.assertFalse(call.is_HQ())
# Test HQ variant
for record in reader:
if record.ID == 'DUP00000003':
break
call = svcall.DellyCall(record)
self.assertTrue(call.is_HQ())
def test_lumpy(self):
reader = vcf.Reader(filename='example.lumpy.vcf.gz', compressed=True)
record = next(reader)
call = svcall.LumpyCall(record)
self.assertEqual(call.chrA, '1')
self.assertEqual(call.posA, 869476)
self.assertEqual(call.chrB, '1')
self.assertEqual(call.posB, 870221)
self.assertEqual(call.svtype, 'DEL')
self.assertFalse(call.is_HQ())
# Test HQ variant
for record in reader:
if record.ID == '101':
break
call = svcall.LumpyCall(record)
self.assertEqual(call.svtype, 'INV')
self.assertTrue(call.is_HQ())
# Test secondary
for record in reader:
if record.ID.endswith('_2'):
break
call = svcall.LumpyCall(record)
self.assertTrue(call.is_secondary)
self.assertEqual(call.svtype, 'BND')
self.assertFalse(call.is_HQ())
def test_rpc(self):
reader = svfile.BedpeParser('example.rpc.bedpe')
# Test parsing
record = next(reader)
call = svcall.RPCCall(record)
self.assertEqual(call.chrA, '1')
self.assertEqual(call.posA, 869575)
self.assertEqual(call.chrB, '1')
self.assertEqual(call.posB, 870221)
self.assertEqual(call.name, 'SFARI_d11194p1_del_1')
self.assertEqual(call.sample, 'SFARI_d11194p1')
def test_equality(self):
c1 = svcall.SVCall('1', 1, '1', 10, 'c1')
c1b = svcall.SVCall('1', 1, '1', 10, 'c1')
c2 = svcall.SVCall('1', 1, '2', 10, 'c1')
c3 = svcall.SVCall('1', 1, '1', 11, 'c1')
self.assertEqual(c1, c1b)
self.assertNotEqual(c1, c2)
self.assertNotEqual(c1, c3)
def test_inequality(self):
c1 = svcall.SVCall('1', 1, '1', 10, 'c1')
c2 = svcall.SVCall('1', 2, '1', 9, 'c2')
c3 = svcall.SVCall('1', 1, '1', 8, 'c3')
c4 = svcall.SVCall('10', 1, '10', 10, 'c4')
c5 = svcall.SVCall('2', 1, '2', 10, 'c5')
c6 = svcall.SVCall('X', 1, 'X', 10, 'c6')
c7 = svcall.SVCall('X', 1, 'Y', 10, 'c7')
self.assertTrue(c1 < c2)
self.assertTrue(c1 <= c3)
self.assertTrue(c1 < c4)
self.assertTrue(c1 < c5)
self.assertTrue(c5 < c4)
self.assertTrue(c1 < c6)
self.assertTrue(c5 < c7)
def test_clusters(self):
dist = 500
c1 = svcall.SVCall('1', 1, '1', 1000, 'c1')
c2 = svcall.SVCall('2', 1, '2', 1000, 'c2')
self.assertFalse(c1.clusters_with(c2, dist))
c3 = svcall.SVCall('1', 550, '1', 1001, 'c3')
self.assertFalse(c1.clusters_with(c3, dist))
c4 = svcall.SVCall('1', 450, '1', 1001, 'c4')
self.assertTrue(c1.clusters_with(c4, dist))
c6 = svcall.SVCall('1', 450, '1', 1505, 'c6')
self.assertTrue(c1.is_clusterable_with(c6, dist))
self.assertFalse(c1.clusters_with(c6, dist))
def test_is_in(self):
tabixfile = pysam.TabixFile('example.tabix.bed.gz')
c = svcall.SVCall('1', 100, '1', 20000, 'c')
self.assertTrue(c.is_in(tabixfile))
c = svcall.SVCall('1', 4000, '1', 6000, 'c')
self.assertTrue(c.is_in(tabixfile))
c = svcall.SVCall('2', 4000, '1', 6000, 'c')
self.assertTrue(c.is_in(tabixfile))
c = svcall.SVCall('2', 4000, '2', 6000, 'c')
self.assertFalse(c.is_in(tabixfile))
c = svcall.SVCall('X', 4000, '2', 6000, 'c')
self.assertFalse(c.is_in(tabixfile))
c = svcall.SVCall('X', 400, '2', 6000, 'c')
self.assertTrue(c.is_in(tabixfile))
class TestSVCallCluster(unittest.TestCase):
def setUp(self):
reader = vcf.Reader(filename='example.lumpy.vcf.gz', compressed=True)
_ = next(reader)
r1 = next(reader)
r2 = next(reader)
c1 = svcall.LumpyCall(r1)
c2 = svcall.LumpyCall(r2)
self.cluster = svcall.SVCallCluster([c1, c2], name='merged')
def test_init(self):
self.assertEqual(self.cluster.posA, 964450)
self.assertEqual(self.cluster.posB, 964890)
self.assertTrue('SFARI_d11194p1' in self.cluster.obs)
obs_list = ['lumpy', 'lumpy']
self.assertTrue(self.cluster.obs['SFARI_d11194p1'] == obs_list)
def test_support_str(self):
samples = 'SFARI_d11194p1 SFARI_d11194s1'.split()
progs = 'delly lumpy rpc'.split()
support_str = self.cluster.support_str(samples, progs)
p1 = '0:2:0'
s1 = '0:0:0'
self.assertEqual(support_str.split()[-1], s1)
self.assertEqual(support_str.split()[-2], p1)
# class TestSVFileParsing(unittest.TestCase):
# """Test VCF parsing"""
# def test_vcf(self):
# sv_file = svfile.SVFile('example.delly.vcf')
# call = next(sv_file)
# self.assertEqual(call.chrA, '1')
# self.assertEqual(call.posA, 869478)
# self.assertEqual(call.chrB, '1')
# self.assertEqual(call.posB, 870222)
# self.assertEqual(call.name, 'DEL00000001')
# def test_gzipped_vcf(self):
# svfile = svfile.SVFile('example.lumpy.vcf.gz')
# call = svfile.next()
# self.assertEqual(call.chrA, '1')
# self.assertEqual(call.posA, 869476)
# self.assertEqual(call.chrB, '1')
# self.assertEqual(call.posB, 870221)
# self.assertEqual(call.name, '1')
if __name__ == '__main__':
unittest.main(warnings='ignore')
|
talkowski-lab/Holmes
|
readpaircluster/svcf/test_svcf.py
|
Python
|
mit
| 7,224
|
[
"pysam"
] |
a8974478a222af1c1e91ee1869dac065c825890fa5f5005de4e728d8fe7f6b26
|
import collections
import random
import unittest
import mock
from cardboard import card as c, events, zone as z
from cardboard.tests.util import GameTestCase
from cardboard.util import ANY
ENTER, LEAVE = events.ENTERED_ZONE, events.LEFT_ZONE
class ZoneTest(GameTestCase):
card = mock.Mock(spec=c.Card)
def setUp(self):
super(ZoneTest, self).setUp()
self.u = z.UnorderedZone(
name="Emerald Hill", game=self.game, contents=self.library,
)
self.o = z.OrderedZone(
name="Casino Night", game=self.game, contents=self.library,
)
class TestZones(ZoneTest):
def test_name(self):
self.assertEqual(self.u.name, "Emerald Hill")
self.assertEqual(self.o.name, "Casino Night")
def test_ordered(self):
self.assertFalse(self.u.ordered)
self.assertTrue(self.o.ordered)
def test_str_repr(self):
self.assertEqual(str(self.u), "Emerald Hill")
self.assertEqual(str(self.o), "Casino Night")
self.assertEqual(repr(self.u), "<Zone: Emerald Hill>")
self.assertEqual(repr(self.o), "<Zone: Casino Night>")
def test_contains(self):
for i in self.library:
self.assertIn(i, self.u)
self.assertIn(i, self.o)
self.assertNotIn(object(), self.u)
self.assertNotIn(object(), self.o)
def test_iter(self):
self.assertEqual(set(self.u), set(self.library))
self.assertEqual(list(self.o), self.library)
def test_len(self):
self.assertEqual(len(self.u), len(self.library))
self.assertEqual(len(self.o), len(self.library))
def test_add(self):
with self.assertTriggers(event=ENTER, card=30, zone=self.u):
self.u.add(30)
with self.assertTriggers(event=ENTER, card=30, zone=self.o):
self.o.add(30)
self.assertEqual(set(self.u), set(self.library) | {30})
self.assertEqual(list(self.o), self.library + [30])
def test_add_already_contains(self):
NO_OWNER, OWNER = "on the {}", "in {}'s {}"
u, o = self.u.name, self.o.name
n = mock.Mock()
self.u.add(n)
self.o.add(n)
self.resetEvents()
with self.assertRaisesRegexp(ValueError, NO_OWNER.format(u)):
self.u.add(n)
with self.assertRaisesRegexp(ValueError, NO_OWNER.format(o)):
self.o.add(n)
with self.assertRaisesRegexp(ValueError, OWNER.format(n.owner, u)):
self.u.owner = n.owner
self.u.add(n)
with self.assertRaisesRegexp(ValueError, OWNER.format(n.owner, o)):
self.o.owner = n.owner
self.o.add(n)
# wasn't added twice nor removed
self.assertIn(self.library[0], self.u)
self.assertEqual(self.o.count(self.library[0]), 1)
self.assertFalse(self.events.trigger.called)
def test_add_owner_redirection(self):
"""
Adding a card with a different owner than the zone's redirects.
"""
card = mock.Mock()
self.u.name, self.o.name = "foo", "bar"
self.u.owner, self.o.owner = mock.Mock(), mock.Mock()
self.u.add(card)
self.o.add(card)
card.owner.foo.add.assert_called_once_with(card)
card.owner.bar.add.assert_called_once_with(card)
def test_move(self):
self.o.add(self.card)
self.card.zone = self.o # on actual cards this is a property
with self.assertTriggers(event=ENTER, card=self.card, zone=self.u):
self.u.move(self.card)
self.card.zone = self.u
self.assertIn(self.card, self.u)
with self.assertTriggers(event=ENTER, card=self.card, zone=self.o):
self.o.move(self.card)
self.assertIn(self.card, self.o)
def test_move_to_self(self):
self.resetEvents()
# shouldn't even be checking library[0].zone
with self.assertRaises(ValueError):
self.u.move(self.library[0])
with self.assertRaises(ValueError):
self.o.move(self.library[0])
# wasn't added twice nor removed
self.assertIn(self.library[0], self.u)
self.assertEqual(self.o.count(self.library[0]), 1)
self.assertFalse(self.events.trigger.called)
def test_pop(self):
self.resetEvents()
e = self.u.pop()
self.assertLastEventsWere([dict(event=LEAVE, card=e, zone=self.u)])
self.resetEvents()
f = self.o.pop()
self.assertLastEventsWere([dict(event=LEAVE, card=f, zone=self.o)])
self.assertEqual(set(self.u), set(self.library) - {e})
self.assertEqual(list(self.o), self.library[:-1])
def test_remove(self):
e = self.library[-7]
self.library.remove(e)
with self.assertTriggers(event=LEAVE, card=e, zone=self.u):
self.u.remove(e)
with self.assertTriggers(event=LEAVE, card=e, zone=self.o):
self.o.remove(e)
self.assertEqual(set(self.u), set(self.library))
self.assertEqual(list(self.o), self.library)
self.assertRaises(ValueError, self.u.remove, object())
self.assertRaises(ValueError, self.o.remove, object())
def test_update(self):
self.u.update(range(4))
for i in range(4):
self.assertIn(i, self.u)
self.assertEqual(len(self.u), len(self.library) + 4)
evs = [dict(event=ENTER, card=i, zone=self.u) for i in range(4)]
self.assertLastEventsWere(evs)
self.resetEvents()
self.o.update(range(4))
self.assertEqual(self.o[-4:], range(4))
self.assertEqual(len(self.o), len(self.library) + 4)
evs = [dict(event=ENTER, card=i, zone=self.o) for i in range(4)]
self.assertLastEventsWere(evs)
def test_silent(self):
self.o.add(self.card)
self.card.zone = self.o
self.resetEvents()
self.u.add(20, silent=True)
self.o.add(20, silent=True)
self.u.remove(self.library[0], silent=True)
self.o.remove(self.library[0], silent=True)
self.u.pop(silent=True)
self.o.pop(silent=True)
self.u.move(self.card, silent=True)
self.card.zone = self.u
self.o.move(self.card, silent=True)
self.u.update(range(10), silent=True)
self.o.update(range(10), silent=True)
self.assertFalse(self.events.trigger.called)
def test_iterable(self):
i = range(10)
        # TODO: This is incomplete; not all of the methods accept iterables yet
o = z.OrderedZone(game=None, name="Emerald Hill", contents=i)
u = z.UnorderedZone(game=None, name="Emerald Hill", contents=i)
i.pop()
self.assertEqual(list(o), range(10))
self.assertEqual(list(u), range(10))
class TestOrderedZone(ZoneTest):
def test_reversed(self):
self.assertEqual(list(reversed(self.o)), list(reversed(self.library)))
def test_getitem(self):
for i, e in enumerate(self.library):
self.assertEqual(self.o[i], e)
self.assertEqual(self.o[2:7:2], self.library[2:7:2])
def test_set_del_item(self):
self.assertRaises(AttributeError, getattr, self.o, "__setitem__")
self.assertRaises(AttributeError, getattr, self.o, "__delitem__")
def test_count(self):
o = z.OrderedZone(game=None, name="Emerald Hill",
contents=[1, 1, 1, 2, 2, 3])
for i, e in enumerate(range(3, 0, -1), 1):
self.assertEqual(o.count(e), i)
def test_index(self):
e = self.library[4]
self.assertEqual(self.o.index(e), 4)
def test_pop_index(self):
e1 = self.o.pop(0)
e2 = self.o.pop(4)
self.library.pop(0)
self.library.pop(4)
self.assertEqual(list(self.o), self.library)
self.assertLastEventsWere([
{"event" : LEAVE, "card" : e1, "zone" : self.o},
{"event" : LEAVE, "card" : e2, "zone" : self.o},
])
def test_reverse(self):
self.o.reverse()
self.assertEqual(list(self.o), list(reversed(self.library)))
def test_shuffle(self):
with mock.patch("cardboard.zone.random.shuffle") as shuffle:
self.o.shuffle()
shuffle.assert_called_once_with(self.o._order)
class TestZone(unittest.TestCase):
def test_zone(self):
c = mock.Mock()
for zone in ["battlefield", "exile", "hand"]:
n = z.zone[zone](game=None, contents=[c])
self.assertIsInstance(n, z.UnorderedZone)
self.assertEquals(n.name, zone)
self.assertIn(c, n)
for zone in ["graveyard", "library", "stack"]:
n = z.zone[zone](game=None, contents=[c])
self.assertIsInstance(n, z.OrderedZone)
self.assertEquals(n.name, zone)
self.assertIn(c, n)
|
Julian/cardboard
|
cardboard/tests/test_zone.py
|
Python
|
mit
| 8,874
|
[
"CASINO"
] |
ae0c0f81de75283868452c011c98c4906561dfd95919c241116b6cd905d2de6c
|
import tensorflow as tf
import numpy as np
from elbow import Model, Gaussian, BernoulliMatrix, BetaMatrix, DirichletMatrix
from elbow.models.factorizations import GMMClustering
def clustering_gmm_model(n_clusters = 4,
cluster_center_std = 5.0,
cluster_spread_std = 2.0,
n_points = 500,
dim = 2):
centers = Gaussian(mean=0.0, std=cluster_center_std, shape=(n_clusters, dim), name="centers")
weights = DirichletMatrix(alpha=1.0,
shape=(n_clusters,),
name="weights")
X = GMMClustering(weights=weights, centers=centers,
std=cluster_spread_std, shape=(n_points, dim), name="X")
jm = Model(X)
return jm
def main():
jm = clustering_gmm_model()
sampled = jm.sample()
jm["X"].observe(sampled["X"])
jm.train()
posterior = jm.posterior()
weights = np.exp(posterior["q_weights"]["mean"])
weights /= np.sum(weights)
print "sampled cluster weights", sampled["weights"]
print "inferred weights", weights
print "sampled cluster centers", sampled["centers"]
print "inferred cluster centers", posterior["q_centers"]["mean"]
if __name__ == "__main__":
main()
|
davmre/bayesflow
|
examples/clustering.py
|
Python
|
bsd-3-clause
| 1,309
|
[
"Gaussian"
] |
bb5fa757f16c9a5914038a6ca72260cf5fc8d118f6439d91758fb5f9a9fa1905
|
#!/usr/bin/python2
import optparse
import subprocess
import sys
from builds import GporcaBuild, GpcodegenBuild, GporcacodegenBuild
def make(num_cpus):
return subprocess.call("make -j %d" % (num_cpus), cwd="gpdb_src", shell=True)
def install(output_dir):
subprocess.call("make install", cwd="gpdb_src", shell=True)
subprocess.call("mkdir -p " + output_dir, shell=True)
return subprocess.call("cp -r /usr/local/gpdb/* " + output_dir, shell=True)
def unittest():
return subprocess.call("make -s unittest-check", cwd="gpdb_src/src/backend", shell=True)
def main():
parser = optparse.OptionParser()
parser.add_option("--build_type", dest="build_type", default="RELEASE")
parser.add_option("--mode", choices=['orca', 'codegen', 'orca_codegen'])
parser.add_option("--compiler", dest="compiler")
parser.add_option("--cxxflags", dest="cxxflags")
parser.add_option("--output_dir", dest="output_dir", default="install")
(options, args) = parser.parse_args()
if options.mode == 'orca':
ciCommon = GporcaBuild()
elif options.mode == 'codegen':
ciCommon = GpcodegenBuild()
elif options.mode == 'orca_codegen':
        ciCommon = GporcacodegenBuild()
    else:
        # guard against an UnboundLocalError below when --mode is omitted or unrecognized
        parser.error("--mode must be one of 'orca', 'codegen' or 'orca_codegen'")
status = ciCommon.install_system_deps()
if status:
return status
for dependency in args:
status = ciCommon.install_dependency(dependency)
if status:
return status
status = ciCommon.configure()
if status:
return status
status = make(ciCommon.num_cpus())
if status:
return status
status = unittest()
if status:
return status
status = install(options.output_dir)
if status:
return status
return 0
if __name__ == "__main__":
sys.exit(main())
|
ahachete/gpdb
|
concourse/scripts/build_gpdb.py
|
Python
|
apache-2.0
| 1,782
|
[
"ORCA"
] |
37835fd92fb254a636ccd9686dea8a97842a24da68c0738902a52217b44f4477
|
"""Define a helper function for running tests
The skeleton for making a new setup is as follows:
from ase.optimize.test import run_test
def get_atoms():
return Atoms('H')
def get_calculator():
return EMT()
run_test(get_atoms, get_calculator, 'Hydrogen')
"""
import time
import matplotlib
#matplotlib.rcParams['backend']="Agg"
from ase.optimize.bfgs import BFGS
from ase.optimize.lbfgs import LBFGS, LBFGSLineSearch
from ase.optimize.fire import FIRE
from ase.optimize.mdmin import MDMin
from ase.optimize.sciopt import SciPyFminCG
from ase.optimize.sciopt import SciPyFminBFGS
from ase.optimize.bfgslinesearch import BFGSLineSearch
from ase.optimize.oldqn import GoodOldQuasiNewton
from ase.parallel import rank, paropen
import matplotlib.pyplot as pl
import numpy as np
import traceback
optimizers = [
'BFGS',
'LBFGS',
'LBFGSLineSearch',
'FIRE',
'MDMin',
'SciPyFminCG',
'SciPyFminBFGS',
'BFGSLineSearch',
'GoodOldQuasiNewton'
]
def get_optimizer(optimizer):
if optimizer == 'BFGS': return BFGS
elif optimizer == 'LBFGS': return LBFGS
elif optimizer == 'LBFGSLineSearch': return LBFGSLineSearch
elif optimizer == 'FIRE': return FIRE
elif optimizer == 'MDMin': return MDMin
elif optimizer == 'SciPyFminCG': return SciPyFminCG
elif optimizer == 'SciPyFminBFGS': return SciPyFminBFGS
elif optimizer == 'BFGSLineSearch': return BFGSLineSearch
elif optimizer == 'GoodOldQuasiNewton': return GoodOldQuasiNewton
def run_test(get_atoms, get_calculator, name,
fmax=0.05, steps=100, plot=True):
plotter = Plotter(name, fmax)
csvwriter = CSVWriter(name)
# write header
row = ['Optimizer', 'Optimizer Steps', 'Force evaluations', 'Energy']
row.extend(['Time [sec]', 'Note'])
format = '%s,%s,%s,%s,%s,%s\n'
csvwriter.write(row, format)
for optimizer in optimizers:
note = ''
logname = name + '-' + optimizer
atoms = get_atoms()
atoms.set_calculator(get_calculator())
opt = get_optimizer(optimizer)
relax = opt(atoms, logfile=None)
#logfile = logname + '.log',
#trajectory = logname + '.traj')
obs = DataObserver(atoms)
relax.attach(obs)
t = time.time()
try:
relax.run(fmax = fmax, steps = steps)
E = atoms.get_potential_energy()
if relax.get_number_of_steps() == steps:
note = 'Not converged in %i steps' % steps
except Exception:
traceback.print_exc()
note = 'An exception occurred'
E = np.nan
t = time.time() - t
nsteps = relax.get_number_of_steps()
if hasattr(relax, 'force_calls'):
fc = relax.force_calls
if rank == 0:
print('%-15s %-15s %3i %8.3f (%3i) %s' % (name, optimizer, nsteps, E, fc, note))
else:
fc = nsteps
if rank == 0:
print('%-15s %-15s %3i %8.3f %s' % (name, optimizer, nsteps, E, note))
plotter.plot(optimizer, obs.get_E(), obs.get_fmax())
format = '%s,%i,%i,%.5f,%i,%s\n'
row = [optimizer, nsteps, fc, E]
row.extend([int(t), note])
csvwriter.write(row, format)
plotter.save()
csvwriter.finalize()
class Plotter:
def __init__(self, name, fmax):
self.name = name
self.fmax = fmax
if rank == 0:
self.fig = pl.figure(figsize=[12.0, 9.0])
self.axes0 = self.fig.add_subplot(2, 1, 1)
self.axes1 = self.fig.add_subplot(2, 1, 2)
def plot(self, optimizer, E, fmax):
if rank == 0:
self.axes0.plot(E, label = optimizer)
self.axes1.plot(fmax)
def save(self, format='png'):
if rank == 0:
self.axes0.legend()
self.axes0.set_title(self.name)
self.axes0.set_ylabel('E [eV]')
#self.axes0.set_yscale('log')
self.axes1.set_xlabel('steps')
self.axes1.set_ylabel('fmax [eV/A]')
self.axes1.set_yscale('log')
self.axes1.axhline(self.fmax, color='k', linestyle='--')
self.fig.savefig(self.name + '.' + format)
class CSVWriter:
def __init__(self, name):
self.f = paropen(name + '.csv', 'w')
def write(self, row, format):
self.f.write(format % tuple(row))
def finalize(self):
self.f.close()
class DataObserver:
def __init__(self, atoms):
self.atoms = atoms
self.E = []
self.fmax = []
def __call__(self):
self.E.append(self.atoms.get_potential_energy())
self.fmax.append(np.sqrt((self.atoms.get_forces()**2).sum(axis=1)).max())
def get_E(self):
return np.array(self.E)
def get_fmax(self):
return np.array(self.fmax)
|
suttond/MODOI
|
ase/optimize/test/__init__.py
|
Python
|
lgpl-3.0
| 4,863
|
[
"ASE"
] |
4779a0382e33dd7c5734085e3b342ba3a18618f429cef39962fa18504dfb4b62
|
from __future__ import print_function
import os, sys, inspect
import h5py
import numpy as np
import matplotlib
import random
import math
import multiprocessing
from PIL import Image
from Crypto.Random.random import randint
from functools import partial
# Load the configuration file
import config
cmd_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
if cmd_folder not in sys.path:
sys.path.append(cmd_folder)
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],config.caffe_path+"/python")))
if cmd_subfolder not in sys.path:
sys.path.append(cmd_subfolder)
sys.path.append(config.caffe_path+"/python")
# Ensure correct compilation of Caffe and Pycaffe
if config.library_compile:
cpus = multiprocessing.cpu_count()
cwd = os.getcwd()
os.chdir(config.caffe_path)
result = os.system("make all -j %s" % cpus)
if result != 0:
sys.exit(result)
result = os.system("make pycaffe -j %s" % cpus)
if result != 0:
sys.exit(result)
os.chdir(cwd)
# Import pycaffe
import caffe
from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2
import netconf
# General variables
# Size of a float variable
fsize = 4
def compute_memory_weights(shape_arr):
memory = 0
for i in range(0,len(shape_arr)):
memory += shape_arr[i][1]
return memory
def compute_memory_buffers(shape_arr):
memory = 0
for i in range(0,len(shape_arr)):
memory = max(memory, shape_arr[i][0])
return memory
def compute_memory_blobs(shape_arr):
memory = 0
for i in range(0,len(shape_arr)):
mem = fsize * shape_arr[i][2]
for j in range(0,len(shape_arr[i][4])):
mem *= shape_arr[i][4][j]
memory += mem
return memory
def update_shape(shape_arr, update):
last_shape = shape_arr[-1]
new_shape = [update[0](last_shape[0]), update[1](last_shape[1]), update[2](last_shape[2]),
[update[3][min(i,len(update[3])-1)](last_shape[3][i]) for i in range(0,len(last_shape[3]))],
[update[4][min(i,len(update[4])-1)](last_shape[4][i]) for i in range(0,len(last_shape[4]))]]
shape_arr += [new_shape]
print ("TEST B: %s" % [update[4][min(i,len(update[4])-1)]([1,1,1][i]) for i in range(0,3)])
return shape_arr
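# Worked example: starting from the input shape entry [0, 0, 1, [1, 1, 1], [44, 132, 132]]
# used below, the rules produced by conv_relu for a 3-wide kernel with dilation [1, 1, 1]
# shrink each spatial extent by 2, yielding
# [conv_buff, weight_mem, num_output, [1, 1, 1], [42, 130, 130]].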
def data_layer(shape):
data, label = L.MemoryData(dim=shape, ntop=2)
return data, label
def conv_relu(run_shape, bottom, num_output, kernel_size=[3], stride=[1], pad=[0], dilation=[1], group=1, weight_std=0.01):
# The convolution buffer and weight memory
weight_mem = fsize * num_output * run_shape[-1][2]
conv_buff = fsize * run_shape[-1][2]
for i in range(0,len(run_shape[-1][4])):
conv_buff *= kernel_size[min(i,len(kernel_size)-1)]
conv_buff *= run_shape[-1][4][i]
weight_mem *= kernel_size[min(i,len(kernel_size)-1)]
# Shape update rules
update = [lambda x: conv_buff, lambda x: weight_mem, lambda x: num_output]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: x - (kernel_size[min(i,len(kernel_size)-1)] - 1) * (run_shape[-1][3][i]) for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
conv = L.Convolution(bottom, kernel_size=kernel_size, stride=stride, dilation=dilation,
num_output=num_output, pad=pad, group=group,
param=[dict(lr_mult=1),dict(lr_mult=2)],
weight_filler=dict(type='gaussian', std=weight_std),
bias_filler=dict(type='constant'))
return conv, L.ReLU(conv, in_place=True, negative_slope=0.005)
def convolution(run_shape, bottom, num_output, kernel_size=[3], stride=[1], pad=[0], dilation=[1], group=1, weight_std=0.01):
# The convolution buffer and weight memory
weight_mem = fsize * num_output * run_shape[-1][2]
conv_buff = fsize * run_shape[-1][2]
for i in range(0,len(run_shape[-1][4])):
conv_buff *= kernel_size[min(i,len(kernel_size)-1)]
conv_buff *= run_shape[-1][4][i]
weight_mem *= kernel_size[min(i,len(kernel_size)-1)]
# Shape update rules
update = [lambda x: conv_buff, lambda x: weight_mem, lambda x: num_output]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: x - (kernel_size[min(i,len(kernel_size)-1)] - 1) * (run_shape[-1][3][i]) for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
return L.Convolution(bottom, kernel_size=kernel_size, stride=stride, dilation=dilation,
num_output=num_output, pad=pad, group=group,
param=[dict(lr_mult=1),dict(lr_mult=2)],
weight_filler=dict(type='gaussian', std=weight_std),
bias_filler=dict(type='constant'))
def max_pool(run_shape, bottom, kernel_size=[2], stride=[2], pad=[0], dilation=[1]):
# Shape update rules
update = [lambda x: 0, lambda x: 0, lambda x: x]
update += [[lambda x, i=i: x * dilation[min(i,len(dilation)-1)] for i in range(0,len(run_shape[-1][4]))]]
# Strictly speaking this update rule is not complete, but should be sufficient for USK
if dilation[0] == 1 and kernel_size[0] == stride[0]:
update += [[lambda x, i=i: x / (kernel_size[min(i,len(kernel_size)-1)]) for i in range(0,len(run_shape[-1][4]))]]
else:
update += [[lambda x, i=i: x - (kernel_size[min(i,len(kernel_size)-1)] - 1) * (run_shape[-1][3][i]) for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=kernel_size, stride=stride, pad=pad, dilation=dilation)
def upconv(run_shape, bottom, num_output_dec, num_output_conv, weight_std=0.01, kernel_size=[2], stride=[2]):
# Shape update rules
update = [lambda x: 0, lambda x: 0, lambda x: num_output_dec]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: kernel_size[min(i,len(kernel_size)-1)] * x for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
deconv = L.Deconvolution(bottom, convolution_param=dict(num_output=num_output_dec, kernel_size=kernel_size, stride=stride, pad=[0], dilation=[1], group=num_output_dec,
weight_filler=dict(type='constant', value=1), bias_term=False),
param=dict(lr_mult=0, decay_mult=0))
# The convolution buffer and weight memory
weight_mem = fsize * num_output_conv * num_output_dec
conv_buff = fsize * run_shape[-1][2]
for i in range(0,len(run_shape[-1][4])):
conv_buff *= 2
conv_buff *= run_shape[-1][4][i]
# Shape update rules
update = [lambda x: conv_buff, lambda x: weight_mem, lambda x: num_output_conv]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: x for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
conv = L.Convolution(deconv, num_output=num_output_conv, kernel_size=[1], stride=[1], pad=[0], dilation=[1], group=1,
param=[dict(lr_mult=1),dict(lr_mult=2)],
weight_filler=dict(type='gaussian', std=weight_std),
bias_filler=dict(type='constant'))
return deconv, conv
def mergecrop(run_shape, bottom_a, bottom_b):
# Shape update rules
update = [lambda x: 0, lambda x: 0, lambda x: 2*x]
update += [[lambda x: x, lambda x: x, lambda x: x]]
update += [[lambda x, i=i: x for i in range(0,len(run_shape[-1][4]))]]
update_shape(run_shape, update)
return L.MergeCrop(bottom_a, bottom_b, forward=[1,1], backward=[1,1])
def implement_usknet(net, run_shape, fmaps_start, fmaps_end):
# Chained blob list to construct the network (forward direction)
blobs = []
# All networks start with data
blobs = blobs + [net.data]
fmaps = fmaps_start
if netconf.unet_depth > 0:
# U-Net downsampling; 2*Convolution+Pooling
for i in range(0, netconf.unet_depth):
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu] # This is the blob of interest for mergecrop (index 2 + 3 * i)
pool = max_pool(run_shape, blobs[-1], kernel_size=netconf.unet_downsampling_strategy[i], stride=netconf.unet_downsampling_strategy[i])
blobs = blobs + [pool]
fmaps = netconf.unet_fmap_inc_rule(fmaps)
# If there is no SK-Net component, fill with 2 convolutions
if (netconf.unet_depth > 0 and netconf.sknet_conv_depth == 0):
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
# Else use the SK-Net instead
else:
for i in range(0, netconf.sknet_conv_depth):
# TODO: Not implemented yet (fixme)
run_shape = run_shape
if netconf.unet_depth > 0:
# U-Net upsampling; Upconvolution+MergeCrop+2*Convolution
for i in range(0, netconf.unet_depth):
deconv, conv = upconv(run_shape, blobs[-1], fmaps, netconf.unet_fmap_dec_rule(fmaps), kernel_size=netconf.unet_downsampling_strategy[i], stride=netconf.unet_downsampling_strategy[i], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [conv]
fmaps = netconf.unet_fmap_dec_rule(fmaps)
# Here, layer (2 + 3 * i) with reversed i (high to low) is picked
mergec = mergecrop(run_shape, blobs[-1], blobs[-1 + 3 * (netconf.unet_depth - i)])
blobs = blobs + [mergec]
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
conv, relu = conv_relu(run_shape, blobs[-1], fmaps, kernel_size=[3], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [relu]
conv = convolution(run_shape, blobs[-1], fmaps_end, kernel_size=[1], weight_std=math.sqrt(2.0/float(run_shape[-1][2]*pow(3,len(run_shape[-1][4])))))
blobs = blobs + [conv]
# Return the last blob of the network (goes to error objective)
return blobs[-1]
def caffenet(netmode):
# Start Caffe proto net
net = caffe.NetSpec()
# Specify input data structures
if netmode == caffe_pb2.TEST:
if netconf.loss_function == 'malis':
fmaps_end = 11
if netconf.loss_function == 'euclid':
fmaps_end = 11
if netconf.loss_function == 'softmax':
fmaps_end = 2
net.data, net.datai = data_layer([1,1,44,132,132])
net.silence = L.Silence(net.datai, ntop=0)
# Shape specs:
# 00. Convolution buffer size
# 01. Weight memory size
        # 02. Num. channels
        # 03. [d] parameter running value
        # 04. [w] parameter running value
run_shape_in = [[0,0,1,[1,1,1],[44,132,132]]]
run_shape_out = run_shape_in
last_blob = implement_usknet(net, run_shape_out, 64, fmaps_end)
# Implement the prediction layer
if netconf.loss_function == 'malis':
net.prob = L.Sigmoid(last_blob, ntop=1)
if netconf.loss_function == 'euclid':
net.prob = L.Sigmoid(last_blob, ntop=1)
if netconf.loss_function == 'softmax':
net.prob = L.Softmax(last_blob, ntop=1)
for i in range(0,len(run_shape_out)):
print(run_shape_out[i])
print("Max. memory requirements: %s B" % (compute_memory_buffers(run_shape_out)+compute_memory_weights(run_shape_out)+compute_memory_blobs(run_shape_out)))
print("Weight memory: %s B" % compute_memory_weights(run_shape_out))
print("Max. conv buffer: %s B" % compute_memory_buffers(run_shape_out))
else:
if netconf.loss_function == 'malis':
net.data, net.datai = data_layer([1,1,44,132,132])
net.label, net.labeli = data_layer([1,1,16,44,44])
net.label_affinity, net.label_affinityi = data_layer([1,11,16,44,44])
net.affinity_edges, net.affinity_edgesi = data_layer([1,1,11,3])
net.silence = L.Silence(net.datai, net.labeli, net.label_affinityi, net.affinity_edgesi, ntop=0)
fmaps_end = 11
if netconf.loss_function == 'euclid':
net.data, net.datai = data_layer([1,1,44,132,132])
net.label, net.labeli = data_layer([1,11,16,44,44])
net.scale, net.scalei = data_layer([1,11,16,44,44])
net.silence = L.Silence(net.datai, net.labeli, net.scalei, ntop=0)
fmaps_end = 11
if netconf.loss_function == 'softmax':
net.data, net.datai = data_layer([1,1,44,132,132])
# Currently only supports binary classification
net.label, net.labeli = data_layer([1,1,16,44,44])
net.silence = L.Silence(net.datai, net.labeli, ntop=0)
fmaps_end = 2
run_shape_in = [[0,1,1,[1,1,1],[44,132,132]]]
run_shape_out = run_shape_in
# Start the actual network
last_blob = implement_usknet(net, run_shape_out, 64, fmaps_end)
for i in range(0,len(run_shape_out)):
print(run_shape_out[i])
print("Max. memory requirements: %s B" % (compute_memory_buffers(run_shape_out)+compute_memory_weights(run_shape_out)+2*compute_memory_blobs(run_shape_out)))
print("Weight memory: %s B" % compute_memory_weights(run_shape_out))
print("Max. conv buffer: %s B" % compute_memory_buffers(run_shape_out))
# Implement the loss
if netconf.loss_function == 'malis':
last_blob = L.Sigmoid(last_blob, in_place=True)
net.loss = L.MalisLoss(last_blob, net.label_affinity, net.label, net.affinity_edges, ntop=0)
if netconf.loss_function == 'euclid':
last_blob = L.Sigmoid(last_blob, in_place=True)
net.loss = L.EuclideanLoss(last_blob, net.label, net.scale, ntop=0)
if netconf.loss_function == 'softmax':
net.loss = L.SoftmaxWithLoss(last_blob, net.label, ntop=0)
# Return the protocol buffer of the generated network
return net.to_proto()
def make_net():
with open('net/net_train.prototxt', 'w') as f:
print(caffenet(caffe_pb2.TRAIN), file=f)
with open('net/net_test.prototxt', 'w') as f:
print(caffenet(caffe_pb2.TEST), file=f)
def make_solver():
with open('net/solver.prototxt', 'w') as f:
print('train_net: \"net/net_train.prototxt\"', file=f)
print('base_lr: 0.00001', file=f)
print('momentum: 0.99', file=f)
print('weight_decay: 0.000005', file=f)
print('lr_policy: \"inv\"', file=f)
print('gamma: 0.0001', file=f)
print('power: 0.75', file=f)
print('max_iter: 100000', file=f)
print('snapshot: 2000', file=f)
print('snapshot_prefix: \"net_\"', file=f)
print('display: 50', file=f)
make_net()
make_solver()
|
naibaf7/caffe_neural_models
|
dataset_06/network_generator.py
|
Python
|
bsd-2-clause
| 16,077
|
[
"Gaussian"
] |
4b62a5b47ab146216dccf0e672fcb5d89e15a08e61154871819510abc9c42ca8
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = "Brian Lehman, Scott Hendrickson"
import sys
import re
import codecs
reload(sys)
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
sys.stdin = codecs.getreader('utf-8')(sys.stdin)
import math
import pprint
import json
import fiona
from shapely.geometry import Point, shape, Polygon, box
from collections import defaultdict
import argparse
import os
import pickle
########################
# functions
def tree(): return defaultdict(tree)
def grid_finder(x,y):
return (int((math.floor(x)-grid_boundaries[0])/delta)
,int((math.floor(y)-grid_boundaries[1])/delta))
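# Worked example, using the default bounding box (-185, 15, -65, 70) and delta=5 set in
# __main__: grid_finder(-105.3, 39.7) -> (int((-106 + 185) / 5), int((39 - 15) / 5))
# -> (15, 4), i.e. the grid cell spanning lon [-110, -105) x lat [35, 40).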
def topic_args():
parser = argparse.ArgumentParser(description="Reverse geo coder returns location info given a set of lon,lat")
parser.add_argument("file_name"
, metavar= "file_name"
, nargs="?"
, default=[]
, help="Input file name (optional).")
parser.add_argument("-b"
, "--bounding-box"
, dest="grid_boundaries"
, default="-185,15,-65,70"
, help="Set bounding box for region to include (default: [-185,15,-65,70])")
parser.add_argument("-d"
, "--delta"
, dest="delta"
, default=5
, help="Set the number of degrees between grid coords (default: 5)")
parser.add_argument("-g"
, "--use-saved-grid"
, dest="use_saved_grid"
, default=False
, action="store_true"
, help="Save grid or use previously saved version in data/grid.json")
parser.add_argument("-s"
, "--shape-file-path"
, dest="shape_file_path"
, default="data/tl_2013_us_county.shp"
, help="Set shapefile path (default: data/tl_2013_us_county.shp)")
parser.add_argument("-t"
, "--tweet-input"
, dest="tweet_input"
, default=False
, action="store_true"
, help="Set input as tweet payload instead of coordinates (in progress)")
return parser
def build_grid():
    #grid_boundaries=(-185,15,-65,70) # upper-right edge is plus delta (lower 48 states)
grid={(i,j):{}
for i in range((grid_boundaries[2]-grid_boundaries[0])/delta)
for j in range((grid_boundaries[3]-grid_boundaries[1])/delta) }
with fiona.open(options.shape_file_path) as fc:
print >>sys.stderr, fc.driver,"###",fc.schema,"###", len(fc),"###",fc.crs
print >> sys.stderr,fc.schema
print >>sys.stderr, "Number of records:", len(fc)
print >>sys.stderr, "Bounds of all records:", fc.bounds
print >>sys.stderr, "Bounds applied:",grid_boundaries
print >> sys.stderr,"######## indexing shapes to grid ########"
print >> sys.stderr,"shapes complete:"
c=0
for feature in fc:
c+=1
GEOID=str(feature['properties']['GEOID'])
NAME=feature['properties']['NAME']
INTPTLON=float(feature['properties']['INTPTLON'])
INTPTLAT=float(feature['properties']['INTPTLAT'])
shp=shape(feature['geometry']) # list of coordinates of geometric shape
bb=box(*shp.bounds) #box(minx,miny,maxx,maxy)) creates one boxlike shape to rule them all
for i,j in grid:
grid_box=box(i*delta+grid_boundaries[0]
,j*delta+grid_boundaries[1]
,(i+1)*delta+grid_boundaries[0]
,(j+1)*delta+grid_boundaries[1] )
if grid_box.intersects(bb): #http://toblerity.org/shapely/manual.html#object.intersects
grid[(i,j)][bb]=(shp,GEOID,NAME,INTPTLON,INTPTLAT) # (county shape, countyID)
if c%100==0:
print >> sys.stderr, c
return grid
if __name__ == '__main__':
options = topic_args().parse_args()
grid_boundaries=[int(item) for item in options.grid_boundaries.split(",")]
delta=int(options.delta)
if not options.use_saved_grid:
grid=build_grid()
else:
if not os.path.isfile("./data/grid.json"):
print >>sys.stderr, "creating ./data/grid.json"
grid=build_grid()
if not os.path.exists("./data"):
os.makedirs("./data")
print >>sys.stderr, "saving file ./data/grid.json"
with open("./data/grid.json","wb") as g:
pickle.dump(grid,g)
else:
print >>sys.stderr, "using ./data/grid.json"
grid=pickle.load(open("./data/grid.json"))
counter=0
in_grid_not_in_county=0
grid_counter=0
print >> sys.stderr,"######## locating geo coords in grid ########"
for line in sys.stdin:
#( lng, lat ) = coord #NOTE:the input file must contain (lng,lat)
values=line.replace("(","").replace(")","").replace("[","").replace("]","").strip().split(",")
lng = float(values[0])
lat = float(values[1])
point = Point(float(lng), float(lat))
coords=grid_finder(lng,lat)
found=False
if coords not in grid:
counter+=1
            print >> sys.stderr,"not in grid: {}, not in county: {}, found: {}".format(counter,in_grid_not_in_county,grid_counter)
print >> sys.stderr,"{},{}: not in grid".format(lng,lat)
continue
for box in grid[coords]:
if box.contains(point):
if grid[coords][box][0].contains(point):
e=tree()
found=True
grid_counter+=1
e["coords"]=(lng,lat)
e["GEOID"]=grid[coords][box][1]
e["centroid"]=(grid[coords][box][3],grid[coords][box][4])
e["county"]=grid[coords][box][2]
print json.dumps(e)
break #point found, no need to continue searching
if not found:
in_grid_not_in_county+=1
print >> sys.stderr,"######## DONE ########"
print >> sys.stderr, "{} points outside of grid".format(counter)
print >> sys.stderr, "{} points in grid but not in a county".format(in_grid_not_in_county)
print >> sys.stderr, "{} points in grid and in county".format(grid_counter)
|
gscottstukey/Data-Science-45min-Intros
|
gis_tools/rev_geo.py
|
Python
|
unlicense
| 6,294
|
[
"Brian"
] |
74442c39467cb28159208098324e8a662087dc90e11d120cf13f157f9978f856
|
"""
Views related to the Custom Courses feature.
"""
import csv
import datetime
import functools
import json
import logging
import pytz
from contextlib import contextmanager
from copy import deepcopy
from cStringIO import StringIO
from django.core.urlresolvers import reverse
from django.http import (
HttpResponse,
HttpResponseForbidden,
)
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.http import Http404
from django.shortcuts import redirect
from django.utils.translation import ugettext as _
from django.views.decorators.cache import cache_control
from django.views.decorators.csrf import ensure_csrf_cookie
from django.contrib.auth.models import User
from courseware.courses import get_course_by_id
from courseware.field_overrides import disable_overrides
from courseware.grades import iterate_grades_for
from courseware.model_data import FieldDataCache
from courseware.module_render import get_module_for_descriptor
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.keys import CourseKey
from ccx_keys.locator import CCXLocator
from student.roles import CourseCcxCoachRole # pylint: disable=import-error
from student.models import CourseEnrollment
from instructor.offline_gradecalc import student_grades # pylint: disable=import-error
from instructor.views.api import _split_input_list # pylint: disable=import-error
from instructor.views.tools import get_student_from_identifier # pylint: disable=import-error
from instructor.enrollment import (
enroll_email,
unenroll_email,
get_email_params,
)
from .models import CustomCourseForEdX
from .overrides import (
clear_override_for_ccx,
get_override_for_ccx,
override_field_for_ccx,
clear_ccx_field_info_from_ccx_map,
bulk_delete_ccx_override_fields,
)
log = logging.getLogger(__name__)
TODAY = datetime.datetime.today # for patching in tests
def coach_dashboard(view):
"""
View decorator which enforces that the user have the CCX coach role on the
given course and goes ahead and translates the course_id from the Django
route into a course object.
"""
@functools.wraps(view)
def wrapper(request, course_id):
"""
Wraps the view function, performing access check, loading the course,
and modifying the view's call signature.
"""
course_key = CourseKey.from_string(course_id)
ccx = None
if isinstance(course_key, CCXLocator):
ccx_id = course_key.ccx
ccx = CustomCourseForEdX.objects.get(pk=ccx_id)
course_key = ccx.course_id
role = CourseCcxCoachRole(course_key)
if not role.has_user(request.user):
return HttpResponseForbidden(
_('You must be a CCX Coach to access this view.'))
course = get_course_by_id(course_key, depth=None)
# if there is a ccx, we must validate that it is the ccx for this coach
if ccx is not None:
coach_ccx = get_ccx_for_coach(course, request.user)
if coach_ccx is None or coach_ccx.id != ccx.id:
return HttpResponseForbidden(
_('You must be the coach for this ccx to access this view')
)
return view(request, course, ccx)
return wrapper
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def dashboard(request, course, ccx=None):
"""
Display the CCX Coach Dashboard.
"""
# right now, we can only have one ccx per user and course
    # so, if no ccx is passed in, we can safely redirect to it
if ccx is None:
ccx = get_ccx_for_coach(course, request.user)
if ccx:
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
context = {
'course': course,
'ccx': ccx,
}
if ccx:
ccx_locator = CCXLocator.from_course_locator(course.id, ccx.id)
schedule = get_ccx_schedule(course, ccx)
grading_policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy)
context['schedule'] = json.dumps(schedule, indent=4)
context['save_url'] = reverse(
'save_ccx', kwargs={'course_id': ccx_locator})
context['ccx_members'] = CourseEnrollment.objects.filter(course_id=ccx_locator)
context['gradebook_url'] = reverse(
'ccx_gradebook', kwargs={'course_id': ccx_locator})
context['grades_csv_url'] = reverse(
'ccx_grades_csv', kwargs={'course_id': ccx_locator})
context['grading_policy'] = json.dumps(grading_policy, indent=4)
context['grading_policy_url'] = reverse(
'ccx_set_grading_policy', kwargs={'course_id': ccx_locator})
else:
context['create_ccx_url'] = reverse(
'create_ccx', kwargs={'course_id': course.id})
return render_to_response('ccx/coach_dashboard.html', context)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def create_ccx(request, course, ccx=None):
"""
Create a new CCX
"""
name = request.POST.get('name')
# prevent CCX objects from being created for deprecated course ids.
if course.id.deprecated:
messages.error(request, _(
"You cannot create a CCX from a course using a deprecated id. "
"Please create a rerun of this course in the studio to allow "
"this action."))
url = reverse('ccx_coach_dashboard', kwargs={'course_id': course.id})
return redirect(url)
ccx = CustomCourseForEdX(
course_id=course.id,
coach=request.user,
display_name=name)
ccx.save()
# Make sure start/due are overridden for entire course
start = TODAY().replace(tzinfo=pytz.UTC)
override_field_for_ccx(ccx, course, 'start', start)
override_field_for_ccx(ccx, course, 'due', None)
# Hide anything that can show up in the schedule
hidden = 'visible_to_staff_only'
for chapter in course.get_children():
override_field_for_ccx(ccx, chapter, hidden, True)
for sequential in chapter.get_children():
override_field_for_ccx(ccx, sequential, hidden, True)
for vertical in sequential.get_children():
override_field_for_ccx(ccx, vertical, hidden, True)
ccx_id = CCXLocator.from_course_locator(course.id, ccx.id) # pylint: disable=no-member
url = reverse('ccx_coach_dashboard', kwargs={'course_id': ccx_id})
return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def save_ccx(request, course, ccx=None):
"""
Save changes to CCX.
"""
if not ccx:
raise Http404
def override_fields(parent, data, graded, earliest=None, ccx_ids_to_delete=None):
"""
Recursively apply CCX schedule data to CCX by overriding the
`visible_to_staff_only`, `start` and `due` fields for units in the
course.
"""
if ccx_ids_to_delete is None:
ccx_ids_to_delete = []
blocks = {
str(child.location): child
for child in parent.get_children()}
for unit in data:
block = blocks[unit['location']]
override_field_for_ccx(
ccx, block, 'visible_to_staff_only', unit['hidden'])
start = parse_date(unit['start'])
if start:
if not earliest or start < earliest:
earliest = start
override_field_for_ccx(ccx, block, 'start', start)
else:
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'start_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'start')
due = parse_date(unit['due'])
if due:
override_field_for_ccx(ccx, block, 'due', due)
else:
ccx_ids_to_delete.append(get_override_for_ccx(ccx, block, 'due_id'))
clear_ccx_field_info_from_ccx_map(ccx, block, 'due')
if not unit['hidden'] and block.graded:
graded[block.format] = graded.get(block.format, 0) + 1
children = unit.get('children', None)
if children:
override_fields(block, children, graded, earliest, ccx_ids_to_delete)
return earliest, ccx_ids_to_delete
graded = {}
earliest, ccx_ids_to_delete = override_fields(course, json.loads(request.body), graded, [])
bulk_delete_ccx_override_fields(ccx, ccx_ids_to_delete)
if earliest:
override_field_for_ccx(ccx, course, 'start', earliest)
# Attempt to automatically adjust grading policy
changed = False
policy = get_override_for_ccx(
ccx, course, 'grading_policy', course.grading_policy
)
policy = deepcopy(policy)
grader = policy['GRADER']
for section in grader:
count = graded.get(section.get('type'), 0)
if count < section['min_count']:
changed = True
section['min_count'] = count
if changed:
override_field_for_ccx(ccx, course, 'grading_policy', policy)
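    # Illustrative example (numbers are hypothetical): if the master course grades twelve
    # "Homework" units but the CCX schedule leaves only three of them visible, min_count
    # for that section is lowered from 12 to 3 so the overridden policy stays satisfiable.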
return HttpResponse(
json.dumps({
'schedule': get_ccx_schedule(course, ccx),
'grading_policy': json.dumps(policy, indent=4)}),
content_type='application/json',
)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def set_grading_policy(request, course, ccx=None):
"""
Set grading policy for the CCX.
"""
if not ccx:
raise Http404
override_field_for_ccx(
ccx, course, 'grading_policy', json.loads(request.POST['policy']))
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
def validate_date(year, month, day, hour, minute):
"""
avoid corrupting db if bad dates come in
"""
valid = True
if year < 0:
valid = False
if month < 1 or month > 12:
valid = False
if day < 1 or day > 31:
valid = False
if hour < 0 or hour > 23:
valid = False
if minute < 0 or minute > 59:
valid = False
return valid
def parse_date(datestring):
"""
Generate a UTC datetime.datetime object from a string of the form
'YYYY-MM-DD HH:MM'. If string is empty or `None`, returns `None`.
"""
if datestring:
date, time = datestring.split(' ')
year, month, day = map(int, date.split('-'))
hour, minute = map(int, time.split(':'))
if validate_date(year, month, day, hour, minute):
return datetime.datetime(
year, month, day, hour, minute, tzinfo=pytz.UTC)
return None
def get_ccx_for_coach(course, coach):
"""
Looks to see if user is coach of a CCX for this course. Returns the CCX or
None.
"""
ccxs = CustomCourseForEdX.objects.filter(
course_id=course.id,
coach=coach
)
# XXX: In the future, it would be nice to support more than one ccx per
# coach per course. This is a place where that might happen.
if ccxs.exists():
return ccxs[0]
return None
def get_ccx_schedule(course, ccx):
"""
Generate a JSON serializable CCX schedule.
"""
def visit(node, depth=1):
"""
Recursive generator function which yields CCX schedule nodes.
We convert dates to string to get them ready for use by the js date
widgets, which use text inputs.
"""
for child in node.get_children():
start = get_override_for_ccx(ccx, child, 'start', None)
if start:
start = str(start)[:-9]
due = get_override_for_ccx(ccx, child, 'due', None)
if due:
due = str(due)[:-9]
hidden = get_override_for_ccx(
ccx, child, 'visible_to_staff_only',
child.visible_to_staff_only)
visited = {
'location': str(child.location),
'display_name': child.display_name,
'category': child.category,
'start': start,
'due': due,
'hidden': hidden,
}
if depth < 3:
children = tuple(visit(child, depth + 1))
if children:
visited['children'] = children
yield visited
else:
yield visited
with disable_overrides():
return tuple(visit(course))
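# Illustrative sketch added for clarity (not part of the original module):
# every node yielded by get_ccx_schedule() is a plain dict, nested at most
# three levels deep through the optional 'children' key, with dates already
# converted to 'YYYY-MM-DD HH:MM' strings for the JS widgets. The locator
# strings and names below are hypothetical.
_EXAMPLE_CCX_SCHEDULE_NODE = {
    'location': 'block-v1:edX+Demo+2016+type@chapter+block@week1',
    'display_name': 'Week 1',
    'category': 'chapter',
    'start': '2016-07-17 20:40',
    'due': None,
    'hidden': False,
    'children': (
        {
            'location': 'block-v1:edX+Demo+2016+type@sequential+block@unit1',
            'display_name': 'Unit 1',
            'category': 'sequential',
            'start': None,
            'due': '2016-07-24 23:30',
            'hidden': False,
        },
    ),
}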
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_schedule(request, course, ccx=None): # pylint: disable=unused-argument
"""
get json representation of ccx schedule
"""
if not ccx:
raise Http404
schedule = get_ccx_schedule(course, ccx)
json_schedule = json.dumps(schedule, indent=4)
    return HttpResponse(json_schedule, content_type='application/json')
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_invite(request, course, ccx=None):
"""
Invite users to new ccx
"""
if not ccx:
raise Http404
action = request.POST.get('enrollment-button')
identifiers_raw = request.POST.get('student-ids')
identifiers = _split_input_list(identifiers_raw)
    auto_enroll = 'auto-enroll' in request.POST
    email_students = 'email-students' in request.POST
for identifier in identifiers:
user = None
email = None
try:
user = get_student_from_identifier(identifier)
except User.DoesNotExist:
email = identifier
else:
email = user.email
try:
validate_email(email)
course_key = CCXLocator.from_course_locator(course.id, ccx.id)
email_params = get_email_params(course, auto_enroll, course_key=course_key, display_name=ccx.display_name)
if action == 'Enroll':
enroll_email(
course_key,
email,
auto_enroll=auto_enroll,
email_students=email_students,
email_params=email_params
)
if action == "Unenroll":
unenroll_email(course_key, email, email_students=email_students, email_params=email_params)
except ValidationError:
log.info('Invalid user name or email when trying to invite students: %s', email)
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': CCXLocator.from_course_locator(course.id, ccx.id)}
)
return redirect(url)
@ensure_csrf_cookie
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_student_management(request, course, ccx=None):
"""Manage the enrollment of individual students in a CCX
"""
if not ccx:
raise Http404
action = request.POST.get('student-action', None)
student_id = request.POST.get('student-id', '')
user = email = None
try:
user = get_student_from_identifier(student_id)
except User.DoesNotExist:
email = student_id
else:
email = user.email
course_key = CCXLocator.from_course_locator(course.id, ccx.id)
try:
validate_email(email)
if action == 'add':
# by decree, no emails sent to students added this way
# by decree, any students added this way are auto_enrolled
enroll_email(course_key, email, auto_enroll=True, email_students=False)
elif action == 'revoke':
unenroll_email(course_key, email, email_students=False)
except ValidationError:
log.info('Invalid user name or email when trying to enroll student: %s', email)
url = reverse(
'ccx_coach_dashboard',
kwargs={'course_id': course_key}
)
return redirect(url)
@contextmanager
def ccx_course(ccx_locator):
"""Create a context in which the course identified by course_locator exists
"""
course = get_course_by_id(ccx_locator)
yield course
def prep_course_for_grading(course, request):
"""Set up course module for overrides to function properly"""
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id, request.user, course, depth=2)
course = get_module_for_descriptor(
request.user, request, course, field_data_cache, course.id, course=course
)
course._field_data_cache = {} # pylint: disable=protected-access
course.set_grading_policy(course.grading_policy)
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_gradebook(request, course, ccx=None):
"""
Show the gradebook for this CCX.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
with ccx_course(ccx_key) as course:
prep_course_for_grading(course, request)
enrolled_students = User.objects.filter(
courseenrollment__course_id=ccx_key,
courseenrollment__is_active=1
).order_by('username').select_related("profile")
student_info = [
{
'username': student.username,
'id': student.id,
'email': student.email,
'grade_summary': student_grades(student, request, course),
'realname': student.profile.name,
}
for student in enrolled_students
]
return render_to_response('courseware/gradebook.html', {
'students': student_info,
'course': course,
'course_id': course.id,
'staff_access': request.user.is_staff,
'ordered_grades': sorted(
course.grade_cutoffs.items(), key=lambda i: i[1], reverse=True),
})
@cache_control(no_cache=True, no_store=True, must_revalidate=True)
@coach_dashboard
def ccx_grades_csv(request, course, ccx=None):
"""
Download grades as CSV.
"""
if not ccx:
raise Http404
ccx_key = CCXLocator.from_course_locator(course.id, ccx.id)
with ccx_course(ccx_key) as course:
prep_course_for_grading(course, request)
enrolled_students = User.objects.filter(
courseenrollment__course_id=ccx_key,
courseenrollment__is_active=1
).order_by('username').select_related("profile")
grades = iterate_grades_for(course, enrolled_students)
header = None
rows = []
for student, gradeset, __ in grades:
if gradeset:
# We were able to successfully grade this student for this
# course.
if not header:
# Encode the header row in utf-8 encoding in case there are
# unicode characters
header = [section['label'].encode('utf-8')
for section in gradeset[u'section_breakdown']]
rows.append(["id", "email", "username", "grade"] + header)
percents = {
section['label']: section.get('percent', 0.0)
for section in gradeset[u'section_breakdown']
if 'label' in section
}
row_percents = [percents.get(label, 0.0) for label in header]
rows.append([student.id, student.email, student.username,
gradeset['percent']] + row_percents)
buf = StringIO()
writer = csv.writer(buf)
for row in rows:
writer.writerow(row)
return HttpResponse(buf.getvalue(), content_type='text/plain')
|
zofuthan/edx-platform
|
lms/djangoapps/ccx/views.py
|
Python
|
agpl-3.0
| 20,122
|
[
"VisIt"
] |
a0e1269dcd8a19f205832926a1e364c3c66dc6fba6300464321f4c61118b171f
|
#!/usr/bin/env python
import shutil
import tempfile
import configparser
from textwrap import dedent
import tarfile
import pyaml
import hashlib
import os
import re
import bs4
import urllib
from urllib import request
from urllib import parse
from urllib import error
from collections import OrderedDict
import logging
import requests
logging.basicConfig(level=logging.INFO, format='[bioconductor_skeleton.py %(asctime)s]: %(message)s')
logger = logging.getLogger()
logging.getLogger("requests").setLevel(logging.WARNING)
base_url = 'http://bioconductor.org/packages/'
# Packages that might be specified in the DESCRIPTION of a package as
# dependencies, but since they're built-in we don't need to specify them in
# the meta.yaml.
#
# Note: this list is from:
#
# conda create -n rtest -c r r
# R -e "rownames(installed.packages())"
BASE_R_PACKAGES = ["base", "boot", "class", "cluster", "codetools", "compiler",
"datasets", "foreign", "graphics", "grDevices", "grid",
"KernSmooth", "lattice", "MASS", "Matrix", "methods",
"mgcv", "nlme", "nnet", "parallel", "rpart", "spatial",
"splines", "stats", "stats4", "survival", "tcltk", "tools",
"utils"]
# A list of packages, in recipe name format
GCC_PACKAGES = ['r-rcpp']
HERE = os.path.abspath(os.path.dirname(__file__))
class PageNotFoundError(Exception): pass
class BioCProjectPage(object):
def __init__(self, package):
"""
Represents a single Bioconductor package page and provides access to
scraped data.
>>> x = BioCProjectPage('DESeq2')
>>> x.tarball_url
'http://bioconductor.org/packages/release/bioc/src/contrib/DESeq2_1.8.2.tar.gz'
"""
self.base_url = base_url
self.package = package
self._md5 = None
self._cached_tarball = None
self._dependencies = None
self.build_number = 0
self.request = requests.get(os.path.join(base_url, package))
if not self.request:
raise PageNotFoundError('Error {0.status_code} ({0.reason})'.format(self.request))
# Since we provide the "short link" we will get redirected. Using
# requests allows us to keep track of the final destination URL, which
# we need for reconstructing the tarball URL.
self.url = self.request.url
# The table at the bottom of the page has the info we want. An earlier
# draft of this script parsed the dependencies from the details table.
# That's still an option if we need a double-check on the DESCRIPTION
# fields.
self.soup = bs4.BeautifulSoup(
self.request.content,
'html.parser')
self.details_table = self.soup.find_all(attrs={'class': 'details'})[0]
# However, it is helpful to get the version info from this table. That
# way we can try getting the bioaRchive tarball and cache that.
for td in self.details_table.findAll('td'):
if td.getText() == 'Version':
version = td.findNext().getText()
break
self.version = version
self.depends_on_gcc = False
@property
def bioaRchive_url(self):
"""
Returns the bioaRchive URL if one exists for this version of this
package, otherwise returns None.
Note that to get the package version, we're still getting the
bioconductor tarball to extract the DESCRIPTION file.
"""
url = 'https://bioarchive.galaxyproject.org/{0.package}_{0.version}.tar.gz'.format(self)
response = requests.get(url)
if response:
return url
elif response.status_code == 404:
return
else:
raise PageNotFoundError("Unexpected error: {0.status_code} ({0.reason})".format(response))
@property
def bioconductor_tarball_url(self):
"""
Return the url to the tarball from the bioconductor site.
"""
        r = re.compile(r'{0}.*\.tar\.gz'.format(self.package))
def f(href):
return href and r.search(href)
results = self.soup.find_all(href=f)
assert len(results) == 1, (
"Found {0} tags with '.tar.gz' in href".format(len(results)))
s = list(results[0].stripped_strings)
assert len(s) == 1
# build the actual URL based on the identified package name and the
# relative URL from the source. Here we're just hard-coding
# '../src/contrib' based on the structure of the bioconductor site.
return os.path.join(parse.urljoin(self.url, '../src/contrib'), s[0])
@property
def tarball_url(self):
url = self.bioaRchive_url
if url:
return url
return self.bioconductor_tarball_url
@property
def tarball_basename(self):
return os.path.basename(self.tarball_url)
@property
def cached_tarball(self):
"""
Downloads the tarball to the `cached_bioconductor_tarballs` dir if one
hasn't already been downloaded for this package.
This is because we need the whole tarball to get the DESCRIPTION file
and to generate an md5 hash, so we might as well save it somewhere.
"""
if self._cached_tarball:
return self._cached_tarball
cache_dir = os.path.join(HERE, 'cached_bioconductor_tarballs')
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
fn = os.path.join(cache_dir, self.tarball_basename)
if os.path.exists(fn):
self._cached_tarball = fn
return fn
tmp = tempfile.NamedTemporaryFile(delete=False).name
with open(tmp, 'wb') as fout:
logger.info('Downloading {0} to {1}'.format(self.tarball_url, fn))
response = requests.get(self.tarball_url)
if response:
fout.write(response.content)
else:
raise PageNotFoundError('Unexpected error {0.status_code} ({0.reason})'.format(response))
shutil.move(tmp, fn)
self._cached_tarball = fn
return fn
@property
def description(self):
"""
Extract the DESCRIPTION file from the tarball and parse it.
"""
t = tarfile.open(self.cached_tarball)
d = t.extractfile(os.path.join(self.package, 'DESCRIPTION')).read()
self._contents = d
c = configparser.ConfigParser(strict=False)
# On-spec config files need a "section", but the DESCRIPTION file
# doesn't have one. So we just add a fake section, and let the
# configparser take care of the details of parsing.
c.read_string('[top]\n' + d.decode('UTF-8'))
e = c['top']
# Glue together newlines
for k in e.keys():
e[k] = e[k].replace('\n', ' ')
return dict(e)
#@property
#def version(self):
# return self.description['version']
@property
def license(self):
return self.description['license']
@property
def imports(self):
try:
return self.description['imports'].split(', ')
except KeyError:
return []
@property
def depends(self):
try:
return self.description['depends'].split(', ')
except KeyError:
return []
def _parse_dependencies(self, items):
"""
The goal is to go from
['package1', 'package2', 'package3 (>= 0.1)', 'package4']
to::
[
('package1', ""),
('package2', ""),
('package3', " >=0.1"),
            ('package4', ""),
]
"""
results = []
for item in items:
toks = [i.strip() for i in item.split('(')]
if len(toks) == 1:
results.append((toks[0], ""))
elif len(toks) == 2:
assert ')' in toks[1]
toks[1] = toks[1].replace(')', '').replace(' ', '')
results.append(tuple(toks))
else:
raise ValueError("Found {0} toks: {1}".format(len(toks), toks))
return results
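    # Illustrative doctest added for clarity (not part of the original
    # script); the method name is hypothetical. __new__ is used so the
    # example does not hit the network in __init__.
    def _parse_dependencies_example(self):
        """
        >>> page = BioCProjectPage.__new__(BioCProjectPage)
        >>> page._parse_dependencies(['IRanges', 'S4Vectors (>= 0.9.25)'])
        [('IRanges', ''), ('S4Vectors', '>=0.9.25')]
        """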
@property
def dependencies(self):
if self._dependencies:
return self._dependencies
results = []
# Some packages specify a minimum R version, which we'll need to keep
# track of
specific_r_version = False
# Sometimes a version is specified only in the `depends` and not in the
# `imports`. We keep the most specific version of each.
version_specs = list(
set(
self._parse_dependencies(self.imports) +
self._parse_dependencies(self.depends)
)
)
versions = {}
for name, version in version_specs:
if name in versions:
if not versions[name] and version:
versions[name] = version
else:
versions[name] = version
for name, version in sorted(versions.items()):
# DESCRIPTION notes base R packages, but we don't need to specify
# them in the dependencies.
if name in BASE_R_PACKAGES:
continue
# Try finding the dependency on the bioconductor site; if it can't
# be found then we assume it's in CRAN.
try:
BioCProjectPage(name)
prefix = 'bioconductor-'
except PageNotFoundError:
prefix = 'r-'
logger.info('{0:>12} dependency: name="{1}" version="{2}"'.format(
{'r-': 'R', 'bioconductor-': 'BioConductor'}[prefix],
name, version))
# add padding to version string
if version:
version = " " + version
if name.lower() == 'r':
# Had some issues with CONDA_R finding the right version if "r"
# had version restrictions. Since we're generally building
# up-to-date packages, we can just use "r".
# # "r >=2.5" rather than "r-r >=2.5"
# specific_r_version = True
# results.append(name.lower() + version)
# results.append('r')
pass
else:
results.append(prefix + name.lower() + version)
if prefix + name.lower() in GCC_PACKAGES:
self.depends_on_gcc = True
# Add R itself
results.append('r')
self._dependencies = results
return self._dependencies
@property
def md5(self):
"""
Calculate the md5 hash of the tarball so it can be filled into the
meta.yaml.
"""
if self._md5 is None:
            with open(self.cached_tarball, 'rb') as f:
                self._md5 = hashlib.md5(f.read()).hexdigest()
return self._md5
@property
def meta_yaml(self):
"""
Build the meta.yaml string based on discovered values.
Here we use a nested OrderedDict so that all meta.yaml files created by
this script have the same consistent format. Otherwise we're at the
mercy of Python dict sorting.
We use pyaml (rather than yaml) because it has better handling of
OrderedDicts.
However pyaml does not support comments, but if there are gcc and llvm
dependencies then they need to be added with preprocessing selectors
for `# [linux]` and `# [osx]`.
We do this with a unique placeholder (not a jinja or $-based
string.Template so as to avoid conflicting with the conda jinja
templating or the `$R` in the test commands, and replace the text once
the yaml is written.
"""
url = self.bioaRchive_url
if not url:
url = self.tarball_url
DEPENDENCIES = sorted(self.dependencies)
d = OrderedDict((
(
'package', OrderedDict((
('name', 'bioconductor-' + self.package.lower()),
('version', self.version),
)),
),
(
'source', OrderedDict((
('fn', self.tarball_basename),
('url', url),
('md5', self.md5),
)),
),
(
'build', OrderedDict((
('number', self.build_number),
('rpaths', ['lib/R/lib/', 'lib/']),
)),
),
(
'requirements', OrderedDict((
# If you don't make copies, pyaml sees these as the same
# object and tries to make a shortcut, causing an error in
# decoding unicode. Possible pyaml bug? Anyway, this fixes
# it.
('build', DEPENDENCIES[:]),
('run', DEPENDENCIES[:]),
)),
),
(
'test', OrderedDict((
('commands',
['''$R -e "library('{package}')"'''.format(
package=self.package)]),
)),
),
(
'about', OrderedDict((
('home', self.url),
('license', self.license),
('summary', self.description['description']),
)),
),
))
if self.depends_on_gcc:
d['requirements']['build'].append('GCC_PLACEHOLDER')
d['requirements']['build'].append('LLVM_PLACEHOLDER')
rendered = pyaml.dumps(d).decode('utf-8')
rendered = rendered.replace('GCC_PLACEHOLDER', 'gcc # [linux]')
rendered = rendered.replace('LLVM_PLACEHOLDER', 'llvm # [osx]')
return rendered
def write_recipe(package, recipe_dir, force=False):
"""
Write the meta.yaml and build.sh files.
"""
proj = BioCProjectPage(package)
recipe_dir = os.path.join(recipe_dir, 'bioconductor-' + proj.package.lower())
if os.path.exists(recipe_dir) and not force:
raise ValueError("{0} already exists, aborting".format(recipe_dir))
else:
if not os.path.exists(recipe_dir):
print('creating %s' % recipe_dir)
os.makedirs(recipe_dir)
# If the version number has not changed but something else in the recipe
    # *has* changed, then bump the build number.
meta_file = os.path.join(recipe_dir, 'meta.yaml')
if os.path.exists(meta_file):
updated_meta = pyaml.yaml.load(proj.meta_yaml)
current_meta = pyaml.yaml.load(open(meta_file))
# pop off the version and build numbers so we can compare the rest of
# the dicts
updated_version = updated_meta['package'].pop('version')
current_version = current_meta['package'].pop('version')
updated_build_number = updated_meta['build'].pop('number')
current_build_number = current_meta['build'].pop('number')
if (
(updated_version == current_version)
and
(updated_meta != current_meta)
):
proj.build_number = int(current_build_number) + 1
with open(os.path.join(recipe_dir, 'meta.yaml'), 'w') as fout:
fout.write(proj.meta_yaml)
with open(os.path.join(recipe_dir, 'build.sh'), 'w') as fout:
fout.write(dedent(
"""
#!/bin/bash
# R refuses to build packages that mark themselves as
# "Priority: Recommended"
mv DESCRIPTION DESCRIPTION.old
grep -v '^Priority: ' DESCRIPTION.old > DESCRIPTION
#
$R CMD INSTALL --build .
#
# # Add more build steps here, if they are necessary.
#
# See
# http://docs.continuum.io/conda/build.html
# for a list of environment variables that are set during the build
# process.
# """
)
)
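# Illustrative sketch added for clarity (not part of the original script):
# the build-number bump above compares everything in the recipe *except* the
# version and the build number themselves. The hypothetical helper below
# restates that rule in isolation, assuming the same meta.yaml dict layout.
def _needs_build_bump(current_meta, updated_meta):
    from copy import deepcopy
    current, updated = deepcopy(current_meta), deepcopy(updated_meta)
    same_version = (current['package'].pop('version') ==
                    updated['package'].pop('version'))
    current['build'].pop('number')
    updated['build'].pop('number')
    # bump only when the version is unchanged but anything else differs
    return same_version and current != updated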
if __name__ == "__main__":
import argparse
ap = argparse.ArgumentParser()
ap.add_argument('package', help='Bioconductor package name')
ap.add_argument('--recipes', default='recipes',
help='Recipe will be created in <recipe-dir>/<package>')
ap.add_argument('--force', action='store_true',
help='Overwrite the contents of an existing recipe')
args = ap.parse_args()
write_recipe(args.package, args.recipes, args.force)
|
ThomasWollmann/bioconda-recipes
|
scripts/bioconductor/bioconductor_skeleton.py
|
Python
|
mit
| 16,637
|
[
"Bioconductor"
] |
9d65f4df3bcf6e0a061d7e829fb74c3a88ea06d5249ed4efc158784ed94c3d30
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Eric Larson <larson.eric.d@gmail.com>
# Guillaume Favelier <guillaume.favelier@gmail.com>
#
# License: Simplified BSD
import contextlib
from functools import partial
import os
import sys
import time
import traceback
import warnings
import numpy as np
from scipy import sparse
from ._brain import _Brain
from .callback import (ShowView, IntSlider, TimeSlider, SmartSlider,
BumpColorbarPoints, UpdateColorbarScale)
from .mplcanvas import MplCanvas
from .view import _lh_views_dict
from ..utils import _show_help, _get_color_list
from ...externals.decorator import decorator
from ...source_space import vertex_to_mni, _read_talxfm
from ...transforms import apply_trans
from ...utils import _ReuseCycle, warn, copy_doc, _validate_type
@decorator
def safe_event(fun, *args, **kwargs):
"""Protect against PyQt5 exiting on event-handling errors."""
try:
return fun(*args, **kwargs)
except Exception:
traceback.print_exc(file=sys.stderr)
class _TimeViewer(object):
"""Class to interact with _Brain."""
def __init__(self, brain, show_traces=False):
from ..backends._pyvista import _require_minimum_version
_require_minimum_version('0.24')
# shared configuration
if hasattr(brain, 'time_viewer'):
raise RuntimeError('brain already has a TimeViewer')
self.brain = brain
self.orientation = list(_lh_views_dict.keys())
self.default_smoothing_range = [0, 15]
# detect notebook
if brain._notebook:
self.notebook = True
self.configure_notebook()
return
else:
self.notebook = False
# Default configuration
self.playback = False
self.visibility = False
self.refresh_rate_ms = max(int(round(1000. / 60.)), 1)
self.default_scaling_range = [0.2, 2.0]
self.default_playback_speed_range = [0.01, 1]
self.default_playback_speed_value = 0.05
self.default_status_bar_msg = "Press ? for help"
all_keys = ('lh', 'rh', 'vol')
self.act_data_smooth = {key: (None, None) for key in all_keys}
self.color_cycle = None
self.mpl_canvas = None
self.picked_points = {key: list() for key in all_keys}
self.pick_table = dict()
self._mouse_no_mvt = -1
self.icons = dict()
self.actions = dict()
self.callbacks = dict()
self.sliders = dict()
self.keys = ('fmin', 'fmid', 'fmax')
self.slider_length = 0.02
self.slider_width = 0.04
self.slider_color = (0.43137255, 0.44313725, 0.45882353)
self.slider_tube_width = 0.04
self.slider_tube_color = (0.69803922, 0.70196078, 0.70980392)
# Direct access parameters:
self.brain.time_viewer = self
self.plotter = brain._renderer.plotter
self.main_menu = self.plotter.main_menu
self.window = self.plotter.app_window
self.tool_bar = self.window.addToolBar("toolbar")
self.status_bar = self.window.statusBar()
self.interactor = self.plotter.interactor
self.window.signal_close.connect(self.clean)
# Derived parameters:
self.playback_speed = self.default_playback_speed_value
_validate_type(show_traces, (bool, str, 'numeric'), 'show_traces')
self.interactor_fraction = 0.25
if isinstance(show_traces, str):
            assert show_traces == 'separate'  # should be guaranteed earlier
self.show_traces = True
self.separate_canvas = True
else:
if isinstance(show_traces, bool):
self.show_traces = show_traces
else:
show_traces = float(show_traces)
if not 0 < show_traces < 1:
raise ValueError(
'show traces, if numeric, must be between 0 and 1, '
f'got {show_traces}')
self.show_traces = True
self.interactor_fraction = show_traces
self.separate_canvas = False
del show_traces
self._spheres = list()
self.load_icons()
self.configure_time_label()
self.configure_sliders()
self.configure_scalar_bar()
self.configure_playback()
self.configure_point_picking()
self.configure_menu()
self.configure_tool_bar()
self.configure_status_bar()
# show everything at the end
self.toggle_interface()
with self.ensure_minimum_sizes():
self.brain.show()
@contextlib.contextmanager
def ensure_minimum_sizes(self):
from ..backends._pyvista import _process_events
sz = self.brain._size
adjust_mpl = self.show_traces and not self.separate_canvas
if not adjust_mpl:
yield
else:
mpl_h = int(round((sz[1] * self.interactor_fraction) /
(1 - self.interactor_fraction)))
self.mpl_canvas.canvas.setMinimumSize(sz[0], mpl_h)
try:
yield
finally:
self.splitter.setSizes([sz[1], mpl_h])
_process_events(self.plotter)
_process_events(self.plotter)
self.mpl_canvas.canvas.setMinimumSize(0, 0)
_process_events(self.plotter)
_process_events(self.plotter)
# sizes could change, update views
for hemi in ('lh', 'rh'):
for ri, ci, v in self.brain._iter_views(hemi):
self.brain.show_view(view=v, row=ri, col=ci)
_process_events(self.plotter)
def toggle_interface(self, value=None):
if value is None:
self.visibility = not self.visibility
else:
self.visibility = value
# update tool bar icon
if self.visibility:
self.actions["visibility"].setIcon(self.icons["visibility_on"])
else:
self.actions["visibility"].setIcon(self.icons["visibility_off"])
# manage sliders
for slider in self.plotter.slider_widgets:
slider_rep = slider.GetRepresentation()
if self.visibility:
slider_rep.VisibilityOn()
else:
slider_rep.VisibilityOff()
# manage time label
time_label = self.brain._data['time_label']
# if we actually have time points, we will show the slider so
# hide the time actor
have_ts = self.brain._times is not None and len(self.brain._times) > 1
if self.time_actor is not None:
if self.visibility and time_label is not None and not have_ts:
self.time_actor.SetInput(time_label(self.brain._current_time))
self.time_actor.VisibilityOn()
else:
self.time_actor.VisibilityOff()
self.plotter.update()
def _save_movie(self, filename, **kwargs):
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QCursor
def frame_callback(frame, n_frames):
if frame == n_frames:
# On the ImageIO step
self.status_msg.setText(
"Saving with ImageIO: %s"
% filename
)
self.status_msg.show()
self.status_progress.hide()
self.status_bar.layout().update()
else:
self.status_msg.setText(
"Rendering images (frame %d / %d) ..."
% (frame + 1, n_frames)
)
self.status_msg.show()
self.status_progress.show()
self.status_progress.setRange(0, n_frames - 1)
self.status_progress.setValue(frame)
self.status_progress.update()
self.status_progress.repaint()
self.status_msg.update()
self.status_msg.parent().update()
self.status_msg.repaint()
# temporarily hide interface
default_visibility = self.visibility
self.toggle_interface(value=False)
# set cursor to busy
default_cursor = self.interactor.cursor()
self.interactor.setCursor(QCursor(Qt.WaitCursor))
try:
self.brain.save_movie(
filename=filename,
time_dilation=(1. / self.playback_speed),
callback=frame_callback,
**kwargs
)
except (Exception, KeyboardInterrupt):
warn('Movie saving aborted:\n' + traceback.format_exc())
# restore visibility
self.toggle_interface(value=default_visibility)
# restore cursor
self.interactor.setCursor(default_cursor)
@copy_doc(_Brain.save_movie)
def save_movie(self, filename=None, **kwargs):
try:
from pyvista.plotting.qt_plotting import FileDialog
except ImportError:
from pyvistaqt.plotting import FileDialog
if filename is None:
self.status_msg.setText("Choose movie path ...")
self.status_msg.show()
self.status_progress.setValue(0)
def _clean(unused):
del unused
self.status_msg.hide()
self.status_progress.hide()
dialog = FileDialog(
self.plotter.app_window,
callback=partial(self._save_movie, **kwargs)
)
dialog.setDirectory(os.getcwd())
dialog.finished.connect(_clean)
return dialog
else:
self._save_movie(filename=filename, **kwargs)
return
def apply_auto_scaling(self):
self.brain.update_auto_scaling()
for key in ('fmin', 'fmid', 'fmax'):
self.reps[key].SetValue(self.brain._data[key])
self.plotter.update()
def restore_user_scaling(self):
self.brain.update_auto_scaling(restore=True)
for key in ('fmin', 'fmid', 'fmax'):
self.reps[key].SetValue(self.brain._data[key])
self.plotter.update()
def toggle_playback(self, value=None):
if value is None:
self.playback = not self.playback
else:
self.playback = value
# update tool bar icon
if self.playback:
self.actions["play"].setIcon(self.icons["pause"])
else:
self.actions["play"].setIcon(self.icons["play"])
if self.playback:
time_data = self.brain._data['time']
max_time = np.max(time_data)
if self.brain._current_time == max_time: # start over
self.brain.set_time_point(0) # first index
self._last_tick = time.time()
def reset(self):
self.brain.reset_view()
max_time = len(self.brain._data['time']) - 1
if max_time > 0:
self.callbacks["time"](
self.brain._data["initial_time_idx"],
update_widget=True,
)
self.plotter.update()
def set_playback_speed(self, speed):
self.playback_speed = speed
@safe_event
def play(self):
if self.playback:
try:
self._advance()
except Exception:
self.toggle_playback(value=False)
raise
def _advance(self):
this_time = time.time()
delta = this_time - self._last_tick
self._last_tick = time.time()
time_data = self.brain._data['time']
times = np.arange(self.brain._n_times)
time_shift = delta * self.playback_speed
max_time = np.max(time_data)
time_point = min(self.brain._current_time + time_shift, max_time)
# always use linear here -- this does not determine the data
# interpolation mode, it just finds where we are (in time) in
# terms of the time indices
idx = np.interp(time_point, time_data, times)
self.callbacks["time"](idx, update_widget=True)
if time_point == max_time:
self.toggle_playback(value=False)
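    # Illustrative doctest added for clarity (not part of upstream MNE); the
    # method name is hypothetical. _advance() maps the new playback time onto
    # a *fractional* frame index with np.interp, assuming time samples at
    # 0.0, 0.1 and 0.2 s here:
    def _advance_index_example(self):
        """
        >>> import numpy as np
        >>> float(np.interp(0.15, [0.0, 0.1, 0.2], [0, 1, 2]))
        1.5
        """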
def set_slider_style(self):
for slider in self.sliders.values():
if slider is not None:
slider_rep = slider.GetRepresentation()
slider_rep.SetSliderLength(self.slider_length)
slider_rep.SetSliderWidth(self.slider_width)
slider_rep.SetTubeWidth(self.slider_tube_width)
slider_rep.GetSliderProperty().SetColor(self.slider_color)
slider_rep.GetTubeProperty().SetColor(self.slider_tube_color)
slider_rep.GetLabelProperty().SetShadow(False)
slider_rep.GetLabelProperty().SetBold(True)
slider_rep.GetLabelProperty().SetColor(self.brain._fg_color)
slider_rep.GetTitleProperty().ShallowCopy(
slider_rep.GetLabelProperty()
)
slider_rep.GetCapProperty().SetOpacity(0)
def configure_notebook(self):
from ._notebook import _NotebookInteractor
self.brain._renderer.figure.display = _NotebookInteractor(self)
def configure_time_label(self):
self.time_actor = self.brain._data.get('time_actor')
if self.time_actor is not None:
self.time_actor.SetPosition(0.5, 0.03)
self.time_actor.GetTextProperty().SetJustificationToCentered()
self.time_actor.GetTextProperty().BoldOn()
self.time_actor.VisibilityOff()
def configure_scalar_bar(self):
if self.brain._colorbar_added:
scalar_bar = self.plotter.scalar_bar
scalar_bar.SetOrientationToVertical()
scalar_bar.SetHeight(0.6)
scalar_bar.SetWidth(0.05)
scalar_bar.SetPosition(0.02, 0.2)
def configure_sliders(self):
# Orientation slider
# Use 'lh' as a reference for orientation for 'both'
if self.brain._hemi == 'both':
hemis_ref = ['lh']
else:
hemis_ref = self.brain._hemis
for hemi in hemis_ref:
for ri, ci, view in self.brain._iter_views(hemi):
orientation_name = f"orientation_{hemi}_{ri}_{ci}"
self.plotter.subplot(ri, ci)
if view == 'flat':
self.callbacks[orientation_name] = None
continue
self.callbacks[orientation_name] = ShowView(
plotter=self.plotter,
brain=self.brain,
orientation=self.orientation,
hemi=hemi,
row=ri,
col=ci,
)
self.sliders[orientation_name] = \
self.plotter.add_text_slider_widget(
self.callbacks[orientation_name],
value=0,
data=self.orientation,
pointa=(0.82, 0.74),
pointb=(0.98, 0.74),
event_type='always'
)
orientation_rep = \
self.sliders[orientation_name].GetRepresentation()
orientation_rep.ShowSliderLabelOff()
self.callbacks[orientation_name].slider_rep = orientation_rep
self.callbacks[orientation_name](view, update_widget=True)
# Put other sliders on the bottom right view
ri, ci = np.array(self.brain._subplot_shape) - 1
self.plotter.subplot(ri, ci)
# Smoothing slider
self.callbacks["smoothing"] = IntSlider(
plotter=self.plotter,
callback=self.brain.set_data_smoothing,
first_call=False,
)
self.sliders["smoothing"] = self.plotter.add_slider_widget(
self.callbacks["smoothing"],
value=self.brain._data['smoothing_steps'],
rng=self.default_smoothing_range, title="smoothing",
pointa=(0.82, 0.90),
pointb=(0.98, 0.90)
)
self.callbacks["smoothing"].slider_rep = \
self.sliders["smoothing"].GetRepresentation()
# Time slider
max_time = len(self.brain._data['time']) - 1
# VTK on macOS bombs if we create these then hide them, so don't
# even create them
if max_time < 1:
self.callbacks["time"] = None
self.sliders["time"] = None
else:
self.callbacks["time"] = TimeSlider(
plotter=self.plotter,
brain=self.brain,
first_call=False,
callback=self.plot_time_line,
)
self.sliders["time"] = self.plotter.add_slider_widget(
self.callbacks["time"],
value=self.brain._data['time_idx'],
rng=[0, max_time],
pointa=(0.23, 0.1),
pointb=(0.77, 0.1),
event_type='always'
)
self.callbacks["time"].slider_rep = \
self.sliders["time"].GetRepresentation()
# configure properties of the time slider
self.sliders["time"].GetRepresentation().SetLabelFormat(
'idx=%0.1f')
current_time = self.brain._current_time
assert current_time is not None # should never be the case, float
time_label = self.brain._data['time_label']
if callable(time_label):
current_time = time_label(current_time)
else:
current_time = time_label
if self.sliders["time"] is not None:
self.sliders["time"].GetRepresentation().SetTitleText(current_time)
if self.time_actor is not None:
self.time_actor.SetInput(current_time)
del current_time
# Playback speed slider
if self.sliders["time"] is None:
self.callbacks["playback_speed"] = None
self.sliders["playback_speed"] = None
else:
self.callbacks["playback_speed"] = SmartSlider(
plotter=self.plotter,
callback=self.set_playback_speed,
)
self.sliders["playback_speed"] = self.plotter.add_slider_widget(
self.callbacks["playback_speed"],
value=self.default_playback_speed_value,
rng=self.default_playback_speed_range, title="speed",
pointa=(0.02, 0.1),
pointb=(0.18, 0.1),
event_type='always'
)
self.callbacks["playback_speed"].slider_rep = \
self.sliders["playback_speed"].GetRepresentation()
# Colormap slider
pointa = np.array((0.82, 0.26))
pointb = np.array((0.98, 0.26))
shift = np.array([0, 0.1])
for idx, key in enumerate(self.keys):
title = "clim" if not idx else ""
rng = _get_range(self.brain)
self.callbacks[key] = BumpColorbarPoints(
plotter=self.plotter,
brain=self.brain,
name=key
)
self.sliders[key] = self.plotter.add_slider_widget(
self.callbacks[key],
value=self.brain._data[key],
rng=rng, title=title,
pointa=pointa + idx * shift,
pointb=pointb + idx * shift,
event_type="always",
)
# fscale
self.callbacks["fscale"] = UpdateColorbarScale(
plotter=self.plotter,
brain=self.brain,
)
self.sliders["fscale"] = self.plotter.add_slider_widget(
self.callbacks["fscale"],
value=1.0,
rng=self.default_scaling_range, title="fscale",
pointa=(0.82, 0.10),
pointb=(0.98, 0.10)
)
self.callbacks["fscale"].slider_rep = \
self.sliders["fscale"].GetRepresentation()
# register colorbar slider representations
self.reps = \
{key: self.sliders[key].GetRepresentation() for key in self.keys}
for name in ("fmin", "fmid", "fmax", "fscale"):
self.callbacks[name].reps = self.reps
# set the slider style
self.set_slider_style()
def configure_playback(self):
self.plotter.add_callback(self.play, self.refresh_rate_ms)
def configure_point_picking(self):
if not self.show_traces:
return
from ..backends._pyvista import _update_picking_callback
# use a matplotlib canvas
self.color_cycle = _ReuseCycle(_get_color_list())
win = self.plotter.app_window
dpi = win.windowHandle().screen().logicalDotsPerInch()
ratio = (1 - self.interactor_fraction) / self.interactor_fraction
w = self.interactor.geometry().width()
h = self.interactor.geometry().height() / ratio
# Get the fractional components for the brain and mpl
self.mpl_canvas = MplCanvas(self, w / dpi, h / dpi, dpi)
xlim = [np.min(self.brain._data['time']),
np.max(self.brain._data['time'])]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=UserWarning)
self.mpl_canvas.axes.set(xlim=xlim)
if not self.separate_canvas:
from PyQt5.QtWidgets import QSplitter
from PyQt5.QtCore import Qt
canvas = self.mpl_canvas.canvas
vlayout = self.plotter.frame.layout()
vlayout.removeWidget(self.interactor)
self.splitter = splitter = QSplitter(
orientation=Qt.Vertical, parent=self.plotter.frame)
vlayout.addWidget(splitter)
splitter.addWidget(self.interactor)
splitter.addWidget(canvas)
self.mpl_canvas.set_color(
bg_color=self.brain._bg_color,
fg_color=self.brain._fg_color,
)
self.mpl_canvas.show()
# get data for each hemi
for idx, hemi in enumerate(['vol', 'lh', 'rh']):
hemi_data = self.brain._data.get(hemi)
if hemi_data is not None:
act_data = hemi_data['array']
if act_data.ndim == 3:
act_data = np.linalg.norm(act_data, axis=1)
smooth_mat = hemi_data.get('smooth_mat')
vertices = hemi_data['vertices']
if hemi == 'vol':
assert smooth_mat is None
smooth_mat = sparse.csr_matrix(
(np.ones(len(vertices)),
(vertices, np.arange(len(vertices)))))
self.act_data_smooth[hemi] = (act_data, smooth_mat)
# plot the GFP
y = np.concatenate(list(v[0] for v in self.act_data_smooth.values()
if v[0] is not None))
y = np.linalg.norm(y, axis=0) / np.sqrt(len(y))
self.mpl_canvas.axes.plot(
self.brain._data['time'], y,
lw=3, label='GFP', zorder=3, color=self.brain._fg_color,
alpha=0.5, ls=':')
# now plot the time line
self.plot_time_line()
# then the picked points
for idx, hemi in enumerate(['lh', 'rh', 'vol']):
act_data = self.act_data_smooth.get(hemi, [None])[0]
if act_data is None:
continue
hemi_data = self.brain._data[hemi]
vertices = hemi_data['vertices']
# simulate a picked renderer
if self.brain._hemi in ('both', 'rh') or hemi == 'vol':
idx = 0
self.picked_renderer = self.plotter.renderers[idx]
# initialize the default point
if self.brain._data['initial_time'] is not None:
# pick at that time
use_data = act_data[
:, [np.round(self.brain._data['time_idx']).astype(int)]]
else:
use_data = act_data
ind = np.unravel_index(np.argmax(np.abs(use_data), axis=None),
use_data.shape)
if hemi == 'vol':
mesh = hemi_data['grid']
else:
mesh = hemi_data['mesh']
vertex_id = vertices[ind[0]]
self.add_point(hemi, mesh, vertex_id)
_update_picking_callback(
self.plotter,
self.on_mouse_move,
self.on_button_press,
self.on_button_release,
self.on_pick
)
def load_icons(self):
from PyQt5.QtGui import QIcon
from ..backends._pyvista import _init_resources
_init_resources()
self.icons["help"] = QIcon(":/help.svg")
self.icons["play"] = QIcon(":/play.svg")
self.icons["pause"] = QIcon(":/pause.svg")
self.icons["reset"] = QIcon(":/reset.svg")
self.icons["scale"] = QIcon(":/scale.svg")
self.icons["clear"] = QIcon(":/clear.svg")
self.icons["movie"] = QIcon(":/movie.svg")
self.icons["restore"] = QIcon(":/restore.svg")
self.icons["screenshot"] = QIcon(":/screenshot.svg")
self.icons["visibility_on"] = QIcon(":/visibility_on.svg")
self.icons["visibility_off"] = QIcon(":/visibility_off.svg")
def configure_tool_bar(self):
self.actions["screenshot"] = self.tool_bar.addAction(
self.icons["screenshot"],
"Take a screenshot",
self.plotter._qt_screenshot
)
self.actions["movie"] = self.tool_bar.addAction(
self.icons["movie"],
"Save movie...",
self.save_movie
)
self.actions["visibility"] = self.tool_bar.addAction(
self.icons["visibility_on"],
"Toggle Visibility",
self.toggle_interface
)
self.actions["play"] = self.tool_bar.addAction(
self.icons["play"],
"Play/Pause",
self.toggle_playback
)
self.actions["reset"] = self.tool_bar.addAction(
self.icons["reset"],
"Reset",
self.reset
)
self.actions["scale"] = self.tool_bar.addAction(
self.icons["scale"],
"Auto-Scale",
self.apply_auto_scaling
)
self.actions["restore"] = self.tool_bar.addAction(
self.icons["restore"],
"Restore scaling",
self.restore_user_scaling
)
self.actions["clear"] = self.tool_bar.addAction(
self.icons["clear"],
"Clear traces",
self.clear_points
)
self.actions["help"] = self.tool_bar.addAction(
self.icons["help"],
"Help",
self.help
)
self.actions["movie"].setShortcut("ctrl+shift+s")
self.actions["visibility"].setShortcut("i")
self.actions["play"].setShortcut(" ")
self.actions["scale"].setShortcut("s")
self.actions["restore"].setShortcut("r")
self.actions["clear"].setShortcut("c")
self.actions["help"].setShortcut("?")
def configure_menu(self):
# remove default picking menu
to_remove = list()
for action in self.main_menu.actions():
if action.text() == "Tools":
to_remove.append(action)
for action in to_remove:
self.main_menu.removeAction(action)
# add help menu
menu = self.main_menu.addMenu('Help')
menu.addAction('Show MNE key bindings\t?', self.help)
def configure_status_bar(self):
from PyQt5.QtWidgets import QLabel, QProgressBar
self.status_msg = QLabel(self.default_status_bar_msg)
self.status_progress = QProgressBar()
self.status_bar.layout().addWidget(self.status_msg, 1)
self.status_bar.layout().addWidget(self.status_progress, 0)
self.status_progress.hide()
def on_mouse_move(self, vtk_picker, event):
if self._mouse_no_mvt:
self._mouse_no_mvt -= 1
def on_button_press(self, vtk_picker, event):
self._mouse_no_mvt = 2
def on_button_release(self, vtk_picker, event):
if self._mouse_no_mvt > 0:
x, y = vtk_picker.GetEventPosition()
# programmatically detect the picked renderer
self.picked_renderer = self.plotter.iren.FindPokedRenderer(x, y)
# trigger the pick
self.plotter.picker.Pick(x, y, 0, self.picked_renderer)
self._mouse_no_mvt = 0
def on_pick(self, vtk_picker, event):
# vtk_picker is a vtkCellPicker
cell_id = vtk_picker.GetCellId()
mesh = vtk_picker.GetDataSet()
if mesh is None or cell_id == -1 or not self._mouse_no_mvt:
return # don't pick
# 1) Check to see if there are any spheres along the ray
if len(self._spheres):
collection = vtk_picker.GetProp3Ds()
found_sphere = None
for ii in range(collection.GetNumberOfItems()):
actor = collection.GetItemAsObject(ii)
for sphere in self._spheres:
if any(a is actor for a in sphere._actors):
found_sphere = sphere
break
if found_sphere is not None:
break
if found_sphere is not None:
assert found_sphere._is_point
mesh = found_sphere
# 2) Remove sphere if it's what we have
if hasattr(mesh, "_is_point"):
self.remove_point(mesh)
return
# 3) Otherwise, pick the objects in the scene
try:
hemi = mesh._hemi
except AttributeError: # volume
hemi = 'vol'
else:
assert hemi in ('lh', 'rh')
if self.act_data_smooth[hemi][0] is None: # no data to add for hemi
return
pos = np.array(vtk_picker.GetPickPosition())
if hemi == 'vol':
# VTK will give us the point closest to the viewer in the vol.
# We want to pick the point with the maximum value along the
# camera-to-click array, which fortunately we can get "just"
# by inspecting the points that are sufficiently close to the
# ray.
grid = mesh = self.brain._data[hemi]['grid']
vertices = self.brain._data[hemi]['vertices']
coords = self.brain._data[hemi]['grid_coords'][vertices]
scalars = grid.cell_arrays['values'][vertices]
spacing = np.array(grid.GetSpacing())
max_dist = np.linalg.norm(spacing) / 2.
origin = vtk_picker.GetRenderer().GetActiveCamera().GetPosition()
ori = pos - origin
ori /= np.linalg.norm(ori)
# the magic formula: distance from a ray to a given point
dists = np.linalg.norm(np.cross(ori, coords - pos), axis=1)
assert dists.shape == (len(coords),)
mask = dists <= max_dist
idx = np.where(mask)[0]
if len(idx) == 0:
return # weird point on edge of volume?
# useful for debugging the ray by mapping it into the volume:
# dists = dists - dists.min()
# dists = (1. - dists / dists.max()) * self.brain._cmap_range[1]
# grid.cell_arrays['values'][vertices] = dists * mask
idx = idx[np.argmax(np.abs(scalars[idx]))]
vertex_id = vertices[idx]
# Naive way: convert pos directly to idx; i.e., apply mri_src_t
# shape = self.brain._data[hemi]['grid_shape']
# taking into account the cell vs point difference (spacing/2)
# shift = np.array(grid.GetOrigin()) + spacing / 2.
# ijk = np.round((pos - shift) / spacing).astype(int)
# vertex_id = np.ravel_multi_index(ijk, shape, order='F')
else:
vtk_cell = mesh.GetCell(cell_id)
cell = [vtk_cell.GetPointId(point_id) for point_id
in range(vtk_cell.GetNumberOfPoints())]
vertices = mesh.points[cell]
idx = np.argmin(abs(vertices - pos), axis=0)
vertex_id = cell[idx[0]]
if vertex_id not in self.picked_points[hemi]:
self.add_point(hemi, mesh, vertex_id)
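    # Illustrative doctest added for clarity (not part of upstream MNE); the
    # method name and numbers are hypothetical. The "magic formula" above is
    # the usual point-to-line distance |cross(unit_direction, point - origin)|
    # for a ray passing through `pos`:
    def _ray_distance_example(self):
        """
        >>> import numpy as np
        >>> ori = np.array([1., 0., 0.])    # unit vector along the pick ray
        >>> pos = np.array([0., 0., 0.])    # a point on the ray
        >>> coord = np.array([5., 3., 4.])  # candidate voxel centre
        >>> float(np.linalg.norm(np.cross(ori, coord - pos)))
        5.0
        """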
def add_point(self, hemi, mesh, vertex_id):
# skip if the wrong hemi is selected
if self.act_data_smooth[hemi][0] is None:
return
from ..backends._pyvista import _sphere
color = next(self.color_cycle)
line = self.plot_time_course(hemi, vertex_id, color)
if hemi == 'vol':
ijk = np.unravel_index(
vertex_id, np.array(mesh.GetDimensions()) - 1, order='F')
# should just be GetCentroid(center), but apparently it's VTK9+:
# center = np.empty(3)
# voxel.GetCentroid(center)
voxel = mesh.GetCell(*ijk)
pts = voxel.GetPoints()
n_pts = pts.GetNumberOfPoints()
center = np.empty((n_pts, 3))
for ii in range(pts.GetNumberOfPoints()):
pts.GetPoint(ii, center[ii])
center = np.mean(center, axis=0)
else:
center = mesh.GetPoints().GetPoint(vertex_id)
del mesh
# from the picked renderer to the subplot coords
rindex = self.plotter.renderers.index(self.picked_renderer)
row, col = self.plotter.index_to_loc(rindex)
actors = list()
spheres = list()
for ri, ci, _ in self.brain._iter_views(hemi):
self.plotter.subplot(ri, ci)
# Using _sphere() instead of renderer.sphere() for 2 reasons:
# 1) renderer.sphere() fails on Windows in a scenario where a lot
# of picking requests are done in a short span of time (could be
# mitigated with synchronization/delay?)
# 2) the glyph filter is used in renderer.sphere() but only one
# sphere is required in this function.
actor, sphere = _sphere(
plotter=self.plotter,
center=np.array(center),
color=color,
radius=4.0,
)
actors.append(actor)
spheres.append(sphere)
# add metadata for picking
for sphere in spheres:
sphere._is_point = True
sphere._hemi = hemi
sphere._line = line
sphere._actors = actors
sphere._color = color
sphere._vertex_id = vertex_id
self.picked_points[hemi].append(vertex_id)
self._spheres.extend(spheres)
self.pick_table[vertex_id] = spheres
def remove_point(self, mesh):
vertex_id = mesh._vertex_id
if vertex_id not in self.pick_table:
return
hemi = mesh._hemi
color = mesh._color
spheres = self.pick_table[vertex_id]
spheres[0]._line.remove()
self.mpl_canvas.update_plot()
self.picked_points[hemi].remove(vertex_id)
with warnings.catch_warnings(record=True):
# We intentionally ignore these in case we have traversed the
# entire color cycle
warnings.simplefilter('ignore')
self.color_cycle.restore(color)
for sphere in spheres:
# remove all actors
self.plotter.remove_actor(sphere._actors)
sphere._actors = None
self._spheres.pop(self._spheres.index(sphere))
self.pick_table.pop(vertex_id)
def clear_points(self):
for sphere in list(self._spheres): # will remove itself, so copy
self.remove_point(sphere)
assert sum(len(v) for v in self.picked_points.values()) == 0
assert len(self.pick_table) == 0
assert len(self._spheres) == 0
def plot_time_course(self, hemi, vertex_id, color):
if self.mpl_canvas is None:
return
time = self.brain._data['time'].copy() # avoid circular ref
if hemi == 'vol':
hemi_str = 'V'
xfm = _read_talxfm(
self.brain._subject_id, self.brain._subjects_dir)
if self.brain._units == 'm':
xfm['trans'][:3, 3] /= 1000.
ijk = np.unravel_index(
vertex_id, self.brain._data[hemi]['grid_shape'], order='F')
src_mri_t = self.brain._data[hemi]['grid_src_mri_t']
mni = apply_trans(np.dot(xfm['trans'], src_mri_t), ijk)
else:
hemi_str = 'L' if hemi == 'lh' else 'R'
mni = vertex_to_mni(
vertices=vertex_id,
hemis=0 if hemi == 'lh' else 1,
subject=self.brain._subject_id,
subjects_dir=self.brain._subjects_dir
)
label = "{}:{} MNI: {}".format(
hemi_str, str(vertex_id).ljust(6),
', '.join('%5.1f' % m for m in mni))
act_data, smooth = self.act_data_smooth[hemi]
if smooth is not None:
act_data = smooth[vertex_id].dot(act_data)[0]
else:
act_data = act_data[vertex_id].copy()
line = self.mpl_canvas.plot(
time,
act_data,
label=label,
lw=1.,
color=color,
zorder=4,
)
return line
def plot_time_line(self):
if self.mpl_canvas is None:
return
if isinstance(self.show_traces, bool) and self.show_traces:
# add time information
current_time = self.brain._current_time
if not hasattr(self, "time_line"):
self.time_line = self.mpl_canvas.plot_time_line(
x=current_time,
label='time',
color=self.brain._fg_color,
lw=1,
)
self.time_line.set_xdata(current_time)
self.mpl_canvas.update_plot()
def help(self):
pairs = [
('?', 'Display help window'),
('i', 'Toggle interface'),
('s', 'Apply auto-scaling'),
('r', 'Restore original clim'),
('c', 'Clear all traces'),
('Space', 'Start/Pause playback'),
]
text1, text2 = zip(*pairs)
text1 = '\n'.join(text1)
text2 = '\n'.join(text2)
_show_help(
col1=text1,
col2=text2,
width=5,
height=2,
)
def clear_callbacks(self):
for callback in self.callbacks.values():
if callback is not None:
if hasattr(callback, "plotter"):
callback.plotter = None
if hasattr(callback, "brain"):
callback.brain = None
if hasattr(callback, "slider_rep"):
callback.slider_rep = None
self.callbacks.clear()
@safe_event
def clean(self):
# resolve the reference cycle
self.clear_points()
self.clear_callbacks()
self.actions.clear()
self.sliders.clear()
self.reps = None
self.brain.time_viewer = None
self.brain = None
self.plotter = None
self.main_menu = None
self.window = None
self.tool_bar = None
self.status_bar = None
self.interactor = None
if self.mpl_canvas is not None:
self.mpl_canvas.clear()
self.mpl_canvas = None
self.time_actor = None
self.picked_renderer = None
for key in list(self.act_data_smooth.keys()):
self.act_data_smooth[key] = None
def _get_range(brain):
val = np.abs(np.concatenate(list(brain._current_act_data.values())))
return [np.min(val), np.max(val)]
def _normalize(point, shape):
return (point[0] / shape[1], point[1] / shape[0])
|
cjayb/mne-python
|
mne/viz/_brain/_timeviewer.py
|
Python
|
bsd-3-clause
| 39,988
|
[
"VTK"
] |
4a0a1ab6bb1c2ca3f5748f3130a7f3394b40104b37d57fa890c2a9f9f9c4cc28
|
# Copyright (C) 2008-2009 Open Society Institute
# Thomas Moroz: tmoroz.org
# 2010-2011 Large Blue
# Fergus Doyle: fergus.doyle@largeblue.com
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License Version 2 as published
# by the Free Software Foundation. You may not use, modify or distribute
# this program under any other version of the GNU General Public License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from datetime import datetime
import math
import os
from pprint import pformat
import logging
from zope.component import queryUtility
from repoze.bfg.interfaces import ISettings
from repoze.bfg.traversal import model_path
from repoze.bfg.traversal import find_interface
from repoze.folder.interfaces import IFolder
from repoze.lemonade.content import is_content
from opencore.models.interfaces import ICommunity
from opencore.utils import find_catalog
from opencore.utils import find_site
from opencore.utils import find_tags
from opencore.utils import find_users
log = logging.getLogger(__name__)
_NOW = None
def _now(): # unittests can replace this to get known results
return _NOW or datetime.now()
def postorder(startnode):
def visit(node):
if IFolder.providedBy(node):
for child in node.values():
for result in visit(child):
yield result
yield node
return visit(startnode)
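# Illustrative sketch added for clarity (not part of the original module);
# the function is hypothetical and only documents the traversal order that
# index_content() and set_created() rely on.
def _postorder_example():
    """
    Given a hypothetical layout::

        site/
          blog/
            entry1

    ``list(postorder(site))`` yields ``[entry1, blog, site]`` -- every child
    is visited before its parent, and the root comes last.
    """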
def index_content(obj, event):
""" Index content (an IObjectAddedEvent subscriber) """
log.debug('index_content: obj=%s, event=%s' % (obj, event))
catalog = find_catalog(obj)
if catalog is not None:
for node in postorder(obj):
            if is_content(node):
path = model_path(node)
docid = getattr(node, 'docid', None)
if docid is None:
docid = node.docid = catalog.document_map.add(path)
else:
catalog.document_map.add(path, docid)
catalog.index_doc(docid, node)
def unindex_content(obj, docids):
""" Unindex given 'docids'.
"""
catalog = find_catalog(obj)
if catalog is not None:
for docid in docids:
catalog.unindex_doc(docid)
catalog.document_map.remove_docid(docid)
def cleanup_content_tags(obj, docids):
""" Remove any tags associated with 'docids'.
"""
tags = find_tags(obj)
if tags is not None:
for docid in docids:
tags.delete(item=docid)
def handle_content_removed(obj, event):
""" IObjectWillBeRemovedEvent subscriber.
"""
catalog = find_catalog(obj)
if catalog is not None:
path = model_path(obj)
num, docids = catalog.search(path={'query': path,
'include_path': True})
unindex_content(obj, docids)
cleanup_content_tags(obj, docids)
def reindex_content(obj, event):
""" Reindex a single piece of content (non-recursive); an
IObjectModifed event subscriber """
catalog = find_catalog(obj)
if catalog is not None:
path = model_path(obj)
docid = catalog.document_map.docid_for_address(path)
catalog.reindex_doc(docid, obj)
def set_modified(obj, event):
""" Set the modified date on a single piece of content.
This subscriber is non-recursive.
Intended use is as an IObjectModified event subscriber.
"""
if is_content(obj):
now = _now()
obj.modified = now
_modify_community(obj, now)
def set_created(obj, event):
""" Add modified and created attributes to obj and children.
Only add to content objects which do not yet have them (recursively)
Intended use is as an IObjectWillBeAddedEvent subscriber.
"""
now = _now()
for node in postorder(obj):
        if is_content(node):
if not getattr(node, 'modified', None):
node.modified = now
if not getattr(node, 'created', None):
node.created = now
parent = getattr(event, 'parent', None)
if parent is not None:
_modify_community(parent, now)
def _modify_community(obj, when):
# manage content_modified on community whenever a piece of content
# in a community is changed
community = find_interface(obj, ICommunity)
if community is not None:
community.content_modified = when
catalog = find_catalog(community)
if catalog is not None: # may not be wired into the site yet
index = catalog.get('content_modified')
if index is not None:
index.index_doc(community.docid, community)
def delete_community(obj, event):
# delete the groups related to the community when a community is
# deleted
context = obj
users = find_users(context)
users.delete_group(context.members_group_name)
users.delete_group(context.moderators_group_name)
# Add / remove list aliases from the root 'list_aliases' index.
def add_mailinglist(obj, event):
aliases = find_site(obj).list_aliases
aliases[obj.short_address] = model_path(obj.__parent__)
def remove_mailinglist(obj, event):
aliases = find_site(obj).list_aliases
try:
del aliases[obj.short_address]
except KeyError:
pass
# "Index" profile e-mails into the profiles folder.
def _remove_email(parent, name):
mapping = getattr(parent, 'email_to_name')
filtered = [x for x in mapping.items() if x[1] != name]
mapping.clear()
mapping.update(filtered)
def profile_added(obj, event):
parent = obj.__parent__
name = obj.__name__
_remove_email(parent, name)
parent.email_to_name[obj.email] = name
def profile_removed(obj, event):
parent = obj.__parent__
name = obj.__name__
_remove_email(parent, name)
class QueryLogger(object):
"""Event listener that logs ICatalogQueryEvents to a directory.
Performs 2 tasks:
1. Divides the log files by the magnitude of the query duration,
making it easy to find expensive queries.
    2. Logs all queries to a single file for comparison across systems.
"""
def __init__(self):
self._configured = False
self.log_dir = None
self.min_duration = None
self.log_all = None
def configure(self, settings):
self.log_dir = getattr(settings, 'query_log_dir', None)
if self.log_dir:
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
self.min_duration = float(
getattr(settings, 'query_log_min_duration', 0.0))
self.log_all = bool(
getattr(settings, 'query_log_all', False))
self._configured = True
def __call__(self, event):
if not self._configured:
settings = queryUtility(ISettings)
if settings is not None:
self.configure(settings)
if not self.log_dir:
return
t = datetime.now().isoformat()
duration = event.duration
query = ' ' + pformat(event.query).replace('\n', '\n ')
if self.log_all:
self._log(t, duration, event.result[0], query)
if duration >= self.min_duration:
self._log_by_magnitude(t, duration, event.result[0], query)
def _log(self, ts, duration, num_results, query, fname='everything.log'):
msg = '%s %8.3f %6d\n%s\n' % (
ts, duration, num_results, query)
path = os.path.join(self.log_dir, fname)
f = open(path, 'a')
try:
f.write(msg)
finally:
f.close()
def _log_by_magnitude(self, ts, duration, num_results, query):
magnitude = math.ceil(math.log(duration, 2))
fn = '%07d.log' % int(1000 * 2**magnitude)
self._log(ts, duration, num_results, query, fname=fn)
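    # Illustrative doctest added for clarity (not part of the original
    # module); the method name and durations are hypothetical. Queries are
    # bucketed into files named after the next power-of-two duration, in
    # milliseconds:
    def _magnitude_filename_example(self):
        """
        >>> import math
        >>> '%07d.log' % int(1000 * 2 ** math.ceil(math.log(0.3, 2)))
        '0000500.log'
        >>> '%07d.log' % int(1000 * 2 ** math.ceil(math.log(1.7, 2)))
        '0002000.log'
        """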
log_query = QueryLogger()
|
damilare/opencore
|
opencore/models/subscribers.py
|
Python
|
gpl-2.0
| 8,339
|
[
"VisIt"
] |
28024d1c0bc0c86c83535375b0b41c3fb2f8b4a32f80f9f7a6068dcdc1291206
|
# Copyright (c) 2011, Chandler Armstrong (omni dot armstrong at gmail dot com)
# see LICENSE.txt for details
"""
polygon object
"""
from __future__ import division
from operator import mul
from numpy import array, cos, dot, fabs, lexsort, pi, sin, sqrt, vstack
from pygame import Rect
from .convexhull import convexhull
# error tolerances
_MACHEPS = pow(2, -24)
_E = _MACHEPS * 10
# utility functions
_clamp = lambda a, v, b: max(a, min(b, v)) # clamp v between a and b
_perp = lambda (x, y): array([-y, x]) # perpendicular
_prod = lambda X: reduce(mul, X) # product
_mag = lambda (x, y): sqrt(x * x + y * y) # magnitude, or length
_normalize = lambda V: array([i / _mag(V) for i in V]) # normalize a vector
_intersect = lambda A, B: (A[1] > B[0] and B[1] > A[0]) # intersection test
_unzip = lambda zipped: zip(*zipped) # unzip a list of tuples
def _isbetween(o, p, q):
# returns true if point p between points o and q
o_x, o_y = o
p_x, p_y = p
q_x, q_y = q
m = (q_y - o_y) / (q_x - o_x)
b = o_y - (m * o_x)
if fabs(p_y - ((m * p_x) + b)) < _MACHEPS: return True
return False
class _Support(object):
# the support mapping of P - Q; s_P-Q
# s_P-Q is the generic support mapping for polygons
def __init__(self, P, Q):
s = self._s
self._s_P = s(P)
self._s_Q = s(Q)
self.M = []
def __repr__(self): return repr(array([m for m in self]))  # __repr__ must return a string
def __len__(self): return len(self.M)
def __iter__(self): return iter(p - q for p, q in self.M)
def _s(self, C):
# returns a function that returns the support mapping of C
# the support mapping is the p in C such that
# dot(r, p) == dot(r, _s(C)(r))
# ie, the support mapping is the p in C most in the direction of r
return lambda r: max(dict((dot(r, p), p) for p in C).items())[1]
def add(self, r):
# add the value of s_P-Q(r) to self and return that value
# NOTE: return value is always a pair of vertices from P and Q
s_P, s_Q = self._s_P, self._s_Q
p, q = s_P(r), s_Q(-r)
self.M.append((p, q))
return p - q
def get(self, r):
# return value of s_P-Q(r)
# NOTE: return value is always a pair of vertices from P and Q
s_P, s_Q = self._s_P, self._s_Q
return s_P(r) - s_Q(-r)
def v(self, q=array([0, 0]), i=0):
# find the point on the convex hull of C closest to q by iteratively
# searching around voronoi regions
# i is the index of the initial test edge
# returns the point closest to q and sets self.M to be the minimum set
# of points in C such that q in conv(points)
A = array(list(self))
if len(A) > 1:
I = convexhull(A)
A = A[I]
C = Polygon(A, conv=False).move(*-q)
edges, n, P = C.edges, C.n, C.P
if n == 1: return P[0]
checked, inside = set(), set()
while 1:
checked.add(i)
edge = edges[i]
p = P[i]
len2 = dot(edge, edge) # len(edge)**2
vprj = dot(p, edge) # p projected onto edge
if vprj < 0: # q lies CW of edge
i = (i - 1) % n
if i in checked:
if not i in inside:
self.M = [self.M[I[i]]]
return p - q
i = (i - 1) % n
continue
if vprj > len2: # q lies CCW of edge
i = (i + 1) % n
if i in checked:
if not i in inside:
p = P[i]
self.M = [self.M[I[i]]]
return p - q
i = (i + 1) % n
continue
nprj = dot(p, _perp(edge)) # p projected onto edge normal
# perp of CCW edges will always point "outside"
if nprj > 0: # q is "inside" the edge
inside.add(i)
if len(checked) == n: return q # q in C
i = (i + 1) % n
continue
# q is closest to edge
self.M = [self.M[I[i]], self.M[I[(i + 1) % n]]]
edge_n = _normalize(edge)
# move from p to q projected on to edge
qprj = p - ((dot(p, edge_n)) * edge_n)
return qprj
class Polygon(object):
"""polygon object"""
def __init__(self, P, conv=True):
"""
arguments:
P -- iterable or 2d numpy.array of (x, y) points. the constructor will
find the convex hull of the points in CCW order; see the conv keyword
argument for details.
keyword arguments:
conv -- boolean indicating if the convex hull of P should be found.
conv is True by default. Polygon is intended for convex polygons only
and P must be in CCW order. conv will ensure that P is both convex
and in CCW order. even if P is already convex, it is recommended to leave
conv True, unless client code can be sure that P is also in CCW order.
CCW order is required for certain operations.
NOTE: the order must be with respect to a bottom left origin; graphics
applications typically use a topleft origin. if your points are CCW
with respect to a topleft origin they will be CW in a bottomleft
origin
"""
P = array(list(P))
if conv: P = P[convexhull(P)]
self.P = P
n = len(P) # number of points
self.n = n
self.a = self._A() # area of polygon
edges = [] # an edge is the vector from p to q
for i, p in enumerate(P):
q = P[(i + 1) % n] # x, y of next point in series
edges.append(p - q)
self.edges = array(edges)
C = self.C
# longest distance from C for all p in P
self.rmax = sqrt(max(dot(C - p, C - p) for p in P))
def __len__(self): return self.n
def __getitem__(self, i): return self.P[i]
def __iter__(self): return iter(self.P)
def __repr__(self): return str(self.P)
def __add__(self, other):
"""
returns the minkowski sum of self and other
arguments:
other is a Polygon object
returns an array of points for the results of minkowski addition
NOTE: use the unary negation operator on other to find the so-called
minkowski difference. eg A + (-B)
"""
P, Q = self.P, other.P
return array([p + q for p in P for q in Q])
def __neg__(self): return Polygon(-self.P)
def get_rect(self):
"""return the AABB, as a pygame rect, of the polygon"""
X, Y = _unzip(self.P)
x, y = min(X), min(Y)
w, h = max(X) - x, max(Y) - y
return Rect(x, y, w, h)
def move(self, x, y):
"""return a new polygon moved by x, y"""
return Polygon([(x + p_x, y + p_y) for (p_x, p_y) in self.P])
def move_ip(self, x, y):
"""move the polygon by x, y"""
self.P = array([(x + p_x, y + p_y) for (p_x, p_y) in self.P])
def collidepoint(self, (x, y)):
"""
test if point (x, y) is outside, on the boundary, or inside polygon
uses raytracing algorithm
returns 0 if outside
returns -1 if on boundary
returns 1 if inside
"""
n, P = self.n, self.P
# test if (x, y) on a vertex
for p_x, p_y in P:
if (x == p_x) and (y == p_y): return -1
intersections = 0
for i, p in enumerate(self.P):
p_x, p_y = p
q_x, q_y = P[(i + 1) % n]
x_min, x_max = min(p_x, q_x), max(p_x, q_x)
y_min, y_max = min(p_y, q_y), max(p_y, q_y)
# test if (x, y) on horizontal boundary
if (p_y == q_y) and (p_y == y) and (x > x_min) and (x < x_max):
return -1
if (y > y_min) and (y <= y_max) and (x <= x_max) and (p_y != q_y):
x_inters = (((y - p_y) * (q_x - p_x)) / (q_y - p_y)) + p_x
# test if (x, y) on non-horizontal polygon boundary
if x_inters == x: return -1
# test if line from (x, y) intersects boundary
if p_x == q_x or x <= x_inters: intersections += 1
return intersections % 2
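# Illustrative usage sketch for collidepoint (the square below is a
# hypothetical example, not a fixture from this project):
#
#   square = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
#   square.collidepoint((2, 2))   # -> 1, strictly inside
#   square.collidepoint((0, 2))   # -> -1, on the boundary
#   square.collidepoint((9, 9))   # -> 0, outside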
def collidepoly(self, other):
"""
test if other polygon collides with self using the separating axis theorem
if collision, return projections
arguments:
other -- a polygon object
returns:
an array of projections
"""
# a projection is a vector representing the span of a polygon projected
# onto an axis
projections = []
for edge in vstack((self.edges, other.edges)):
edge = _normalize(edge)
# the separating axis is the line perpendicular to the edge
axis = _perp(edge)
self_projection = self.project(axis)
other_projection = other.project(axis)
# if self and other do not intersect on any axis, they do not
# intersect in space
if not _intersect(self_projection, other_projection): return False
# find the overlapping portion of the projections
projection = self_projection[1] - other_projection[0]
projections.append((axis[0] * projection, axis[1] * projection))
return array(projections)
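# Illustrative usage sketch for the separating axis test above (hypothetical
# squares, not test fixtures):
#
#   a = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
#   b = Polygon([(2, 2), (6, 2), (6, 6), (2, 6)])
#   c = Polygon([(9, 9), (12, 9), (12, 12), (9, 12)])
#   a.collidepoly(b)   # overlapping: returns the array of overlap projections
#   a.collidepoly(c)   # disjoint: returns False, a separating axis was found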
def distance(self, other, r=array([0, 0])):
"""
return distance between self and other
uses GJK algorithm. for details see:
Bergen, Gino Van Den. (1999). A fast and robust GJK implementation for
collision detection of convex objects. Journal of Graphics Tools 4(2).
arguments:
other -- a Polygon object
keyword arguments
r -- initial search direction; setting r to the movement vector of
self - other may speed convergence
"""
P, Q = self.P, other.P
support = _Support(P, Q) # support mapping function s_P-Q(r)
v = support.get(r) # initial support point
w = support.add(-v)
while dot(v, v) - dot(w, v) > _MACHEPS: # while w is closer to origin
v = support.v() # closest point to origin in support points
if len(support) == 3: return v # the origin is inside W; intersection
w = support.add(-v)
return v
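# Illustrative usage sketch for the GJK distance query above; the shapes and
# the expected result are a worked example, not library test data:
#
#   a = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
#   b = Polygon([(5, 0), (7, 0), (7, 2), (5, 2)])
#   v = a.distance(b)   # closest point of the Minkowski difference a - b
#                       # to the origin, expected to be ~array([-3., 0.])
#   _mag(v)             # -> ~3.0, the gap between the two squares
#
# passing r as the relative movement vector of self - other (see the
# docstring) may speed up convergence of the loop above.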
def raycast(self, other, r, s=array([0, 0]), self_theta=0, other_theta=0):
"""
return the hit scalar, hit vector, and hit normal from self to other in
direction r
uses GJK-based raycast[1] modified to accommodate constant angular
rotation[2][3] without needing to recompute the Minkowski Difference
after each iteration[4].
[1] Bergen, Gino Van Den. (2004). Ray casting against general convex
objects with application to continuous collision detection. GDC 2005.
retrieved from
http://www.bulletphysics.com/ftp/pub/test/physics/papers/
jgt04raycast.pdf
on 6 July 2011.
[2] Coumans, Erwin. (2005). Continuous collision detection and physics.
retrieved from
http://www.continuousphysics.com/
BulletContinuousCollisionDetection.pdf
on 18 January 2012
[3] Mirtich, Brian Vincent. (1996). Impulse-based dynamic simulation of
rigid body systems. PhD Thesis. University of California at Berkeley.
retrieved from
http://www.kuffner.org/james/software/dynamics/mirtich/
mirtichThesis.pdf
on 18 January 2012
[4] Behar, Evan and Jyh-Ming Lien. (2011). Dynamic Minkowski Sum of
convex shapes. In proceedings of IEEE ICRA 2011. retrieved from
http://masc.cs.gmu.edu/wiki/uploads/GeneralizedMsum/
icra11-dynsum-convex.pdf
on 18 January 2012.
arguments:
other -- Polygon object
r -- direction vector
NOTE: GJK searches IN THE DIRECTION of r, thus r needs to point
towards the origin with respect to the direction vector of self; in
other words, if r represents the movement of self then client code
should call raycast with -r.
keyword arguments:
s -- initial position along r, (0, 0) by default
self_theta, other_theta -- angular velocities (in radians) of self and other
returns:
if r does not intersect other, returns False
else, returns the hit scalar, hit vector, and hit normal
hit scalar -- the scalar where r intersects other
hit vector -- the vector where self intersects other
hit normal -- the edge normal at the intersection
"""
self_rmax, other_rmax = self.rmax, other.rmax
# max arc length of rotation
# maximum radians for arc length is pi
L = ((self_rmax * abs(_clamp(-pi, self_theta, pi))) +
(other_rmax * abs(_clamp(-pi, other_theta, pi))))
# polygons for support function; copied because they will be rotated
A, B = Polygon(self), Polygon(other)
support = _Support(A, B) # support mapping function s_P-Q(r)
lambda_ = 0 # scalar of r to hit spot
q = s # current point along r
n = array([0, 0]) # hit normal at q
v = support.get(r) - q # vector from q to s_P-Q
p = support.add(-v) # support returns a v opposite of r
w = p - q
while dot(v, v) > _E * max(dot(p - q, p - q) for p in support):
if dot(v, w) > 0:
if (dot(v, r) <= 0) and (dot(v, v) > (L * L)): return False
n = -v
# update lambda
# translation distance lower bound := dot(v, w) / dot(v, r)
# angular rotation distance lower bound := L * (1 - lambda)
lambda_change = dot(v, w) / (dot(v, r) + (L * (1 - lambda_)))
lambda_ = lambda_ + lambda_change
if lambda_ > 1: return False
# interpolate lambda
q = s + (lambda_ * r) # translation
A.rotate_ip(lambda_change * self_theta) # rotation
B.rotate_ip(lambda_change * other_theta)
v = support.v(q) # closest point to q in support points
p = support.add(-v)
w = p - q
return lambda_, q, n
def _A(self):
# the area of polygon
n = self.n
P = self.P
X, Y = P[:, 0], P[:, 1]
return 0.5 * sum(X[i] * Y[(i + 1) % n] - X[(i + 1) % n] * Y[i]
for i in xrange(n))
@property
def C(self):
"""returns the centroid of the polygon"""
a, n = self.a, self.n
P = self.P
X, Y = _unzip(P)
if n == 1: return P[0]
if n == 2: return array([(X[0] + X[1]) / 2, (Y[0] + Y[1]) / 2])  # midpoint of the two points
c_x, c_y = 0, 0
for i in xrange(n):
a_i = X[i] * Y[(i + 1) % n] - X[(i + 1) % n] * Y[i]
c_x += (X[i] + X[(i + 1) % n]) * a_i
c_y += (Y[i] + Y[(i + 1) % n]) * a_i
b = 1 / (6 * a)
c_x *= b
c_y *= b
return array([c_x, c_y])
@C.setter
def C(self, (x, y)):
c_x, c_y = self.C
x, y = x - c_x, y - c_y
self.P = array([(p_x + x, p_y + y) for (p_x, p_y) in self.P])
def _rotate(self, x0, theta, origin=None):
if origin is None: origin = self.C  # 'not array' is ambiguous for numpy arrays
origin = origin.reshape(2, 1)
x0 = x0.reshape(2, 1)
x0 = x0 - origin # in-place subtraction (-=) would modify the original x0
A = array([[cos(theta), -sin(theta)], # rotation matrix
[sin(theta), cos(theta)]])
return (dot(A, x0) + origin).ravel()
def rotopoints(self, theta):
"""
returns an array of points rotated theta radians around the centroid
"""
P = self.P
rotate = self._rotate
return array([rotate(p, theta) for p in P])
def rotoedges(self, theta):
"""return an array of vectors of edges rotated theta radians"""
edges = self.edges
rotate = self._rotate
# edges, essentially angles, are always rotated around (0, 0)
return array([rotate(edge, theta, origin=array([0, 0]))
for edge in edges])
def rotate(self, theta): return Polygon(self.rotopoints(theta))
def rotate_ip(self, theta):
other = Polygon(self.rotopoints(theta))
self.P[:] = other.P
self.edges[:] = other.edges
def project(self, axis):
"""project self onto axis"""
P = self.P
projected_points = [dot(p, axis) for p in P]
# return the span of the projection
return min(projected_points), max(projected_points)
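# Illustrative usage sketch for the projection helper and the AABB above
# (hypothetical shape and axis, not part of the library's tests):
#
#   from numpy import array
#   square = Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])
#   square.project(array([1, 0]))   # -> (0, 4), the span along the x axis
#   square.get_rect()               # -> Rect(0, 0, 4, 4), the bounding box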
|
carsonfarmer/pylygon
|
pylygon/polygon.py
|
Python
|
gpl-3.0
| 17,520
|
[
"Brian"
] |
1ca2cf11ffce220431800a7a563f670ac9885a317dc8afd89965a38c0e4de18f
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Generic functions for use with GEOS-Chem's planeflight diagnostic module
"""
# compatibility with both python 2 and 3
from __future__ import print_function
# - Required modules:
# I/O functions / Low level
import sys
import csv
import glob
import pandas as pd
import logging
import numpy as np
# The below list to be updated, imports should be specific and in individual functions
# import tms modules with shared functions
from . core import *
from . generic import *
from . AC_time import *
from . variables import *
# Time
import datetime as datetime
def update_Planeflight_files(wd=None, num_tracers=103, verbose=True):
"""
Create new planeflight from old files (with updated # of tracers)
Parameters
-------
wd (str): the working (code) directory to search for files in
num_tracers (int): the number of tracers (TRA_???) to print in *dat files
Notes
-------
- Used for using existing planeflight output for campaign, but for
different output variables
"""
# --- Local variables
output_data_str = 'Now give the times and locations of the flight'
#
met_vars = [
'GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP', 'GMAO_UWND',
'GMAO_VWND'
]
assert isinstance(num_tracers, int), 'num_tracers must be an integer'
slist = ['TRA_{:0>3}'.format(i) for i in np.arange(1, num_tracers+1)]
species = ['OH', 'HO2']
slist = slist + species + met_vars
# slist = pf_var( fill_var_with_zeroes=True, ver=ver )
# --- Get files
# Get wd
if isinstance(wd, type(None)):
try:
wd = sys.argv[1]
except:
print('FAIL - Please provide working directory!')
# vars to use?
# read in files and extract locations.
files = glob.glob(wd+'Planeflight.dat*')
if verbose:
print(files)
# --- Loop existing files and extract data
dfs = []
for n_file, file in enumerate(files):
with open(file, 'r') as file_:
# loop variables
data_from_line_num = 9999
data = []
for n, line in enumerate(file_):
# get header
if ('Point' in line) and ('Type' in line):
header = [i.strip().upper() for i in line.split()]
data_from_line_num = n
# Break if passed all data
elif ('99999' in line) and ('END' in line):
break
elif (n > data_from_line_num):
data += [[i.strip() for i in line.split()]]
else:
pass
if len(data) > 0:
# Add datetime column, then add data to list of dataframes
df = pd.DataFrame(np.array(data), columns=header)
df['datetime'] = df['DD-MM-YYYY'].astype(
str)+df['HH:MM'].astype(str)
df['datetime'] = pd.to_datetime(df['datetime'],
format='%d-%m-%Y%H:%M')
dfs += [df]
else:
err_msg = 'WARNING: no data in {}'.format(file)
logging.info(err_msg)
print(err_msg)
# Concatenate extracted data
if verbose:
print(dfs[0])
df = pd.concat(dfs).sort_values('datetime', ascending=True)
if verbose:
print('FINAL!!!!', df)
# --- Print out new files based on processed DataFrame
prt_PlaneFlight_files(df=df, slist=slist, Extra_spacings=False)
def prt_PlaneFlight_files(df=None, LAT_var='LAT', LON_var='LON',
PRESS_var='PRESS', loc_var='TYPE',
Username='Tomas Sherwen',
Date_var='datetime', slist=None, num_tracers=85,
Extra_spacings=False,
verbose=False, debug=False):
"""
Takes a dataframe of lats, lons, alts, and times and makes Planeflight.dat.*
files
Parameters
-------
df (pd.DataFrame): dataframe of data (as floats) indexed by times(datetime)
wd (str): the working (code) directory to search for files in
loc_var (str): name for (e.g. plane name), could be more than one.
LAT_var, LON_var, PRESS_var (str): name for pressure(HPa),lat and lon in df
Date_var (str): column name of df containing datetime (UTC) variables
Username (str): name of the programme's user
Extra_spacings (bool): add extra spacing? (needed for large amounts of
output, like nested grids)
slist (list): list of tracers/species to output
Notes
-----
- to get output of a specific frequency for given point locations, just
add the times to the axis of the dataframe provided.
- a datetime column is required (as this allows multiple output locations
(e.g. separate planes/sites) to be present in the input df)
- This function expects the dataframe to be ordered by datetime
"""
# --- Packages
from time import gmtime, strftime
import time
# --- Local variables
# Extra spaces need for runs with many points
if Extra_spacings:
pstr = '{:>6} {:>4} {:0>2}-{:0>2}-{:0>4} {:0>2}:{:0>2} {:>6,.2f} {:>7,.2f} {:>7.2f}'
endstr = '999999 END 0- 0- 0 0: 0 0.00 0.00 0.00'
else:
# pstr = '{:>5} {:<3} {:0>2}-{:0>2}-{:0>4} {:0>2}:{:0>2} {:>6,.2f} {:>7,.2f} {:>7.2f}'
# endstr ='99999 END 0- 0- 0 0: 0 0.00 0.00 0.00 '
pstr = '{:>5} {:>4} {:0>2}-{:0>2}-{:0>4} {:0>2}:{:0>2} {:>6,.2f} {:>7,.2f} {:>7.2f}'
endstr = '99999 END 00-00-0000 00:00 0.00 0.00 0.00'
# Output a general list of species/tracers/met vars if not provided as arguments
if isinstance(slist, type(None)):
met_vars = [
'GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP', 'GMAO_UWND', 'GMAO_VWND'
]
assert isinstance(num_tracers, int), 'num_tracers must be an integer'
slist = ['TRA_{:0>3}'.format(i) for i in np.arange(1, num_tracers+1)]
species = ['OH', 'HO2']
slist = slist + species + met_vars
# Number of variables to output (needed for fortran read of *dat files)
nvar = len(slist)
# --- work out how many (UTC) days in the output
# Get list of unique dates & remove mean from dates
dates = [datetime.datetime(*i.timetuple()[:3]) for i in df[Date_var]]
# Add list of just YYYYMMDD strings to dataframe
df['YYYYMMDD'] = [i.strftime('%Y%m%d') for i in dates]
# Get list of unique days
dates = np.ma.array(sorted(set(dates)))
# --- loop days and create the files
for date_ in dates:
# Get data for date
sub_df = df[df['YYYYMMDD'].values == date_.strftime('%Y%m%d')]
if verbose:
print('Entries for day ({}): '.format(date_), sub_df.shape)
# Create/Open up pf.dat setup
a = open('Planeflight.dat.'+date_.strftime('%Y%m%d'), 'w')
# Print out file headers to pf.dat file
print('Planeflight.dat -- input file for ND40 diagnostic GEOS_FP',
file=a)
print(Username, file=a)
print(strftime("%B %d %Y", gmtime()), file=a)
print('-----------------------------------------------', file=a)
print('{:<4}'.format(nvar), '! Number of variables to be output',
file=a)
print('-----------------------------------------------', file=a)
# Print out species for GEOS-Chem to output to pf.dat file
for n in range(0, len(slist)):
print(slist[n], file=a)
# Print out species for GEOS-Chem to output to pf.dat file
print('-------------------------------------------------', file=a)
print('Now give the times and locations of the flight', file=a)
print('-------------------------------------------------', file=a)
print('Point Type DD-MM-YYYY HH:MM LAT LON PRESS', file=a)
# Loop requested times
for n, time_ in enumerate(sub_df[Date_var]):
# Setup variable list to print
vars_ = [n+1, sub_df[loc_var].values[n], time_.day, time_.month]
vars_ += [time_.year, time_.hour, time_.minute]
# Extract lat, lon, and pressure for time
coord_vars = LAT_var, LON_var, PRESS_var
vars_ += [float(sub_df[i].values[n]) for i in coord_vars]
# Set formatting
vars_ = pstr.format(*vars_)
# Print to file
print(vars_, file=a)
# Add footer to pf.dat file
print(endstr, file=a)
a.close()
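# Illustrative usage sketch for prt_PlaneFlight_files; the site name, dates
# and coordinates below are hypothetical examples, not campaign data:
#
#   import pandas as pd
#   times = pd.date_range('2017-01-01', periods=3, freq='H')
#   df = pd.DataFrame({
#       'datetime': times,
#       'TYPE': 'CVO1',                 # location/plane identifier per point
#       'LAT': 16.9, 'LON': -24.9, 'PRESS': 1013.25,
#   })
#   prt_PlaneFlight_files(df=df, num_tracers=85)
#
# this writes one Planeflight.dat.YYYYMMDD file per UTC day found in the
# 'datetime' column, with the requested tracers/species listed in the header.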
def prt_PlaneFlight_files_v12_plus(df=None, LAT_var='LAT', LON_var='LON',
PRESS_var='PRESS', loc_var='TYPE',
OBS_var='OBS',
Date_var='datetime', slist=None,
num_tracers=85, rxn_nums=[],
Extra_spacings=False,
Username='Tomas Sherwen', verbose=False,
debug=False):
"""
Takes a dataframe of lats, lons, alts, and times and makes Planeflight.dat.*
files
Parameters
-------
df (pd.DataFrame): dataframe of data (as floats) indexed by times(datetime)
wd (str): the working (code) directory to search for files in
loc_var (str): name for (e.g. plane name), could be more than one.
LAT_var, LON_var, PRESS_var (str): name for pressure(HPa),lat and lon in df
Date_var (str): column name of df containing datetime (UTC) variables
Username (str): name of the programme's user
Extra_spacings (bool): add extra spacing? (needed for large amounts of
output, like nested grids)
slist (list): list of tracers/species to output
Notes
-----
- to get output of a specific frequency for given point locations, just
add the times to the axis of the dataframe provided.
- a datetime column is required (as this allows multiple output locations
(e.g. separate planes/sites) to be present in the input df)
- This function expects the dataframe to be ordered by datetime
"""
# --- Packages
from time import gmtime, strftime
import time
# --- Local variables
# Extra spaces need for runs with many points
if Extra_spacings:
# pstr = '{:>6} {:>4} {:0>2}-{:0>2}-{:0>4} {:0>2}:{:0>2} {:>6,.2f} {:>7,.2f} {:>7.2f}'
# endstr = '999999 END 0- 0- 0 0: 0 0.00 0.00 0.00'
print('Extra_spacings not setup for >= v12.0.0')
sys.exit()
else:
# pstr = '{:>5} {:<3} {:0>2}-{:0>2}-{:0>4} {:0>2}:{:0>2} {:>6,.2f} {:>7,.2f} {:>7.2f}'
# endstr ='99999 END 0- 0- 0 0: 0 0.00 0.00 0.00 '
pstr = '{:>5}{:>7} {:0>2}-{:0>2}-{:0>4} {:0>2}:{:0>2} {:>6,.2f} {:>7,.2f} {:>7.2f} {:>10.3f}'
endstr = '99999 END 00-00-0000 00:00 0.00 0.00 0.00 0.00'
# Output a general list of species/tracers/met vars if not provided as arguments
if isinstance(slist, type(None)):
met_vars = [
'GMAO_ABSH', 'GMAO_PSFC', 'GMAO_SURF', 'GMAO_TEMP', 'GMAO_UWND',
'GMAO_VWND', 'GMAO_PRES'
]
assert isinstance(num_tracers, int), 'num_tracers must be an integer'
slist = ['TRA_{:0>3}'.format(i) for i in np.arange(1, num_tracers+1)]
species = ['OH', 'HO2']
slist = slist + species + met_vars
# Add list of reactions to extract too
if len(rxn_nums) > 0:
rxns_are_nums = [(type(i) == int) for i in rxn_nums]
assert all(rxns_are_nums), 'All rxn numbers must be integers!'
slist += ['REA_{:0>3}'.format(i) for i in rxn_nums]
# Number of variables to output (needed for fortran read of *dat files)
nvar = len(slist)
# --- Make sure an altitude is defined in df if not provided
# Updates merged into v12.0.0 mean an OBS altitude is required.
try:
df[OBS_var].values
except KeyError:
fill_ALT_obs = 99999.00
df[OBS_var] = fill_ALT_obs
# --- work out how many (UTC) days in the output
# Get list of unique dates & remove mean from dates
dates = [datetime.datetime(*i.timetuple()[:3]) for i in df[Date_var]]
# Add list of just YYYYMMDD strings to dataframe
df['YYYYMMDD'] = [i.strftime('%Y%m%d') for i in dates]
# Get list of unique days
dates = np.ma.array(sorted(set(dates)))
# --- loop days and create the files
for date_ in dates:
# Get data for date
sub_df = df[df['YYYYMMDD'].values == date_.strftime('%Y%m%d')]
if verbose:
print('Entries for day ({}): '.format(date_), sub_df.shape)
# Create/Open up pf.dat setup
a = open('Planeflight.dat.'+date_.strftime('%Y%m%d'), 'w')
# Print out file headers to pf.dat file
print('Planeflight.dat -- input file for ND40 diagnostic GEOS_FP',
file=a)
print(Username, file=a)
print(strftime("%B %d %Y", gmtime()), file=a)
print('-----------------------------------------------', file=a)
print('{:<4}'.format(nvar), '! Number of variables to be output',
file=a)
print('-----------------------------------------------', file=a)
# Print out species for GEOS-Chem to output to pf.dat file
for n in range(0, len(slist)):
print(slist[n], file=a)
# Print out species for GEOS-Chem to output to pf.dat file
print('-------------------------------------------------', file=a)
print('Now give the times and locations of the flight', file=a)
print('-------------------------------------------------', file=a)
header = [
'Point', 'Type', 'DD-MM-YYYY', 'HH:MM', 'LAT', 'LON', 'PRESS',
'OBS'
]
h_pstr = '{:>5}{:>7} {:>10} {:>5} {:>6} {:>7} {:>7} {:>10}'
print(h_pstr.format(*header), file=a)
# Loop requested times
for n, time_ in enumerate(sub_df[Date_var]):
# Setup variable list to print
vars_ = [n+1, sub_df[loc_var].values[n], time_.day, time_.month]
vars_ += [time_.year, time_.hour, time_.minute]
# Extract lat, lon, and pressure for time
coord_vars = LAT_var, LON_var, PRESS_var, OBS_var
vars_ += [float(sub_df[i].values[n]) for i in coord_vars]
# Set formatting
vars_ = pstr.format(*vars_)
# Print to file
print(vars_, file=a)
# Add footer to pf.dat file
print(endstr, file=a)
a.close()
def get_pf_headers(file, debug=False):
"""
Extract column headers from a GEOS-Chem planeflight csv file
Parameters
-------
file (str): filename to open
debug (bool): debug the function?
Returns
-------
(list, list)
"""
if debug:
print(file)
# Open pf file
with open(file, 'r') as f:
Lines = [i for i in f]
names = Lines[0].strip().split()
points = [i.strip().split()[0] for i in Lines[1:]]
# reader = csv.reader(f, delimiter=' ', skipinitialspace=True)
# for row in f:
# if row[0] != 'POINT':
# new = row[1:2][0]
# try:
# points.append(new)
# except:
# points = [new]
# else:
# names = row[2:]
if debug:
print(names, list(set(points)))
return names, list(set(points))
def pf_csv2pandas(file=None, vars=None, epoch=False, r_vars=False,
debug=False):
"""
Planeflight.dat CSV reader - used for processor GEOS-Chem PF output
Parameters
-------
file (str): file name (inc. directory)
vars (list): vars to extract
epoch (bool):
r_vars (bool): return list of vars
Returns
-------
(pd.DataFrame)
"""
# Open file
with open(file, 'r') as f:
logging.debug(f)
# Label 1st column ( + LOC ) if names not in vars
# ( This effectively means that pandas arrays are the same )
if debug:
print([type(i) for i in (vars, ['POINT', 'LOC'])])
if 'POINT' not in vars:
names = ['POINT', 'LOC'] + vars[:-1]
else:
names = vars
if debug:
print(vars, names)
# Convert to pandas array
df = pd.read_csv(f, header=None, skiprows=1,
delim_whitespace=True, names=names,
dtype={'HHMM': str, 'YYYYMMDD': str, 'POINT': object}
)
# Convert strings to datetime using pandas mapping
df = DF_YYYYMMDD_HHMM_2_dt(df, rmvars=None, epoch=epoch)
if debug:
print(df, df.shape)
# Return pandas DataFrame
if r_vars:
return df, list(df.columns)
else:
return df
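# Illustrative usage sketch for reading planeflight output with the two
# helpers above (the folder and file names are hypothetical):
#
#   files = sorted(glob.glob('./run_dir/plane.log.*'))
#   names, points = get_pf_headers(files[0])
#   df = pf_csv2pandas(file=files[0], vars=names)
#
# 'names' holds the column headers from the first line of the file, 'points'
# the unique location identifiers, and pf_csv2pandas converts the
# YYYYMMDD/HHMM columns into datetimes via DF_YYYYMMDD_HHMM_2_dt.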
def get_pf_from_folder(folder='./', dates2use=None, debug=False):
"""
Get GEOS-Chem planeflight output from folder
"""
# Which files to use?
files = list(sorted(glob.glob(folder+'/*plane.*')))
if debug:
print(files)
# Only open dates for certain dates?
if not isinstance(dates2use, type(None)):
FileRootsVar = 'FileRoots'
df = pd.DataFrame({FileRootsVar: files})
# Which date format to look for in filenames?
format = '%Y%m%d%H%M'
# Setup a helper function to extract dates from file strings
def get_date_from_filename(x, format=format):
"""
Extract Dates from filenames
Notes
-------
- It is assumed that the date ends the file string before the
format identifier
"""
date_str = x.split('.')[-2]
dt = datetime.datetime.strptime(date_str, format)
return dt
dtVar = 'datetime'
df[dtVar] = df[FileRootsVar].map(get_date_from_filename)
in_dates = df[dtVar].isin(dates2use)
files = list(df.loc[in_dates, FileRootsVar].values)
# Get headers
ALL_vars, sites = get_pf_headers(files[0], debug=debug)
# Extract dfs
dfs = [pf_csv2pandas(i, vars=ALL_vars) for i in files]
# Append the dataframes together
df = dfs[0].append(dfs[1:])
return df
def get_pf_data_from_NetCDF_table(ncfile=None, req_var='TRA_69', spec='IO',
loc='CVO', start=None, end=None, ver='1.7',
sdate=None, edate=None,
verbose=False, debug=False):
"""
Extracts data from NetCDF file processed by pf2NetCDF (pandas) converter in PhD_Progs
Returns
-------
(np.array, np.array)
Notes
-------
- TODO: re-write and update the comments!
(this function should just use xarray)
"""
# Convert to plane-flight (pf) variable name ('req_var') if not given
if isinstance(req_var, type(None)):
req_var = what_species_am_i(spec, ver=ver, invert=True)
# --- Open NetCDF within nest, and extract data
with Dataset(ncfile, 'r') as rootgrp:
# Select only variables for site
LOC = rootgrp['LOC']
Epoch = rootgrp['Epoch']
data = rootgrp[req_var]
# Convert to numpy array
LOC, Epoch, data = [np.array(i) for i in (LOC, Epoch, data)]
# Select just the variable requests
if debug:
print(LOC)
ind = np.where(LOC == loc)
if debug:
print('indcies where LOC==loc: ', ind)
Epoch = Epoch[ind]
data = data[ind]
# Convert Epoch to datetime
if debug:
print(Epoch[0])
dates = np.array([datetime.datetime.fromtimestamp(i) for i in Epoch])
if debug:
print(dates[0])
# Select dates ( <= add this )
if not isinstance(sdate, type(None)):
data = data[np.where((dates < edate) & (dates >= sdate))]
dates = dates[np.where((dates < edate) & (dates >= sdate))]
return dates, data
def mk_planeflight_input4FAAM_flight(folder=None, ds=None,
flight_ID='C216',
folder4csv=None,
PressVar="PS_RVSM",
LonVar='LON_GIN',
LatVar='LAT_GIN', TimeVar='Time',
LocVar='TYPE', LocName='B-146',
DateVar='datetime',
testing_mode=True, csv_suffix='',
num_tracers=203, rxn_nums=[],
Username='Tomas Sherwen',
slist=None,
Extra_spacings=False
):
"""
Make planeflight input files (Planeflight.dat.*) for a given FAAM BAe146 flight
Parameters
-------
Returns
-------
(None)
"""
# Retrieve FAAM BAe146 Core NetCDF files
if isinstance(ds, type(None)):
filename = 'core_faam_*_{}_1hz.nc'.format(flight_ID.lower())
file2use = glob.glob(folder+filename)
if len(file2use) > 1:
print('WARNING: more than one file found! (so using latest file)')
print(file2use)
ds = xr.open_dataset(file2use[0])
# Only select the variables of interest and drop rows where these are NaNs
df = ds[[PressVar, LatVar, LonVar, TimeVar]].to_dataframe()
df = df.dropna()
# Add a location name (as Type)
df[LocVar] = LocName
# remove the index name and add index values to a column
df.index.name = None
try:
df[DateVar]
except KeyError:
df['datetime'] = df.index.values
# If doing a test, then just extract the first 150 points of flight track
if testing_mode:
df = df.head(150)
# Call planeflight maker...
prt_PlaneFlight_files_v12_plus(df=df, slist=slist,
Extra_spacings=Extra_spacings,
LON_var=LonVar, LAT_var=LatVar,
PRESS_var=PressVar, loc_var=LocVar,
num_tracers=num_tracers, rxn_nums=rxn_nums,
Username=Username,)
def reprocess_split_pf_output_over_2_lines(folder, save_original_file=True):
"""
Combine planeflight dat file lines where output split over 2 lines
"""
files2use = list(sorted(glob.glob(folder+'*plane.log*')))
for file2use in files2use:
with open(file2use, 'r') as file:
lines = [i.strip() for i in file]
file.close() # Force close
if save_original_file:
os.rename(file2use, file2use+'.orig')
else:
os.remove(file2use)
first_part = lines[0::2]
second_part = lines[1::2]
a = open(file2use, 'w')
for n_line, line in enumerate(first_part):
Str2use = '{} {}'
Newline = Str2use.format(first_part[n_line], second_part[n_line])
print(Newline, file=a)
a.close()
|
tsherwen/AC_tools
|
AC_tools/planeflight.py
|
Python
|
mit
| 23,222
|
[
"NetCDF"
] |
b23b593d1db8c65cd274d1494e3f4d16cdceba3a4c40f77e3b8a425a3673139d
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Virtualchain
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of Virtualchain
Virtualchain is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Virtualchain is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Virtualchain. If not, see <http://www.gnu.org/licenses/>.
"""
import os
import re
import copy
import binascii
import simplejson
import keylib
from six import int2byte, b, integer_types
from decimal import *
from .keys import btc_script_hex_to_address, btc_is_singlesig, btc_is_multisig, \
btc_script_deserialize, btc_script_serialize, btc_is_singlesig_segwit, btc_is_multisig_segwit, btc_get_singlesig_privkey, \
btc_make_p2sh_p2wpkh_redeem_script, btc_make_p2sh_p2wsh_redeem_script
from .opcodes import *
from ....lib import encoding, ecdsalib, hashing, merkle
from ....lib.config import get_features
import traceback
from ....lib.config import get_logger
log = get_logger('virtualchain')
# signature modes
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 128
# bitcoin constants
UINT_MAX = 4294967295
def read_as_int(ptr, buf, bytez):
ptr[0] += bytez
if ptr[0] > len(buf):
raise ValueError("Invalid transaction: tried to parse {} bytes of {} offset in {}".format(bytez, ptr[0] - bytez, buf.encode('hex')))
ret = encoding.decode( buf[ ptr[0]-bytez:ptr[0] ][::-1], 256 )
return ret
def read_var_int(ptr, buf):
ptr[0] += 1
if ptr[0] > len(buf):
raise ValueError("Invalid transaction: tried to parse {} byte of {} offset in {}".format(1, ptr[0] - 1, buf.encode('hex')))
val = encoding.from_byte_to_int( buf[ ptr[0]-1 ] )
if val < 253:
return val
ret = read_as_int(ptr, buf, pow(2, val - 252))
return ret
def read_bytes(ptr, buf, bytez):
ptr[0] += bytez
if ptr[0] > len(buf):
raise ValueError("Invalid transaction: tried to parse {} bytes of {} offset in {}".format(bytez, ptr[0] - bytez, buf.encode('hex')))
ret = buf[ ptr[0]-bytez:ptr[0] ]
return ret
def peek_bytes(ptr, buf, bytez):
if ptr[0] + bytez > len(buf):
raise ValueError("Invalid transaction: tried to parse {} bytes of {} offset in {}".format(bytez, ptr[0], buf.encode('hex')))
ret = buf[ ptr[0]:ptr[0]+bytez ]
return ret
def read_var_string(ptr, buf):
size = read_var_int(ptr, buf)
return read_bytes(ptr, buf, size)
def read_tx_body(ptr, tx):
"""
Returns {'ins': [...], 'outs': [...]}
"""
_obj = {"ins": [], "outs": [], 'locktime': None}
# number of inputs
ins = read_var_int(ptr, tx)
# all inputs
for i in range(ins):
_obj["ins"].append({
"outpoint": {
"hash": read_bytes(ptr, tx, 32)[::-1],
"index": read_as_int(ptr, tx, 4)
},
"script": read_var_string(ptr, tx),
"sequence": read_as_int(ptr, tx, 4)
})
# number of outputs
outs = read_var_int(ptr, tx)
# all outputs
for i in range(outs):
_obj["outs"].append({
"value": read_as_int(ptr, tx, 8),
"script": read_var_string(ptr, tx)
})
return _obj
def read_tx_witnesses(ptr, tx, num_witnesses):
"""
Returns an array of witness scripts.
Each witness will be a bytestring (i.e. encoding the witness script)
"""
witnesses = []
for i in xrange(0, num_witnesses):
witness_stack_len = read_var_int(ptr, tx)
witness_stack = []
for j in xrange(0, witness_stack_len):
stack_item = read_var_string(ptr, tx)
witness_stack.append(stack_item)
witness_script = btc_witness_script_serialize(witness_stack).decode('hex')
witnesses.append(witness_script)
return witnesses
def make_var_string(string):
"""
Make a var-string (a var-int with the length, concatenated with the data)
Return the hex-encoded string
"""
s = None
if isinstance(string, str) and re.match('^[0-9a-fA-F]*$', string):
# convert from hex to bin, safely
s = binascii.unhexlify(string)
else:
s = string[:]
buf = encoding.num_to_var_int(len(s)) + s
return buf.encode('hex')
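# Illustrative sketch of the var-string encoding above (hypothetical payload,
# not a real script):
#
#   make_var_string('deadbeef')   # -> '04deadbeef'
#
# the leading '04' is the var-int length (4 bytes) prepended to the payload;
# payloads of 253 bytes or more get the longer var-int prefixes
# (0xfd/0xfe/0xff) produced by encoding.num_to_var_int.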
def _btc_witness_serialize_unit(unit):
"""
Encode one item of a BTC witness script
Return the encoded item (as a string)
Returns a byte string with the encoded unit
Based on code from pybitcointools (https://github.com/vbuterin/pybitcointools)
by Vitalik Buterin
"""
if isinstance(unit, int):
# pass literal
return encoding.from_int_to_byte(unit)
elif unit is None:
# None means OP_0
return b'\x00'
else:
# return as a varint-prefixed string
return make_var_string(unit)
def btc_witness_script_serialize(_stack):
"""
Given a deserialized witness script stack (i.e. the input-specific witness, as an array of
Nones, ints, and strings), turn it back into a hex-encoded script
"""
stack = _stack
if encoding.json_is_base(_stack, 16):
# hex-to-bin all hex strings
stack = encoding.json_changebase(_stack, lambda x: binascii.unhexlify(x))
return encoding.safe_hexlify(_btc_witness_serialize_unit(len(stack)) + ''.join(map(lambda stack_unit: _btc_witness_serialize_unit(stack_unit), stack)))
def btc_witness_script_deserialize(_script):
"""
Given a hex-encoded serialized witness script, turn it into a witness stack
(i.e. an array of Nones, ints, and strings)
"""
script = None
if isinstance(_script, str) and re.match('^[0-9a-fA-F]*$', _script):
# convert from hex to bin, safely
script = binascii.unhexlify(_script)
else:
script = _script[:]
# pointer to byte offset in _script (as an array due to Python scoping rules)
ptr = [0]
witness_stack_len = read_var_int(ptr, script)
witness_stack = []
for _ in xrange(0, witness_stack_len):
stack_item = read_var_string(ptr, script)
witness_stack.append(stack_item)
return witness_stack
def btc_tx_deserialize(_tx, **blockchain_opts):
"""
Given a hex-encoded transaction, decode it into an object
with the following structure:
{
ins: [
{
outpoint: { hash: ..., index: ... },
script: ...,
sequence: ...,
witness_script: ..., # not included if not segwit
}, ...
]
outs: [
{
value: ...,
script: ...
}, ...
],
version: ...,
locktime: ...
}
Derived from pybitcointools (https://github.com/vbuterin/pybitcointools) written by Vitalik Buterin
Throws an exception if there are remaining bytes
"""
tx = None
if isinstance(_tx, str) and re.match('^[0-9a-fA-F]*$', _tx):
# convert from hex to bin, safely
tx = binascii.unhexlify(_tx)
else:
tx = _tx[:]
# pointer to byte offset in _tx (as an array due to Python scoping rules)
ptr = [0]
# top-level tx
obj = {"ins": [], "outs": [], 'version': None, 'locktime': None}
# get version
obj["version"] = read_as_int(ptr, tx, 4)
# segwit? (bip143)
# 5th byte will be 0 and 6th byte will be flags (nonzero) if so
bip143 = peek_bytes(ptr, tx, 2)
if ord(bip143[0]) == 0 and ord(bip143[1]) != 0:
# segwit
# consume marker
read_bytes(ptr, tx, 2)
# get the rest of the body
body = read_tx_body(ptr, tx)
obj['ins'] = body['ins']
obj['outs'] = body['outs']
# read witnesses for each input
witness_scripts = read_tx_witnesses(ptr, tx, len(obj['ins']))
if len(witness_scripts) != len(obj['ins']):
raise ValueError('Invalid number of witnesses in {}'.format(_tx))
for i in xrange(0, len(witness_scripts)):
obj['ins'][i]['witness_script'] = witness_scripts[i]
else:
# non-segwit
body = read_tx_body(ptr, tx)
obj['ins'] = body['ins']
obj['outs'] = body['outs']
# locktime
obj["locktime"] = read_as_int(ptr, tx, 4)
if not ptr[0] == len(tx):
# log.warning('Did not parse entire tx ({} bytes remaining)'.format(len(tx) - ptr[0]))
raise ValueError('Did not parse entire tx ({} bytes remaining)'.format(len(tx) - ptr[0]))
# hexlify each byte field
obj = encoding.json_changebase(obj, lambda x: encoding.safe_hexlify(x))
return obj
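# Illustrative round-trip sketch for the (de)serializer pair above; the
# transaction hex is a placeholder, not a real on-chain transaction:
#
#   raw_tx = '01000000...'                       # hex-encoded transaction
#   txobj = btc_tx_deserialize(raw_tx)
#   txobj['version'], txobj['locktime']          # ints
#   txobj['ins'][0]['outpoint']['hash']          # hex txid in display byte order
#   btc_tx_serialize(txobj) == raw_tx            # should hold for well-formed txs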
def btc_tx_serialize(_txobj):
"""
Given a transaction dict returned by btc_tx_deserialize, convert it back into a
hex-encoded byte string.
Derived from code written by Vitalik Buterin in pybitcointools (https://github.com/vbuterin/pybitcointools)
"""
# output buffer
o = []
txobj = None
if encoding.json_is_base(_txobj, 16):
# txobj is built from hex strings already. deserialize them
txobj = encoding.json_changebase(_txobj, lambda x: binascii.unhexlify(x))
else:
txobj = copy.deepcopy(_txobj)
# version
o.append(encoding.encode(txobj["version"], 256, 4)[::-1])
# do we have any witness scripts?
have_witness = False
for inp in txobj['ins']:
if inp.has_key('witness_script') and len(inp['witness_script']) > 0:
have_witness = True
break
if have_witness:
# add segwit marker
o.append('\x00\x01')
# number of inputs
o.append(encoding.num_to_var_int(len(txobj["ins"])))
# all inputs
for inp in txobj["ins"]:
# input tx hash
o.append(inp["outpoint"]["hash"][::-1])
# input tx outpoint
o.append(encoding.encode(inp["outpoint"]["index"], 256, 4)[::-1])
# input scriptsig
script = inp.get('script')
if not script:
script = bytes()
scriptsig = encoding.num_to_var_int(len(script)) + script
o.append(scriptsig)
# sequence
o.append(encoding.encode(inp.get("sequence", UINT_MAX - 1), 256, 4)[::-1])
# number of outputs
o.append(encoding.num_to_var_int(len(txobj["outs"])))
# all outputs
for out in txobj["outs"]:
# value
o.append(encoding.encode(out["value"], 256, 8)[::-1])
# scriptPubKey
scriptpubkey = encoding.num_to_var_int(len(out['script'])) + out['script']
o.append(scriptpubkey)
# add witnesses
if have_witness:
for inp in txobj['ins']:
witness_script = inp.get('witness_script')
if not witness_script:
witness_script = '\x00'
o.append(witness_script)
# locktime
o.append(encoding.encode(txobj["locktime"], 256, 4)[::-1])
# full string
ret = ''.join( encoding.json_changebase(o, lambda x: encoding.safe_hexlify(x)) )
return ret
def btc_bitcoind_tx_is_coinbase( tx ):
"""
Is a transaction a coinbase transaction?
tx is a bitcoind-given transaction structure
"""
for inp in tx['vin']:
if 'coinbase' in inp.keys():
return True
return False
def btc_bitcoind_tx_serialize( tx ):
"""
Convert a *Bitcoind*-given transaction into its hex string.
tx format is {'vin': [...], 'vout': [...], 'locktime': ..., 'version': ...},
with the same formatting rules as getrawtransaction.
(in particular, each value in vout is a Decimal, in BTC)
"""
tx_ins = []
tx_outs = []
try:
for inp in tx['vin']:
next_inp = {
"outpoint": {
"index": int(inp['vout']),
"hash": str(inp['txid'])
}
}
if 'sequence' in inp:
next_inp['sequence'] = int(inp['sequence'])
else:
next_inp['sequence'] = UINT_MAX
if 'scriptSig' in inp:
next_inp['script'] = str(inp['scriptSig']['hex'])
else:
next_inp['script'] = ""
if 'txinwitness' in inp:
next_inp['witness_script'] = btc_witness_script_serialize(inp['txinwitness'])
tx_ins.append(next_inp)
for out in tx['vout']:
assert out['value'] < 1000, "High transaction value\n%s" % simplejson.dumps(tx, indent=4, sort_keys=True)
next_out = {
'value': int(Decimal(out['value'] * 10**8)),
'script': str(out['scriptPubKey']['hex'])
}
tx_outs.append(next_out)
tx_fields = {
"locktime": int(tx['locktime']),
"version": int(tx['version']),
"ins": tx_ins,
"outs": tx_outs
}
tx_serialized = btc_tx_serialize( tx_fields )
return str(tx_serialized)
except KeyError, ke:
if btc_bitcoind_tx_is_coinbase(tx) and 'hex' in tx.keys():
tx_serialized = tx['hex']
return str(tx_serialized)
log.error("Key error in:\n%s" % simplejson.dumps(tx, indent=4, sort_keys=True))
traceback.print_exc()
raise ke
def btc_tx_is_segwit( tx_serialized ):
"""
Is this serialized (hex-encoded) transaction a segwit transaction?
"""
marker_offset = 4 # 5th byte is the marker byte
flag_offset = 5 # 6th byte is the flag byte
marker_byte_string = tx_serialized[2*marker_offset:2*(marker_offset+1)]
flag_byte_string = tx_serialized[2*flag_offset:2*(flag_offset+1)]
if marker_byte_string == '00' and flag_byte_string != '00':
# segwit (per BIP144)
return True
else:
return False
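# Illustrative sketch of the BIP144 marker/flag test above: in a segwit
# serialization the 5th byte (right after the 4-byte version) is 0x00 and the
# 6th byte is a non-zero flag. Hypothetical prefixes, not complete transactions:
#
#   btc_tx_is_segwit('02000000' + '0001' + '...')   # -> True (marker 00, flag 01)
#   btc_tx_is_segwit('01000000' + '01' + '...')     # -> False (legacy input count)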
def btc_tx_witness_strip( tx_serialized ):
"""
Strip the witness information from a serialized transaction
"""
if not btc_tx_is_segwit(tx_serialized):
# already stripped
return tx_serialized
tx = btc_tx_deserialize(tx_serialized)
for inp in tx['ins']:
del inp['witness_script']
tx_stripped = btc_tx_serialize(tx)
return tx_stripped
def btc_tx_get_hash( tx_serialized, hashcode=None ):
"""
Make a transaction hash (txid) from a hex tx, optionally along with a sighash.
This DOES NOT WORK for segwit transactions
"""
if btc_tx_is_segwit(tx_serialized):
raise ValueError('Segwit transaction: {}'.format(tx_serialized))
tx_bin = binascii.unhexlify(tx_serialized)
if hashcode:
return binascii.hexlify( hashing.bin_double_sha256(tx_bin + encoding.encode(int(hashcode), 256, 4)[::-1]) )
else:
return binascii.hexlify( hashing.bin_double_sha256(tx_bin)[::-1] )
def btc_tx_script_to_asm( script_hex ):
"""
Decode a script into assembler
"""
if len(script_hex) == 0:
return ""
try:
script_array = btc_script_deserialize(script_hex)
except:
log.error("Failed to convert '%s' to assembler" % script_hex)
raise
script_tokens = []
for token in script_array:
if token is None:
token = 0
token_name = None
if type(token) in [int,long]:
token_name = OPCODE_NAMES.get(token, None)
if token_name is None:
token_name = str(token)
else:
token_name = token
script_tokens.append(token_name)
return " ".join(script_tokens)
def btc_tx_output_has_data(output, **blockchain_opts):
"""
Does this output have user data?
@output must be an element from the 'outs' list in btc_tx_deserialize()
"""
return btc_tx_output_script_has_data(output['script'], **blockchain_opts)
def btc_tx_output_script_has_data(output_script, **blockchain_opts):
"""
Does a btc output script have data? i.e. is it an OP_RETURN?
The script must be hex-encoded
"""
if len(output_script) < 2:
return False
return int(output_script[0:2], 16) == OPCODE_VALUES['OP_RETURN']
def btc_tx_extend(partial_tx_hex, new_inputs, new_outputs, **blockchain_opts):
"""
Given an unsigned serialized transaction, add more inputs and outputs to it.
@new_inputs and @new_outputs will be virtualchain-formatted:
* new_inputs[i] will have {'outpoint': {'index':..., 'hash':...}, 'script':..., 'witness_script': ...}
* new_outputs[i] will have {'script':..., 'value':... (in fundamental units, e.g. satoshis!)}
"""
# recover tx
tx = btc_tx_deserialize(partial_tx_hex)
tx_inputs, tx_outputs = tx['ins'], tx['outs']
locktime, version = tx['locktime'], tx['version']
tx_inputs += new_inputs
tx_outputs += new_outputs
new_tx = {
'ins': tx_inputs,
'outs': tx_outputs,
'locktime': locktime,
'version': version,
}
new_unsigned_tx = btc_tx_serialize(new_tx)
return new_unsigned_tx
def btc_tx_der_encode_integer(r):
"""
Return a DER-encoded integer
Based on code from python-ecdsa (https://github.com/warner/python-ecdsa)
by Brian Warner. Subject to the MIT license.
"""
# borrowed from python-ecdsa
if r < 0:
raise ValueError('cannot support negative numbers')
h = ("%x" % r).encode()
if len(h) % 2:
h = b("0") + h
s = binascii.unhexlify(h)
num = s[0] if isinstance(s[0], integer_types) else ord(s[0])
if num <= 0x7f:
return b("\x02") + int2byte(len(s)) + s
else:
# DER integers are two's complement, so if the first byte is
# 0x80-0xff then we need an extra 0x00 byte to prevent it from
# looking negative.
return b("\x02") + int2byte(len(s)+1) + b("\x00") + s
def btc_tx_der_encode_length(l):
"""
Return a DER-encoded length field
Based on code from python-ecdsa (https://github.com/warner/python-ecdsa)
by Brian Warner. Subject to the MIT license.
"""
if l < 0:
raise ValueError("length cannot be negative")
if l < 0x80:
return int2byte(l)
s = ("%x" % l).encode()
if len(s) % 2:
s = b("0") + s
s = binascii.unhexlify(s)
llen = len(s)
return int2byte(0x80 | llen) + s
def btc_tx_der_encode_sequence(*encoded_pieces):
"""
Return a DER-encoded sequence
Based on code from python-ecdsa (https://github.com/warner/python-ecdsa)
by Brian Warner. Subject to the MIT license.
"""
# borrowed from python-ecdsa
total_len = sum([len(p) for p in encoded_pieces])
return b('\x30') + btc_tx_der_encode_length(total_len) + b('').join(encoded_pieces)
def btc_tx_der_encode_signature(r, s):
"""
Return a DER-encoded signature as a 2-item sequence
Based on code from python-ecdsa (https://github.com/warner/python-ecdsa)
by Brian Warner. Subject to the MIT license.
"""
# borrowed from python-ecdsa
return btc_tx_der_encode_sequence(btc_tx_der_encode_integer(r), btc_tx_der_encode_integer(s))
def btc_tx_sighash( tx, idx, script, hashcode=SIGHASH_ALL):
"""
Calculate the sighash of a non-segwit transaction.
If it's SIGHASH_NONE, then digest the inputs but no outputs
If it's SIGHASH_SINGLE, then digest all inputs and all outputs up to i (excluding values and scripts), and fully digest the ith input and output
If it's (something) | SIGHASH_ANYONECANPAY, then only digest the ith input.
Return the double-sha256 digest of the relevant fields.
THIS DOES NOT WORK WITH SEGWIT OUTPUTS
Adapted from https://github.com/vbuterin/pybitcointools, by Vitalik Buterin
"""
txobj = btc_tx_deserialize(tx)
idx = int(idx)
hashcode = int(hashcode)
newtx = copy.deepcopy(txobj)
# remove all scriptsigs in all inputs, except for the ith input's scriptsig.
# the other inputs will be 'partially signed', except for SIGHASH_ANYONECANPAY mode.
for i in xrange(0, len(newtx['ins'])):
newtx['ins'][i]["script"] = ''
if i == idx:
if newtx['ins'][i].has_key('witness_script') and newtx['ins'][i]['witness_script']:
raise ValueError('this method does not handle segwit inputs')
if newtx['ins'][i].has_key('witness_script'):
del newtx['ins'][i]['witness_script']
newtx["ins"][idx]["script"] = script
if (hashcode & 0x1f) == SIGHASH_NONE:
# don't care about the outputs with this signature
newtx["outs"] = []
for inp in newtx['ins']:
inp['sequence'] = 0
elif (hashcode & 0x1f) == SIGHASH_SINGLE:
# only signing for this input.
# all outputs after this input will not be signed.
# all outputs before this input will be partially signed (but not their values or scripts)
if len(newtx['ins']) > len(newtx['outs']):
raise ValueError('invalid hash code: {} inputs but {} outputs'.format(len(newtx['ins']), len(newtx['outs'])))
newtx["outs"] = newtx["outs"][:len(newtx["ins"])]
for out in newtx["outs"][:len(newtx["ins"]) - 1]:
out['value'] = 2**64 - 1
out['script'] = ""
elif (hashcode & SIGHASH_ANYONECANPAY) != 0:
# only going to sign this specific input, and nothing else
newtx["ins"] = [newtx["ins"][idx]]
signing_tx = btc_tx_serialize(newtx)
sighash = btc_tx_get_hash( signing_tx, hashcode )
return sighash
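# Illustrative sketch of how the hashcode flags used above combine (constants
# defined at the top of this module, values per the Bitcoin protocol):
#
#   hashcode = SIGHASH_SINGLE | SIGHASH_ANYONECANPAY   # -> 0x83
#   (hashcode & 0x1f) == SIGHASH_SINGLE                # -> True
#   (hashcode & SIGHASH_ANYONECANPAY) != 0             # -> True
#
# i.e. the low five bits select ALL/NONE/SINGLE and the 0x80 bit restricts the
# digest to the single input being signed.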
def btc_tx_sighash_segwit(tx, i, prevout_amount, prevout_script, hashcode=SIGHASH_ALL):
"""
Calculate the sighash for a segwit transaction, according to bip143
"""
txobj = btc_tx_deserialize(tx)
hash_prevouts = encoding.encode(0, 256, 32)
hash_sequence = encoding.encode(0, 256, 32)
hash_outputs = encoding.encode(0, 256, 32)
if (hashcode & SIGHASH_ANYONECANPAY) == 0:
prevouts = ''
for inp in txobj['ins']:
prevouts += hashing.reverse_hash(inp['outpoint']['hash'])
prevouts += encoding.encode(inp['outpoint']['index'], 256, 4)[::-1].encode('hex')
hash_prevouts = hashing.bin_double_sha256(prevouts.decode('hex'))
# print 'prevouts: {}'.format(prevouts)
if (hashcode & SIGHASH_ANYONECANPAY) == 0 and (hashcode & 0x1f) != SIGHASH_SINGLE and (hashcode & 0x1f) != SIGHASH_NONE:
sequences = ''
for inp in txobj['ins']:
sequences += encoding.encode(inp['sequence'], 256, 4)[::-1].encode('hex')
hash_sequence = hashing.bin_double_sha256(sequences.decode('hex'))
# print 'sequences: {}'.format(sequences)
if (hashcode & 0x1f) != SIGHASH_SINGLE and (hashcode & 0x1f) != SIGHASH_NONE:
outputs = ''
for out in txobj['outs']:
outputs += encoding.encode(out['value'], 256, 8)[::-1].encode('hex')
outputs += make_var_string(out['script'])
hash_outputs = hashing.bin_double_sha256(outputs.decode('hex'))
# print 'outputs: {}'.format(outputs)
elif (hashcode & 0x1f) == SIGHASH_SINGLE and i < len(txobj['outs']):
outputs = ''
outputs += encoding.encode(txobj['outs'][i]['value'], 256, 8)[::-1].encode('hex')
outputs += make_var_string(txobj['outs'][i]['script'])
hash_outputs = hashing.bin_double_sha256(outputs.decode('hex'))
# print 'outputs: {}'.format(outputs)
# print 'hash_prevouts: {}'.format(hash_prevouts.encode('hex'))
# print 'hash_sequence: {}'.format(hash_sequence.encode('hex'))
# print 'hash_outputs: {}'.format(hash_outputs.encode('hex'))
# print 'prevout_script: {}'.format(prevout_script)
# print 'prevout_amount: {}'.format(prevout_amount)
sighash_preimage = ''
sighash_preimage += encoding.encode(txobj['version'], 256, 4)[::-1].encode('hex')
sighash_preimage += hash_prevouts.encode('hex')
sighash_preimage += hash_sequence.encode('hex')
# this input's prevout, script, amount, and sequence
sighash_preimage += hashing.reverse_hash(txobj['ins'][i]['outpoint']['hash'])
sighash_preimage += encoding.encode(txobj['ins'][i]['outpoint']['index'], 256, 4)[::-1].encode('hex')
sighash_preimage += make_var_string(prevout_script)
sighash_preimage += encoding.encode(prevout_amount, 256, 8)[::-1].encode('hex')
sighash_preimage += encoding.encode(txobj['ins'][i]['sequence'], 256, 4)[::-1].encode('hex')
sighash_preimage += hash_outputs.encode('hex')
sighash_preimage += encoding.encode(txobj['locktime'], 256, 4)[::-1].encode('hex')
sighash_preimage += encoding.encode(hashcode, 256, 4)[::-1].encode('hex')
sighash = hashing.bin_double_sha256(sighash_preimage.decode('hex')).encode('hex')
# print 'sighash_preimage: {}'.format(sighash_preimage)
# print 'sighash: {}'.format(sighash)
return sighash
def btc_tx_make_input_signature(tx, idx, prevout_script, privkey_str, hashcode):
"""
Sign a single input of a transaction, given the serialized tx,
the input index, the output's scriptPubkey, and the hashcode.
tx must be a hex-encoded string
privkey_str must be a hex-encoded private key
Return the hex signature.
THIS DOES NOT WORK WITH SEGWIT TRANSACTIONS
"""
if btc_tx_is_segwit(tx):
raise ValueError('tried to use the standard sighash to sign a segwit transaction')
pk = ecdsalib.ecdsa_private_key(str(privkey_str))
priv = pk.to_hex()
# get the parts of the tx we actually need to sign
sighash = btc_tx_sighash(tx, idx, prevout_script, hashcode)
# print 'non-segwit sighash: {}'.format(sighash)
# sign using uncompressed private key
pk_uncompressed_hex, pubk_uncompressed_hex = ecdsalib.get_uncompressed_private_and_public_keys(priv)
sigb64 = ecdsalib.sign_digest( sighash, priv )
# sanity check
# assert ecdsalib.verify_digest( txhash, pubk_uncompressed_hex, sigb64 )
sig_r, sig_s = ecdsalib.decode_signature(sigb64)
sig_bin = btc_tx_der_encode_signature(sig_r, sig_s)
sig = sig_bin.encode('hex') + encoding.encode(hashcode, 16, 2)
return sig
def btc_tx_make_input_signature_segwit(tx, idx, prevout_amount, prevout_script, privkey_str, hashcode):
"""
Sign a single input of a transaction, given the serialized tx,
the input index, the output's scriptPubkey, and the hashcode.
tx must be a hex-encoded string
privkey_str must be a hex-encoded private key
Return the hex signature.
"""
# always compressed
if len(privkey_str) == 64:
privkey_str += '01'
pk = ecdsalib.ecdsa_private_key(str(privkey_str))
pubk = pk.public_key()
priv = pk.to_hex()
# must always be compressed
pub = keylib.key_formatting.compress(pubk.to_hex())
sighash = btc_tx_sighash_segwit(tx, idx, prevout_amount, prevout_script, hashcode=hashcode)
# sign using uncompressed private key
# pk_uncompressed_hex, pubk_uncompressed_hex = ecdsalib.get_uncompressed_private_and_public_keys(priv)
sigb64 = ecdsalib.sign_digest( sighash, priv )
# sanity check
# assert ecdsalib.verify_digest( txhash, pubk_uncompressed_hex, sigb64 )
sig_r, sig_s = ecdsalib.decode_signature(sigb64)
sig_bin = btc_tx_der_encode_signature(sig_r, sig_s)
sig = sig_bin.encode('hex') + encoding.encode(hashcode, 16, 2)
# print 'segwit signature: {}'.format(sig)
return sig
def btc_tx_sign_multisig(tx, idx, redeem_script, private_keys, hashcode=SIGHASH_ALL):
"""
Sign a p2sh multisig input (not segwit!).
@tx must be a hex-encoded tx
Return the signed transaction
"""
from .multisig import parse_multisig_redeemscript
# sign in the right order. map all possible public keys to their private key
txobj = btc_tx_deserialize(str(tx))
privs = {}
for pk in private_keys:
pubk = ecdsalib.ecdsa_private_key(pk).public_key().to_hex()
compressed_pubkey = keylib.key_formatting.compress(pubk)
uncompressed_pubkey = keylib.key_formatting.decompress(pubk)
privs[compressed_pubkey] = pk
privs[uncompressed_pubkey] = pk
m, public_keys = parse_multisig_redeemscript(str(redeem_script))
used_keys, sigs = [], []
for public_key in public_keys:
if public_key not in privs:
continue
if len(used_keys) == m:
break
if public_key in used_keys:
raise ValueError('Tried to reuse key in redeem script: {}'.format(public_key))
pk_str = privs[public_key]
used_keys.append(public_key)
sig = btc_tx_make_input_signature(tx, idx, redeem_script, pk_str, hashcode)
sigs.append(sig)
if len(used_keys) != m:
raise ValueError('Missing private keys (used {}, required {})'.format(len(used_keys), m))
txobj["ins"][idx]["script"] = btc_script_serialize([None] + sigs + [redeem_script])
return btc_tx_serialize(txobj)
def btc_tx_sign_multisig_segwit(tx, idx, prevout_amount, witness_script, private_keys, hashcode=SIGHASH_ALL, hashcodes=None, native=False):
"""
Sign a native p2wsh or p2sh-p2wsh multisig input.
@tx must be a hex-encoded tx
Return the signed transaction
"""
from .multisig import parse_multisig_redeemscript
if hashcodes is None:
hashcodes = [hashcode] * len(private_keys)
txobj = btc_tx_deserialize(str(tx))
privs = {}
for pk in private_keys:
pubk = ecdsalib.ecdsa_private_key(pk).public_key().to_hex()
compressed_pubkey = keylib.key_formatting.compress(pubk)
privs[compressed_pubkey] = pk
m, public_keys = parse_multisig_redeemscript(witness_script)
used_keys, sigs = [], []
for i, public_key in enumerate(public_keys):
if public_key not in privs:
continue
if len(used_keys) == m:
break
if public_key in used_keys:
raise ValueError('Tried to reuse key in witness script: {}'.format(public_key))
pk_str = privs[public_key]
used_keys.append(public_key)
sig = btc_tx_make_input_signature_segwit(tx, idx, prevout_amount, witness_script, pk_str, hashcodes[i])
sigs.append(sig)
# print ''
if len(used_keys) != m:
raise ValueError('Missing private keys (used {}, required {})'.format(len(used_keys), m))
if native:
# native p2wsh
txobj['ins'][idx]['witness_script'] = btc_witness_script_serialize([None] + sigs + [witness_script])
# print 'segwit multisig: native p2wsh: witness script {}'.format(txobj['ins'][idx]['witness_script'])
else:
# p2sh-p2wsh
redeem_script = btc_make_p2sh_p2wsh_redeem_script(witness_script)
txobj['ins'][idx]['script'] = redeem_script
txobj['ins'][idx]['witness_script'] = btc_witness_script_serialize([None] + sigs + [witness_script])
# print 'segwit multisig: p2sh p2wsh: witness script {}'.format(txobj['ins'][idx]['witness_script'])
# print 'segwit multisig: p2sh p2wsh: redeem script {}'.format(txobj['ins'][idx]['script'])
return btc_tx_serialize(txobj)
def btc_tx_sign(tx_hex, idx, prevout_script, prevout_amount, private_key_info, scriptsig_type, hashcode=SIGHASH_ALL, hashcodes=None, redeem_script=None, witness_script=None):
"""
    Insert a scriptSig (and witness data, where applicable) for an input that spends a
    p2pkh, p2pk, p2sh, or segwit (p2wpkh/p2wsh/p2sh-p2wpkh/p2sh-p2wsh) output.
@private_key_info is either a single private key, or a dict with 'redeem_script' and 'private_keys' defined.
    @redeem_script, if given, must NOT be prefixed with the varint encoding its length;
    it must otherwise be a plain hex string.
Return the transaction with the @idx'th scriptSig filled in.
"""
new_tx = None
# print 'sign input {} as {}'.format(idx, scriptsig_type)
if scriptsig_type in ['p2pkh', 'p2pk']:
if not btc_is_singlesig(private_key_info):
raise ValueError('Need only one private key for {}'.format(scriptsig_type))
pk = ecdsalib.ecdsa_private_key(str(private_key_info))
pubk = pk.public_key()
pub = pubk.to_hex()
sig = btc_tx_make_input_signature(tx_hex, idx, prevout_script, private_key_info, hashcode)
# print 'non-segwit sig: {}'.format(sig)
# print 'non-segwit pubk: {}'.format(pub)
# NOTE: sig and pub need to be hex-encoded
txobj = btc_tx_deserialize(str(tx_hex))
if scriptsig_type == 'p2pkh':
# scriptSig + scriptPubkey is <signature> <pubkey> OP_DUP OP_HASH160 <pubkeyhash> OP_EQUALVERIFY OP_CHECKSIG
txobj['ins'][idx]['script'] = btc_script_serialize([sig, pub])
else:
# p2pk
# scriptSig + scriptPubkey is <signature> <pubkey> OP_CHECKSIG
txobj['ins'][idx]['script'] = btc_script_serialize([sig])
new_tx = btc_tx_serialize(txobj)
elif scriptsig_type == 'p2wpkh' or scriptsig_type == 'p2sh-p2wpkh':
# must be a segwit singlesig bundle
if not btc_is_singlesig_segwit(private_key_info):
raise ValueError('Keys are not for p2wpkh or p2sh-p2wpkh')
privkey_str = str(btc_get_singlesig_privkey(private_key_info))
pk = ecdsalib.ecdsa_private_key(privkey_str)
pubk = pk.public_key()
pub = keylib.key_formatting.compress(pubk.to_hex())
# special bip141 rule: this is always 0x1976a914{20-byte-pubkey-hash}88ac
prevout_script_sighash = '76a914' + hashing.bin_hash160(pub.decode('hex')).encode('hex') + '88ac'
sig = btc_tx_make_input_signature_segwit(tx_hex, idx, prevout_amount, prevout_script_sighash, privkey_str, hashcode)
txobj = btc_tx_deserialize(str(tx_hex))
txobj['ins'][idx]['witness_script'] = btc_witness_script_serialize([sig, pub])
if scriptsig_type == 'p2wpkh':
# native
# NOTE: sig and pub need to be hex-encoded
txobj['ins'][idx]['script'] = ''
if redeem_script:
# goes in scriptSig
txobj['ins'][idx]['script'] = btc_script_serialize([redeem_script])
else:
# p2sh-p2wpkh
redeem_script = btc_make_p2sh_p2wpkh_redeem_script(pub)
txobj['ins'][idx]['script'] = redeem_script
# print 'scriptsig {} from {} is {}'.format(pub, privkey_str, txobj['ins'][idx]['script'])
new_tx = btc_tx_serialize(txobj)
# print 'scriptsig type: {}'.format(scriptsig_type)
# print 'segwit scriptsig: {}'.format(txobj['ins'][idx]['script'])
# print 'segwit witness script: {}'.format(txobj['ins'][idx]['witness_script'])
elif scriptsig_type == 'p2wsh' or scriptsig_type == 'p2sh-p2wsh':
# only support p2wsh for multisig purposes at this time
if not btc_is_multisig_segwit(private_key_info):
raise ValueError('p2wsh requires a multisig key bundle')
native = None
if scriptsig_type == 'p2wsh':
native = True
else:
native = False
new_tx = btc_tx_sign_multisig_segwit(tx_hex, idx, prevout_amount, private_key_info['redeem_script'], private_key_info['private_keys'], hashcode=hashcode, hashcodes=hashcodes, native=native)
txobj = btc_tx_deserialize(new_tx)
# print 'segwit scriptsig: {}'.format(txobj['ins'][idx]['script'])
# print 'segwit witness script: {}'.format(txobj['ins'][idx]['witness_script'])
elif scriptsig_type == 'p2sh':
if not redeem_script:
# p2sh multisig
if not btc_is_multisig(private_key_info):
raise ValueError('No redeem script given, and not a multisig key bundle')
new_tx = btc_tx_sign_multisig(tx_hex, idx, private_key_info['redeem_script'], private_key_info['private_keys'], hashcode=hashcode)
else:
# NOTE: sig and pub need to be hex-encoded
txobj = btc_tx_deserialize(str(tx_hex))
# scriptSig + scriptPubkey is <redeem script> OP_HASH160 <script hash> OP_EQUAL
txobj['ins'][idx]['script'] = btc_script_serialize([redeem_script])
new_tx = btc_tx_serialize(txobj)
else:
raise ValueError("Unknown script type {}".format(scriptsig_type))
return new_tx
def btc_script_classify(scriptpubkey, private_key_info=None):
"""
Classify a scriptpubkey, optionally also using the private key info that will generate the corresponding scriptsig/witness
Return None if not known (nonstandard)
"""
if scriptpubkey.startswith("76a914") and scriptpubkey.endswith("88ac") and len(scriptpubkey) == 50:
return 'p2pkh'
elif scriptpubkey.startswith("a914") and scriptpubkey.endswith("87") and len(scriptpubkey) == 46:
# maybe p2sh-p2wpkh or p2sh-p2wsh?
if private_key_info:
if btc_is_singlesig_segwit(private_key_info):
return 'p2sh-p2wpkh'
elif btc_is_multisig_segwit(private_key_info):
return 'p2sh-p2wsh'
return 'p2sh'
elif scriptpubkey.startswith('0014') and len(scriptpubkey) == 44:
return 'p2wpkh'
elif scriptpubkey.startswith('0020') and len(scriptpubkey) == 68:
return 'p2wsh'
script_tokens = btc_script_deserialize(scriptpubkey)
if len(script_tokens) == 0:
return None
if script_tokens[0] == OPCODE_VALUES['OP_RETURN']:
return "nulldata"
elif script_tokens[-1] == OPCODE_VALUES['OP_CHECKMULTISIG']:
return "multisig"
elif len(script_tokens) == 2 and script_tokens[-1] == OPCODE_VALUES["OP_CHECKSIG"]:
return "p2pk"
return None
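# Illustrative sketch (not part of the original module): the fixed hex templates that
# btc_script_classify keys on above. The 20- and 32-byte hashes are zero placeholders.
def _example_script_templates():
    h160 = '00' * 20
    h256 = '00' * 32
    return {
        'p2pkh': '76a914' + h160 + '88ac',   # OP_DUP OP_HASH160 <hash160> OP_EQUALVERIFY OP_CHECKSIG
        'p2sh': 'a914' + h160 + '87',        # OP_HASH160 <hash160> OP_EQUAL
        'p2wpkh': '0014' + h160,             # version-0 witness program, 20-byte key hash
        'p2wsh': '0020' + h256,              # version-0 witness program, 32-byte script hash
    }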
def btc_privkey_scriptsig_classify(private_key_info):
"""
What kind of scriptsig can this private key make?
"""
if btc_is_singlesig(private_key_info):
return 'p2pkh'
if btc_is_multisig(private_key_info):
return 'p2sh'
if btc_is_singlesig_segwit(private_key_info):
return 'p2sh-p2wpkh'
if btc_is_multisig_segwit(private_key_info):
return 'p2sh-p2wsh'
return None
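# Illustrative usage sketch (not part of the original module), assuming the btc_is_*
# helpers accept these shapes. The key material shown is a made-up placeholder:
#   btc_privkey_scriptsig_classify('aa' * 32 + '01')   -> 'p2pkh' (single hex key)
#   btc_privkey_scriptsig_classify({'redeem_script': '...', 'private_keys': ['...']})
#                                                       -> 'p2sh' (multisig bundle)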
def btc_tx_sign_input(tx, idx, prevout_script, prevout_amount, private_key_info, hashcode=SIGHASH_ALL, hashcodes=None, segwit=None, scriptsig_type=None, redeem_script=None, witness_script=None, **blockchain_opts):
"""
Sign a particular input in the given transaction.
@private_key_info can either be a private key, or it can be a dict with 'redeem_script' and 'private_keys' defined
Returns the tx with the signed input
"""
if segwit is None:
segwit = get_features('segwit')
if scriptsig_type is None:
scriptsig_type = btc_privkey_scriptsig_classify(private_key_info)
if scriptsig_type in ['p2wpkh', 'p2wsh', 'p2sh-p2wpkh', 'p2sh-p2wsh'] and not segwit:
raise ValueError("Segwit is not enabled, but {} is a segwit scriptsig type".format(prevout_script))
return btc_tx_sign(tx, idx, prevout_script, prevout_amount, private_key_info, scriptsig_type, hashcode=hashcode, hashcodes=hashcodes, redeem_script=redeem_script, witness_script=witness_script)
def btc_tx_sign_all_unsigned_inputs(private_key_info, prev_outputs, unsigned_tx_hex, scriptsig_type=None, segwit=None, **blockchain_opts):
"""
Sign all unsigned inputs with a given key.
Use the given outputs to fund them.
@private_key_info: either a hex private key, or a dict with 'private_keys' and 'redeem_script'
defined as keys.
@prev_outputs: a list of {'out_script': xxx, 'value': xxx} that are in 1-to-1 correspondence with the unsigned inputs in the tx ('value' is in satoshis)
@unsigned_hex_tx: hex transaction with unsigned inputs
Returns: signed hex transaction
"""
if segwit is None:
segwit = get_features('segwit')
txobj = btc_tx_deserialize(unsigned_tx_hex)
inputs = txobj['ins']
if scriptsig_type is None:
scriptsig_type = btc_privkey_scriptsig_classify(private_key_info)
tx_hex = unsigned_tx_hex
prevout_index = 0
# import json
# print ''
# print 'transaction:\n{}'.format(json.dumps(btc_tx_deserialize(unsigned_tx_hex), indent=4, sort_keys=True))
# print 'prevouts:\n{}'.format(json.dumps(prev_outputs, indent=4, sort_keys=True))
# print ''
for i, inp in enumerate(inputs):
do_witness_script = segwit
if inp.has_key('witness_script'):
do_witness_script = True
elif segwit:
# all inputs must receive a witness script, even if it's empty
inp['witness_script'] = ''
if (inp['script'] and len(inp['script']) > 0) or (inp.has_key('witness_script') and len(inp['witness_script']) > 0):
continue
if prevout_index >= len(prev_outputs):
raise ValueError("Not enough prev_outputs ({} given, {} more prev-outputs needed)".format(len(prev_outputs), len(inputs) - prevout_index))
# tx with index i signed with privkey
tx_hex = btc_tx_sign_input(str(unsigned_tx_hex), i, prev_outputs[prevout_index]['out_script'], prev_outputs[prevout_index]['value'], private_key_info, segwit=do_witness_script, scriptsig_type=scriptsig_type)
unsigned_tx_hex = tx_hex
prevout_index += 1
return tx_hex
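# Illustrative usage sketch (not part of the original module): the expected shape of
# prev_outputs for btc_tx_sign_all_unsigned_inputs. Script and value are placeholders.
#   prev_outputs = [{'out_script': '76a914' + '00' * 20 + '88ac', 'value': 50000}]
#   signed_tx = btc_tx_sign_all_unsigned_inputs(privkey_hex, prev_outputs, unsigned_tx_hex)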
def block_header_serialize( inp ):
"""
    Given block header information, serialize it and return the hex-encoded header.
inp has:
* version (int)
* prevhash (str)
* merkle_root (str)
* timestamp (int)
* bits (int)
* nonce (int)
Based on code from pybitcointools (https://github.com/vbuterin/pybitcointools)
by Vitalik Buterin
"""
# concatenate to form header
o = encoding.encode(inp['version'], 256, 4)[::-1] + \
inp['prevhash'].decode('hex')[::-1] + \
inp['merkle_root'].decode('hex')[::-1] + \
encoding.encode(inp['timestamp'], 256, 4)[::-1] + \
encoding.encode(inp['bits'], 256, 4)[::-1] + \
encoding.encode(inp['nonce'], 256, 4)[::-1]
# get (reversed) hash
h = hashing.bin_sha256(hashing.bin_sha256(o))[::-1].encode('hex')
assert h == inp['hash'], (hashing.bin_sha256(o).encode('hex'), inp['hash'])
return o.encode('hex')
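# Standalone sketch (not part of the original module) of the same 80-byte header
# layout, using only the standard library: every integer field is little-endian and
# the two hashes are stored byte-reversed relative to their usual hex display.
def _example_block_header_hash(inp):
    import struct, hashlib, binascii
    header = (struct.pack('<I', inp['version']) +
              binascii.unhexlify(inp['prevhash'])[::-1] +
              binascii.unhexlify(inp['merkle_root'])[::-1] +
              struct.pack('<I', inp['timestamp']) +
              struct.pack('<I', inp['bits']) +
              struct.pack('<I', inp['nonce']))
    # block hash is the byte-reversed double SHA-256 of the serialized header
    block_hash = hashlib.sha256(hashlib.sha256(header).digest()).digest()[::-1]
    return binascii.hexlify(block_hash)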
def block_header_to_hex( block_data, prev_hash ):
"""
Calculate the hex form of a block's header, given its getblock information from bitcoind.
"""
header_info = {
"version": block_data['version'],
"prevhash": prev_hash,
"merkle_root": block_data['merkleroot'],
"timestamp": block_data['time'],
"bits": int(block_data['bits'], 16),
"nonce": block_data['nonce'],
"hash": block_data['hash']
}
return block_header_serialize(header_info)
def block_header_verify( block_data, prev_hash, block_hash ):
"""
Verify whether or not bitcoind's block header matches the hash we expect.
"""
serialized_header = block_header_to_hex( block_data, prev_hash )
candidate_hash_bin_reversed = hashing.bin_double_sha256(binascii.unhexlify(serialized_header))
candidate_hash = binascii.hexlify( candidate_hash_bin_reversed[::-1] )
return block_hash == candidate_hash
def block_verify( block_data ):
"""
    Given block data (a dict with a 'merkleroot' hex string and a 'tx' list of txid hex strings--i.e.
a block compatible with bitcoind's getblock JSON RPC method), verify that the
transactions are consistent.
Return True on success
Return False if not.
"""
# verify block data txs
m = merkle.MerkleTree( block_data['tx'] )
root_hash = str(m.root())
return root_hash == str(block_data['merkleroot'])
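# Standalone sketch (not part of the original module) of the standard Bitcoin merkle
# construction the check above relies on (the library's MerkleTree presumably does the
# same): txids in big-endian hex are byte-reversed, hashed pairwise with double
# SHA-256 (duplicating the last entry on odd levels), and the final digest is
# byte-reversed back into display order. Assumes at least one txid.
def _example_merkle_root(txids_hex):
    import hashlib, binascii
    layer = [binascii.unhexlify(t)[::-1] for t in txids_hex]
    while len(layer) > 1:
        if len(layer) % 2 == 1:
            layer.append(layer[-1])
        layer = [hashlib.sha256(hashlib.sha256(layer[i] + layer[i + 1]).digest()).digest()
                 for i in range(0, len(layer), 2)]
    return binascii.hexlify(layer[0][::-1])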
def btc_tx_output_parse_script( scriptpubkey ):
"""
Given the hex representation of a script,
turn it into a nice, easy-to-read dict.
The dict will have:
* asm: the disassembled script as a string
* hex: the raw hex (given as an argument)
* type: the type of script
Optionally, it will have:
* addresses: a list of addresses the script represents (if applicable)
* reqSigs: the number of required signatures (if applicable)
"""
script_type = None
reqSigs = None
addresses = []
script_type = btc_script_classify(scriptpubkey)
script_tokens = btc_script_deserialize(scriptpubkey)
if script_type in ['p2pkh']:
script_type = "pubkeyhash"
reqSigs = 1
addr = btc_script_hex_to_address(scriptpubkey)
if not addr:
raise ValueError("Failed to parse scriptpubkey address")
addresses = [addr]
elif script_type in ['p2sh', 'p2sh-p2wpkh', 'p2sh-p2wsh']:
script_type = "scripthash"
reqSigs = 1
addr = btc_script_hex_to_address(scriptpubkey)
if not addr:
raise ValueError("Failed to parse scriptpubkey address")
addresses = [addr]
elif script_type == 'p2pk':
script_type = "pubkey"
reqSigs = 1
elif script_type is None:
script_type = "nonstandard"
ret = {
"asm": btc_tx_script_to_asm(scriptpubkey),
"hex": scriptpubkey,
"type": script_type
}
if addresses is not None:
ret['addresses'] = addresses
if reqSigs is not None:
ret['reqSigs'] = reqSigs
# print 'parse script {}: {}'.format(scriptpubkey, ret)
return ret
|
blockstack/virtualchain
|
virtualchain/lib/blockchain/bitcoin_blockchain/bits.py
|
Python
|
gpl-3.0
| 45,795
|
[
"Brian"
] |
6fce3b3cb2d651ad8d2ba5a22ea22fb0d28e28a80d21aa3c7bd5c2c9e14fabad
|
#!/usr/bin/env python
# Copyright 2007, Michael J. Harms
# This program is distributed under General Public License v. 3. See the file
# COPYING for a copy of the license.
__description__ = \
"""
pdb_mutator.py
Mutates a residue in a pdb file.
"""
__author__ = "Michael J. Harms"
__date__ = "070729"
__description__ = "Mutates a residue in a pdb file"
import sys, time, string, os
import pdb_atom_renumber, pdb_clean
from helper import container
from pdb_data.common import *
class MutatorError(Exception):
"""
General exception to raise if there is a problem with this module.
"""
pass
def mutateResidue(pdb,residue_number,mutation,chain=None):
"""
    Renames the residue identified by residue_number to mutation; designed for use
with CHARMM. The residue is "mutated" but all atoms besides N CA C O and
CB are simply removed so that they will be added and minimized in CHARMM.
"""
keep_atoms = ["N ","CA ","C ","O ","CB "]
# Find residue to mutate
residue = [l for l in pdb if int(l[22:26]) == residue_number]
if chain != None:
residue = [l for l in residue if l[21] == chain]
original_aa = residue[0][17:20]
# Do mutation
index = pdb.index(residue[0])
for i, r in enumerate(residue):
residue[i] = "%s%-4s%s" % (r[:17],mutation,r[21:])
pdb[index + i] = residue[i]
# Remove non-backbone/CB atoms
for atom in residue:
if atom[13:16] not in keep_atoms:
pdb.remove(atom)
return pdb, original_aa
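# Illustrative sketch (not part of the original module): the fixed-column PDB slices
# relied on above. The ATOM record below is a made-up example line.
def _example_pdb_column_slices():
    line = "ATOM      2  CA  ALA A  15      11.000  22.000  33.000  1.00  0.00           C"
    return line[17:20], int(line[22:26]), line[13:16]   # ('ALA', 15, 'CA ')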
def pdbMutator(pdb,residue,mutation,chain=None,run_charmm=True):
"""
Mutate a residue in the pdb file, energy minimizing with CHARMM if
requested.
"""
# grab header
header = [l for l in pdb if l[0:6] not in pdb_clean.COORD_RECORDS and
l[0:6] not in pdb_clean.DEPRECATED_RECORDS]
# Grab coordinates
coord = [l for l in pdb if l[0:6] == "ATOM "]
if pdb_clean.pdbCheck(coord):
err = "There are no ATOM entries in this pdb file!"
raise MutatorError(err)
coord, original_aa = mutateResidue(coord,residue,mutation,chain)
mutation_string = "%s%i%s" % (AA3_TO_AA1[original_aa],residue,
AA3_TO_AA1[mutation])
# Set up log
log = ["REMARK %s introduced by pdb_mutator (harmsm@jhu.edu)\n" % \
mutation_string]
log_fmt = "REMARK - %s\n"
log.append(log_fmt % ("Process time: %s" % time.asctime()))
if chain == None:
log.append(log_fmt % ("Mutation introduced on all chains"))
else:
log.append(log_fmt % ("Mutation introduced on chain %s" % chain))
# Add missing atoms using CHARMM
if run_charmm:
print log_fmt % "Adding mutated side chain using CHARMM",
seqres = [l for l in header if l[0:6] == "SEQRES"]
coord = pdb_clean.addMissingAtoms(coord,seqres)
log.append(log_fmt % "Mutated sidechain built with CHARMM")
# Renumber atoms from 1
coord = pdb_atom_renumber.pdbAtomRenumber(coord)
log.append(log_fmt % "Renumbered atoms from 1")
print log[-1],
# Standardize atom-type on far right pdb column
coord = ["%s %s \n" % (c[:66],c[13]) for c in coord]
log.append(log_fmt % "Atom types were standardized.")
print log[-1],
# Final check
if pdb_clean.pdbCheck(coord):
err = "Unknown error occured and pdb has been mangled!"
raise MutatorError(err)
# Return processed pdb file.
out_pdb = []
out_pdb.extend(log)
out_pdb.extend(header)
out_pdb.extend(coord)
return out_pdb, mutation_string
def main():
"""
To be called if module run from command line.
"""
from helper import cmdline
# Parse command line
cmdline.initializeParser(__description__,__date__)
cmdline.addOption(short_flag="c",
long_flag="chain",
action="store",
default=None,
help="CHAIN to mutate",
nargs=1)
cmdline.addOption(short_flag="r",
long_flag="residue",
action="store",
type="int",
default=None,
help="Residue to mutate (REQUIRED)",
nargs=1)
cmdline.addOption(short_flag="m",
long_flag="mutation",
action="store",
default=None,
help="Three-letter name of mutation (REQUIRED)",
nargs=1)
cmdline.addOption(short_flag="s",
long_flag="simple",
action="store_true",
default=False,
help="No atoms beyond CB added (i.e. no CHARMM)")
file_list, options = cmdline.parseCommandLine()
# Parse command line options
if options.residue == None:
err = "Residue (-r) argument is required!"
raise cmdline.parser.error(err)
else:
residue = options.residue
if options.mutation == None:
err = "Mutation (-m) argument is required!"
raise cmdline.parser.error(err)
else:
mutation = options.mutation
chain = options.chain
run_charmm = not options.simple
for file in file_list:
f = open(file,'r')
pdb = f.readlines()
f.close()
print "Loading %s" % file
pdb_id = file[:-4]
pdb, mutation_string = pdbMutator(pdb,residue,mutation,chain,
run_charmm)
out_file = "%s_%s.pdb" % (pdb_id,mutation_string)
g = open(out_file,"w")
g.writelines(pdb)
g.close()
print "Mutated pdb written to %s" % out_file
if __name__ == "__main__":
main()
|
AndreaEdwards/pdbtools
|
pdb_mutator.py
|
Python
|
gpl-3.0
| 5,939
|
[
"CHARMM"
] |
967629b296ee009d810014c6bc7310e3fe6cfeeaaa75a097391c230dccdf9cc5
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# statusfe - [insert a few words of module description on this line]
# Copyright (C) 2003-2009 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
""" Get frontend status """
import shared.returnvalues as returnvalues
from shared.findtype import is_owner
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.init import initialize_main_variables
from shared.resadm import status_resource
def signature():
"""Signature of the main function"""
defaults = {'unique_resource_name': REJECT_UNSET}
return ['text', defaults]
def main(client_id, user_arguments_dict):
"""Main function used by front end"""
(configuration, logger, output_objects, op_name) = \
initialize_main_variables(client_id)
output_objects.append({'object_type': 'text', 'text'
: '--------- Trying to get STATUS for frontend ----------'
})
defaults = signature()[1]
(validate_status, accepted) = validate_input_and_cert(
user_arguments_dict,
defaults,
output_objects,
client_id,
configuration,
allow_rejects=False,
)
if not validate_status:
return (accepted, returnvalues.CLIENT_ERROR)
unique_resource_name = accepted['unique_resource_name'][-1]
logger.info('%s attempts to get status for frontend at %s',
client_id, unique_resource_name)
if not is_owner(client_id, unique_resource_name,
configuration.resource_home, logger):
output_objects.append({'object_type': 'error_text', 'text'
: 'You must be an owner of '
+ unique_resource_name
+ ' to get status for the resource frontend!'
})
return (output_objects, returnvalues.CLIENT_ERROR)
(status, msg) = status_resource(unique_resource_name,
configuration.resource_home, logger)
if not status:
output_objects.append({'object_type': 'error_text', 'text'
: '%s. Error getting resource status.'
% msg})
return (output_objects, returnvalues.CLIENT_ERROR)
# everything ok
output_objects.append({'object_type': 'text', 'text': '%s' % msg})
return (output_objects, returnvalues.OK)
|
heromod/migrid
|
mig/shared/functionality/statusfe.py
|
Python
|
gpl-2.0
| 3,209
|
[
"Brian"
] |
0464d5e0259fe4f5e5ee0162390708015d1ddae08198f4f729b573d6590254e3
|
# hhfit.py ---
#
# Filename: hhfit.py
# Description:
# Author:
# Maintainer:
# Created: Tue May 21 16:31:56 2013 (+0530)
# Version:
# Last-Updated: Tue Jun 11 16:57:30 2013 (+0530)
# By: subha
# Update #: 34
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Functions for fitting common equations for Hodgkin-Huxley type gate
# equations.
#
#
# Change log:
#
# Tue May 21 16:33:59 IST 2013 - Subha refactored the code from
# converter.py to hhfit.py.
#
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street, Fifth
# Floor, Boston, MA 02110-1301, USA.
#
#
# Code:
import traceback
import warnings
from collections import deque
import numpy as np
from scipy.optimize import curve_fit
from matplotlib import pyplot as plt
def exponential(x, a, k, x0, y0=0):
return a * np.exp(k * (x - x0)) + y0
def sigmoid(x, a, k, x0, y0=0):
return a / (np.exp(k * (x - x0)) + 1.0) + y0
def linoid(x, a, k, x0, y0=0):
"""The so called linoid function. Called explinear in neurml."""
denominator = np.exp(k * (x - x0)) - 1.0
# Linoid often includes a zero denominator - we need to fill those
# points with interpolated values (interpolation is simpler than
# finding limits).
ret = a * (x - x0) / denominator
    # a zero denominator yields nan (exactly at x0) or inf (very close to x0)
    infidx = np.flatnonzero(~np.isfinite(ret))
    if len(infidx) > 0:
        for ii in infidx:
            if ii == 0:
                ret[ii] = ret[ii+1] - (ret[ii+2] - ret[ii+1])
            elif ii == len(ret) - 1:
                ret[ii] = ret[ii-1] + (ret[ii-1] - ret[ii-2])
            else:
                ret[ii] = (ret[ii-1] + ret[ii+1]) * 0.5
    return ret + y0
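# Note (not part of the original module): the zero-denominator point x == x0 is a
# removable singularity with exact limit a / k, which the neighbour interpolation
# above approximates. For example, with a=1.0, k=2.0, x0=0.0 and y0=0, values of
# linoid just left and right of x=0 both approach 0.5.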
def double_exp(x, a, k1, x1, k2, x2, y0=0):
"""For functions of the form:
a / (exp(k1 * (x - x1)) + exp(k2 * (x - x2)))
"""
ret = np.zeros(len(x))
try:
ret = a / (np.exp(k1 * (x - x1)) + np.exp(k2 * (x - x2))) + y0
    except RuntimeWarning as e:
traceback.print_exc()
return ret
# Map from the above functions to corresponding neuroml class
fn_rate_map = {
exponential: 'HHExpRate',
sigmoid: 'HHSigmoidRate',
linoid: 'HHExpLinearRate',
double_exp: None,
}
# These are default starting parameter values
fn_p0_map = {
exponential: (1.0, -100, 20e-3, 0.0),
sigmoid: (1.0, 1.0, 0.0, 0.0),
linoid: (1.0, 1.0, 0.0, 0.0),
double_exp: (1e-3, -1.0, 0.0, 1.0, 0.0, 0.0),
}
def randomized_curve_fit(fn, x, y, maxiter=10, best=True):
"""Repeatedly search for a good fit for common gate functions for
HHtype channels with randomly generated initial parameter
set. This function first tries with default p0 for fn. If that
fails to find a good fit, (correlation coeff returned by curve_fit
being inf is an indication of this), it goes on to generate random
p0 arrays and try scipy.optimize.curve_fit using this p0 until it
finds a good fit or the number of iterations reaches maxiter.
Ideally we should be doing something like stochastic gradient
descent, but I don't know if that might have performance issue in
pure python. The random parameterization in the present function
uses uniformly distributed random numbers within the half-open
interval [min(x), max(x)). The reason for choosing this: the
offset used in the exponential parts of Boltzman-type/HH-type
equations are usually within the domain of x. I also invert the
second entry (p0[1], because it is always (one of) the scale
factor(s) and usually 1/v for some v in the domain of x. I have
not tested the utility of this inversion. Even without this
inversion, with maxiter=100 this function is successful for the
test cases.
Parameters
----------
x: ndarray
values of the independent variable
y: ndarray
sample values of the dependent variable
maxiter: int
maximum number of iterations
best: bool
if true, repeat curve_fit for maxiter and return the case of least
squared error.
Returns
-------
    The return value of the scipy.optimize.curve_fit call that succeeds, or that of the
    last call to it if maxiter iterations are reached.
"""
bad = True
p0 = fn_p0_map[fn]
p = None
p_best = None
min_err = 1e10 # A large value as placeholder
for ii in range(maxiter):
try:
p = curve_fit(fn, x, y, p0=p0, full_output=True)
except (RuntimeError, RuntimeWarning) as e:
p = None
# The last entry returned by scipy.optimize.leastsq used by
# curve_fit is 1, 2, 3 or 4 if it succeeds.
bad = (p is None) or np.any(p[1] == np.inf) or (p[-1] not in [1, 2, 3, 4])
if not bad:
if not best:
return p
err = sum((y - fn(x, *tuple(p[0])))**2)
if err < min_err:
min_err = err
p_best = p
p0 = np.random.uniform(low=min(x), high=max(x), size=len(fn_p0_map[fn]))
if p0[1] != 0.0:
p0[1] = 1 / p0[1] # k = 1/v_scale - could help faster convergence
if p_best is None:
if p is not None:
msg = p[-2]
else:
msg = ''
warnings.warn('Maximum iteration %d reached. Could not find a decent fit. %s' % (maxiter, msg), RuntimeWarning)
return p_best
def find_ratefn(x, y, **kwargs):
"""Find the function that fits the rate function best. This will try
exponential, sigmoid and linoid and return the best fit.
Needed until NeuroML2 supports tables or MOOSE supports
functions.
Parameters
----------
x: 1D array
independent variable.
y: 1D array
function values.
**kwargs: keyword arguments
passed to randomized_curve_fit.
Returns
-------
best_fn: function
the best fit function.
best_p: tuple
the optimal parameter values for the best fit function.
"""
rms_error = 1e10 # arbitrarily setting this
best_fn = None
best_p = None
for fn in fn_rate_map.keys():
p = randomized_curve_fit(fn, x, y, **kwargs)
if p is None:
continue
popt = p[0]
pcov = p[1]
error = y - fn(x, *popt)
erms = np.sqrt(np.mean(error**2))
# Ideally I want a fuzzy selection criterion here - a
# preference for fewer parameters, but if the errors are
# really small then we go for functions with more number of
# parameters. Some kind of weighted decision would have been
# nice. I am arbitrarily setting less than 0.1% relative error
# as a strong argument for taking a longer parameter function
# as a really better fit. Even with 1%, double exponential
# betters detected sigmoid for sigmoid curve in test case.
if erms < rms_error and \
((best_p is None) or \
len(popt) <= len(best_p) or \
erms / (max(y) - min(y)) < 0.001):
rms_error = erms
best_fn = fn
best_p = popt
return (best_fn, best_p)
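# Illustrative usage sketch (not part of the original module); the parameter values
# below are arbitrary examples in the voltage range such rate functions are fit over.
def _example_find_ratefn():
    x = np.linspace(-0.12, 0.04, 200)
    y = sigmoid(x, 1.0, -100.0, -0.04, 0.0)
    best_fn, best_params = find_ratefn(x, y, maxiter=50)
    return best_fn, best_params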
#
# hhfit.py ends here
|
dilawar/moose-full
|
moose-core/python/moose/neuroml2/hhfit.py
|
Python
|
gpl-2.0
| 7,697
|
[
"MOOSE"
] |
d8ec1aa7eef7b969dce6ef5868fbd05d0cfec0ee299fbb03be0ff6b4b5b6b244
|
#!/usr/bin/env python
import os
import sys
import pysam
import re
import subprocess
import argparse
import multiprocessing as mp
import scipy.stats as ss
import numpy as np
import skbio.alignment as skalign
import skbio.sequence as skseq
from collections import Counter
from uuid import uuid4
from time import sleep
import logging
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
header = [
'Chromosome',
'RMSK_Start',
'RMSK_End',
'Orientation',
'Name',
'Junc_5p',
'Junc_3p',
'TSD_Start_5p',
'TSD_End_5p',
'TSD_Start_3p',
'TSD_End_3p',
'VAF',
'TSD_seq',
'Empty_Site_Consensus'
]
class Block:
def __init__(self, tstart, tend):
self.tstart = min(int(tstart), int(tend))
self.tend = max(int(tstart), int(tend))
def length(self):
return self.tend - self.tstart
def __str__(self):
return str(self.tstart) + ' ' + str(self.tend)
def __lt__(self, other):
return self.tstart < other.tstart
class PSL:
def __init__(self, rec, consensus):
# psl spec: http://www.ensembl.org/info/website/upload/psl.html
(self.matches, self.misMatches, self.repMatches, self.nCount, self.qNumInsert, self.qBaseInsert,
self.tNumInsert, self.tBaseInsert, self.strand, self.qName, self.qSize, self.qStart, self.qEnd,
self.tName, self.tSize, self.tStart, self.tEnd, self.blockCount, self.blockSizes, self.qStarts,
self.tStarts) = rec.strip().split()
self.cons = consensus
self.rec = rec.strip()
self.tBlocks = []
for bsize, tstart in zip(self.blockSizes.split(',')[:-1], self.tStarts.split(',')[:-1]): # [:-1] due to trailing comma
self.tBlocks.append(Block(int(tstart), int(tstart) + int(bsize)))
self.tBlocks.sort()
self.tName = self.tName.replace('chr', '')
self.tStart, self.tEnd, self.qStart, self.qEnd = map(int, (self.tStart, self.tEnd, self.qStart, self.qEnd))
self.qSize, self.tSize = map(int, (self.qSize, self.tSize))
if self.qStart > self.qEnd:
self.qStart, self.qEnd = self.qEnd, self.qStart
if self.tStart > self.tEnd:
self.tStart, self.tEnd = self.tEnd, self.tStart
def match(self, chrom, pos, window=0):
''' return True if chrom:pos intersects BLAT hit +/- window '''
chrom = chrom.replace('chr', '')
if chrom != self.tName:
return False
if int(pos) >= int(self.tStart)-window and int(pos) <= int(self.tEnd)+window:
return True
return False
def refspan(self):
        ''' return footprint of match relative to reference genome '''
return self.tEnd - self.tStart
def score(self):
''' adapted from https://genome.ucsc.edu/FAQ/FAQblat.html#blat4 '''
return (int(self.matches) + (int(self.repMatches)>>1)) - int(self.misMatches) - int(self.qNumInsert) - int(self.tNumInsert)
def pctmatch(self):
''' adapted from https://genome.ucsc.edu/FAQ/FAQblat.html#blat4 '''
qAliSize = int(self.qEnd) - int(self.qStart)
tAliSize = int(self.tEnd) - int(self.tStart)
if min(qAliSize, tAliSize) <= 0:
return 0.0
sizeDif = abs(qAliSize - tAliSize)
total = int(self.matches) + int(self.repMatches) + int(self.misMatches)
if total > 0:
            return 1.0-float((int(self.misMatches) + int(self.qNumInsert) + int(self.tNumInsert) + round(3*np.log(1+sizeDif)))) / float(total)
return 0.0
def __lt__(self, other):
''' used for ranking BLAT hits '''
return self.score() > other.score()
def __str__(self):
return self.rec
class SortableRead:
def __init__(self, read):
self.read = read
self.seq = read.seq
self.seqstart = read.reference_start-read.query_alignment_start
def __gt__(self, other):
if self.read.tid == other.read.tid:
return self.seqstart > other.seqstart
else:
            return self.read.tid > other.read.tid
class SplitRead:
''' store information about split read alignment '''
def __init__(self, chrom, read, bamfn, minqual):
self.uuid = str(uuid4())
self.chrom = chrom
self.read = read
self.bamfn = os.path.basename(bamfn)
self.minqual = minqual
self.cliplen = len(read.seq) - len(read.query_alignment_sequence)
self.breakleft = False
self.breakright = False
self.breakpos = None
if read.qstart < read.rlen - read.qend:
self.breakpos = read.get_reference_positions()[-1] # breakpoint on right
self.breakright = True
else:
self.breakpos = read.get_reference_positions()[0] # breakpoint on left
self.breakleft = True
assert self.breakpos is not None
assert self.breakleft != self.breakright
def getRG(self):
''' return read group from RG aux tag '''
for tag, val in self.read.tags:
if tag == 'RG': return val
return None
def __gt__(self, other):
''' enables sorting of SplitRead objects '''
if self.chrom == other.chrom:
return self.breakpos > other.breakpos
else:
return self.chrom > other.chrom
def __str__(self):
dir = 'left'
if self.breakright: dir='right'
return ' '.join(map(str, ('SplitRead:', self.chrom, self.breakpos, self.cliplen, self.read.qname, dir)))
class ReadCluster:
''' parent class for read clusters '''
def __init__(self, firstread=None):
self.uuid = str(uuid4())
self.reads = []
self.start = 0
self.end = 0
self.median = 0
self.chrom = None
if firstread is not None:
self.add_read(firstread)
def add_read(self, r):
''' add a read and update '''
self.reads.append(r)
if self.chrom is None: self.chrom = r.chrom
assert self.chrom == r.chrom # clusters can't include > 1 chromosome
''' update statistics '''
positions = []
positions += [pos for r in self.reads for pos in r.read.positions]
self.reads.sort()
        self.start = min(positions)
        self.end = max(positions)
self.median = int(np.median(positions))
def readgroups(self):
c = Counter([r.getRG() for r in self.reads])
return [str(k[0]) + '|' + str(k[1]) for k in zip(c.keys(), c.values())]
def bamfiles(self):
c = Counter([r.bamfn for r in self.reads])
return [str(k[0]) + '|' + str(k[1]) for k in zip(c.keys(), c.values())]
def find_extrema(self):
''' return leftmost and rightmost aligned positions in cluster vs. reference '''
positions = []
positions += [pos for r in self.reads for pos in r.read.positions]
return min(positions), max(positions)
def avg_matchpct(self):
return np.mean([read_matchpct(r.read) for r in self.reads])
def __len__(self):
return len(self.reads)
class SplitCluster(ReadCluster):
''' store and manipulate groups of SplitRead objects '''
def add_splitread(self, sr):
''' add a SplitRead and update '''
self.reads.append(sr)
if self.chrom is None: self.chrom = sr.chrom
assert self.chrom == sr.chrom # clusters can't include > 1 chromosome
''' update statistics '''
self.reads.sort()
self.start = self.reads[0].breakpos
self.end = self.reads[-1].breakpos
self.median = self.reads[int(len(self)/2)].breakpos
def subcluster_by_breakend(self, breakends, direction='both'):
''' return a new cluster containing only reads with breakpoints in passed list '''
new = SplitCluster()
assert direction in ('both', 'left', 'right')
if direction == 'both':
[new.add_splitread(sr) for sr in self.reads if sr.breakpos in breakends]
if direction == 'left':
[new.add_splitread(sr) for sr in self.reads if sr.breakpos in breakends and sr.breakleft]
if direction == 'right':
[new.add_splitread(sr) for sr in self.reads if sr.breakpos in breakends and sr.breakright]
return new
def consensus(self, minscore = 0.9):
''' build consensus from sorted aligned reads iteratively '''
S = -np.ones((256, 256)) + 2 * np.identity(256)
S = S.astype(np.int16)
minqual = self.reads[0].minqual
sortable_reads = [SortableRead(sr.read) for sr in self.reads]
seqs = [qualtrim(sorted_read.read, minqual=minqual) for sorted_read in sorted(sortable_reads)]
seqs = [s for s in seqs if len(s) > 20]
if len(seqs) == 0:
return '', 0.0
if len(seqs) == 1: # no consensus necessary
return seqs[0], 1.0
uniq_seqs = [seqs[0]]
for i, seq in enumerate(seqs[1:], start=1):
if seq != seqs[i-1]:
uniq_seqs.append(seq)
if len(uniq_seqs) == 1: # all seqs were the same!
return uniq_seqs[0], 1.0
cons = uniq_seqs[0]
scores = []
if len(uniq_seqs) > 1000:
uniq_seqs = [uniq_seqs[u] for u in sorted(np.random.choice(range(len(uniq_seqs)), size=1000))]
for seq in uniq_seqs[1:]:
cons = cons.replace('N', 'A')
            seq = seq.replace('N', 'A')
s1 = skseq.DNA(cons)
s2 = skseq.DNA(seq)
try:
aln_res = skalign.local_pairwise_align_ssw(s1, s2)
except IndexError:
return cons, 0.0
aln_tab = aln_res[0]
s1_aln, s2_aln = aln_res[2]
a1 = cons[s1_aln[0]:s1_aln[1]+1]
score = 0.0
if aln_tab.shape.position > 10: # param?
score = sum(aln_tab.conservation(gap_mode='include')==1.)/aln_tab.shape.position
if re.search(a1, cons):
cons_start, cons_end = s1_aln[0], s1_aln[1]+1
if score >= minscore and cons_end > len(cons)-5:
scores.append(score)
align_end = s2_aln[1]+1
cons += seq[align_end:]
if scores:
return cons, np.mean(scores)
else:
return cons, 0.0
def all_breakpoints(self):
''' returns uniquified list of breakpoints '''
return list(set([read.breakpos for read in self.reads]))
def median_D(self):
return np.median([splitqual(sr.read) for sr in self.reads])
def min_cliplen(self):
return min([sr.cliplen for sr in self.reads])
def max_cliplen(self):
return max([sr.cliplen for sr in self.reads])
def __str__(self):
break_count = Counter([read.breakpos for read in self.reads])
return '\t'.join(map(str, ('SplitCluster:', self.chrom, self.start, self.end, len(self.reads), break_count)))
class BreakEnd:
''' coallate information about a breakend '''
def __init__(self, chrom, breakpos, cluster, consensus, score, direction):
self.uuid = str(uuid4())
self.cluster = cluster
self.chrom = chrom
self.breakpos = breakpos
self.consensus = consensus
self.consscore = score
self.direction = direction
def __len__(self):
return len(self.cluster)
def __str__(self):
return '%s:%d:%s:%s' % (self.chrom, self.breakpos, self.consensus, self.direction)
def splitqual(read):
''' return Mann-Whitney P for clipped vs unclipped quals '''
breakpos = None
breakpos = read.get_aligned_pairs()[-1][0] # breakpoint on right
q1 = list(map(ord, list(read.qual[:breakpos])))
q2 = list(map(ord, list(read.qual[breakpos:])))
if min(q1) == max(q1) == min(q2) == max(q2):
return 1.0
return ss.mannwhitneyu(q1, q2)[1]
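# Illustrative sketch (not part of the original module): the intuition behind the
# Mann-Whitney filter above. A read whose clipped tail has much lower base quality
# than its aligned portion gives a tiny p-value and is dropped by the
# splitqual(read) >= 0.01 cutoff applied in fetch_clipped_reads below.
def _example_clip_quality_pvalue():
    aligned_quals = [40, 38, 39, 41, 37, 40, 39, 38]   # made-up Phred-like values
    clipped_quals = [8, 10, 7, 9, 11, 8, 10, 9]
    return ss.mannwhitneyu(aligned_quals, clipped_quals)[1]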
def guess_minqual(bam):
minscore = None
n = 0
for read in bam.fetch():
n += 1
m = min([ord(q) for q in list(read.qual)])
if minscore is None or minscore > m:
minscore = m
if n > 10000: break
return minscore
def fetch_clipped_reads(bam, chrom, start, end):
''' Return list of SplitRead objects '''
splitreads = []
start = int(start)
end = int(end)
assert start < end
if start < 0: start = 0
minqual = guess_minqual(bam) # used for quality trimming when building consensus
if chrom not in bam.references:
return splitreads
for read in bam.fetch(chrom, start, end):
if not read.is_unmapped and not read.is_duplicate and read.mapq > 0:
if read.rlen - read.alen >= 3: # 'soft' clipped?
# length of 'minor' clip
altclip = min(read.qstart, read.rlen-read.qend)
# junk bases
N_count = 0
if 'N' in read.seq: N_count = Counter(read.seq)['N']
if altclip <= 2: # could add as a filter
if N_count <= 2 and splitqual(read) >= 0.01:
chrom = str(bam.getrname(read.tid))
if len(read.get_reference_positions()) > 0:
splitreads.append(SplitRead(chrom, read, bam.filename, minqual))
return splitreads
def build_sr_clusters(splitreads, searchdist=100): # TODO PARAM,
''' cluster SplitRead objects into Cluster objects and return a list of them '''
clusters = []
for sr in splitreads:
if len(clusters) == 0:
clusters.append(SplitCluster(sr))
elif clusters[-1].chrom != sr.chrom:
clusters.append(SplitCluster(sr))
else:
if abs(clusters[-1].median - sr.breakpos) > searchdist:
clusters.append(SplitCluster(sr))
else:
clusters[-1].add_splitread(sr)
return clusters
def build_breakends(cluster, tmpdir='/tmp'):
''' returns list of breakends from cluster '''
breakends = []
for breakpos in cluster.all_breakpoints():
for dir in ('left', 'right'):
subcluster = cluster.subcluster_by_breakend([breakpos], direction=dir)
if len(subcluster) >= 4 and subcluster.max_cliplen() >= 10:
seq = subcluster.reads[0].read.seq
score = 1.0
if len(subcluster) > 1: seq, score = subcluster.consensus()
N_count = 0
if 'N' in seq: N_count = Counter(seq)['N']
if seq != '' and score >= 0.9 and N_count <= 3:
breakends.append(BreakEnd(cluster.chrom, breakpos, subcluster, seq, score, dir))
return breakends
def qualtrim(read, minqual=35):
''' return quality-trimmed sequence given a pysam.AlignedSegment '''
q = [ord(b)-minqual for b in list(read.qual)]
for i in range(0,len(q)-4): # sliding window, 4bp
if np.mean(q[i:i+4]) < 5:
return read.seq[:i]
return read.seq
def locate_subseq(longseq, shortseq):
''' return (start, end) of shortseq in longseq '''
assert len(longseq) >= len(shortseq), 'orient_subseq: %s < %s' % (longseq, shortseq)
match = re.search(shortseq, longseq)
if match is not None:
return sorted((match.start(0), match.end(0)))
return None
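# Note / example (not part of the original module): re.search treats shortseq as a
# regular expression, so plain A/C/G/T sequences without regex metacharacters are
# assumed here, e.g. locate_subseq('AAACGTAAA', 'ACGT') -> (2, 6).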
def start_blat_server(blatref, port=9999):
# parameters from https://genome.ucsc.edu/FAQ/FAQblat.html#blat5
server_up = True
dummy_fa = '/tmp/' + str(uuid4()) + '.fa'
dummy_psl = dummy_fa.replace('.fa', '.psl')
with open(dummy_fa, 'w') as dout:
dout.write('>\n' + 'A'*100)
poll_cmd = ['gfClient', 'localhost', str(port), blatref, dummy_fa, dummy_psl]
poll_time = 10
t = subprocess.Popen(poll_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
for line in t.stderr:
line = line.decode()
if line.startswith('Sorry'):
server_up = False
logger.info("No BLAT server found, starting one up...")
cmd = ['gfServer', 'start', 'localhost', str(port), '-stepSize=5', blatref]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
else:
logger.info("Found BLAT server!")
while not server_up:
started = True
t = subprocess.Popen(poll_cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
for line in t.stderr:
line = line.decode()
if line.startswith('Sorry'):
started = False
if not started:
logger.info("waiting for BLAT server to start...")
sleep(poll_time)
else:
server_up=True
logger.info("BLAT for %s server up" % blatref)
return port
def blat(fasta, outpsl, port=9999, minScore=0, maxIntron=None):
''' BLAT using gfClient utility '''
cmd = ['gfClient', 'localhost', str(port), '-nohead']
if maxIntron is not None:
cmd.append('-maxIntron=' + str(maxIntron))
if minScore is not None:
cmd.append('-minScore=' + str(minScore))
cmd += ['/', fasta, outpsl]
p = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
for line in p.stdout:
pass
def eval_break(breakend, direct, elt_chrom, elt_start, elt_end):
if breakend.direction != direct:
return False
out_psl = None
fa_tmp = '/tmp/' + str(uuid4()) + '.fa'
psl_tmp = '/tmp/' + str(uuid4()) + '.psl'
with open(fa_tmp, 'w') as fa_out:
fa_out.write('>%s_%d\n%s\n' % (breakend.chrom, breakend.cluster.start, breakend.consensus))
blat(fa_tmp, psl_tmp)
with open(psl_tmp, 'r') as psl:
for line in psl:
rec = PSL(line.strip(), breakend.consensus)
if float(rec.matches) / len(breakend.consensus) > 0.9:
if elt_chrom.lstrip('chr') == rec.tName.lstrip('chr') and int(rec.tStart) < elt_start + 50 and int(rec.tEnd) > elt_end-50:
out_psl = rec
os.remove(fa_tmp)
os.remove(psl_tmp)
return out_psl
def tsd(psl, ref, b_left_init=0, b_right_init=0, max_iter=100):
b_left_pos = b_left_init
b_right_pos = b_right_init
if int(psl.blockCount) < 2:
return 0,0,0,0,'NA'
# pick largest gap between blocks
gap = 0
for i, block in enumerate(psl.tBlocks[1:], 1):
prev_block = psl.tBlocks[i-1]
if block.tstart - prev_block.tend > gap:
gap = block.tstart - prev_block.tend
b_left_pos = b_left_init + prev_block.tend
b_right_pos = b_right_init + block.tstart
chrom = psl.tName.lstrip('chr')
b_left = b_left_pos
b_right = b_right_pos
nt_l = ref.fetch(chrom, b_left, b_left+1)
nt_r = ref.fetch(chrom, b_right, b_right+1)
if nt_l != nt_r:
return b_left, b_left, b_right, b_right, 'NA'
else:
i = 0
while nt_l == nt_r and i < max_iter:
b_left -= 1
b_right -= 1
if b_left < 0:
b_left = 0
if b_right < 0:
b_right = 0
i += 1
nt_l = ref.fetch(chrom, b_left, b_left+1)
nt_r = ref.fetch(chrom, b_right, b_right+1)
if i >= max_iter:
return b_left, b_left, b_right, b_right, 'NA'
l_b_start = b_left+1
r_b_start = b_right+1
b_left = b_left_pos
b_right = b_right_pos
nt_l = ref.fetch(chrom, b_left, b_left+1)
nt_r = ref.fetch(chrom, b_right, b_right+1)
i = 0
while nt_l == nt_r and i < max_iter:
b_left += 1
b_right += 1
if b_left < 0:
b_left = 0
if b_right < 0:
b_right = 0
i += 1
nt_l = ref.fetch(chrom, b_left, b_left+1)
nt_r = ref.fetch(chrom, b_right, b_right+1)
if i >= max_iter:
return b_left, b_left, b_right, b_right, 'NA'
l_b_end = b_left
r_b_end = b_right
if l_b_start < 0:
l_b_start = 0
tsd_seq = ref.fetch(chrom, l_b_start, l_b_end)
if tsd_seq:
return l_b_start, l_b_end, r_b_start, r_b_end, tsd_seq
else:
        return l_b_start, l_b_end, r_b_start, r_b_end, 'NA'
def getVAF(bam, chrom, poslist):
''' return number of reads supporting alt (insertion), ref (reference) and vaf (variant allele fraction) '''
poslist = map(int, poslist)
alt, ref = break_count(bam, chrom, poslist)
vaf = 0.0
if float(ref+alt) > 0:
vaf = float(alt)/float(alt+ref)
return alt, ref, vaf
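# Worked example (not part of the original module): with 6 clipped reads supporting
# the breakpoints (alt) and 18 reads cleanly spanning the TSD (ref), getVAF reports
# vaf = 6 / (6 + 18) = 0.25.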
def break_count(bam, chrom, poslist, minpad=5, flex=1, minmapq=10):
''' ref = number of reads spanning TSD, alt = number of reads clipped at breakpoint in poslist '''
altcount = 0
refcount = 0
discards = 0
poslist = list(poslist)
tsd_start = min(poslist)
tsd_end = max(poslist)
tsd_len = tsd_end - tsd_start
if tsd_start < minpad: tsd_start = minpad
for read in bam.fetch(chrom, tsd_start-minpad, tsd_end+minpad):
if read.is_unmapped or read.is_duplicate:
continue
if read.mapq < minmapq:
continue
rclip = len(read.seq) - read.query_alignment_end
lclip = read.query_alignment_start
rbreak = 0
lbreak = 0
if rclip > max(tsd_len, minpad):
rbreak = read.reference_end
if lclip > max(tsd_len, minpad):
lbreak = read.reference_start
support_alt = False
for pos in poslist: # does this read support a breakpoint in the list?
if (rbreak >= pos-flex and rbreak <= pos+flex) or (lbreak >= pos-flex and lbreak <= pos+flex):
support_alt = True
if support_alt:
altcount += 1
else:
#for pos in poslist: # does this read span a breakpoint in the list?
if read.alen == len(read.seq):
if read.reference_start < tsd_start and read.reference_end > tsd_end: # span TSD
refcount += 1
return altcount, refcount
def ref_ins(args, chrom, start, end, orient, name):
bam = pysam.AlignmentFile(args.bam)
ref = pysam.Fastafile(args.fastaref)
# Find the junction
start_splits = fetch_clipped_reads(bam, chrom, start-25, start+25)
start_splits.sort()
start_clusters = build_sr_clusters(start_splits)
start_breaks = [build_breakends(c) for c in start_clusters]
psl_rec = None
for be in start_breaks:
for b in be:
psl_rec = eval_break(b, 'right', chrom, start, end)
if psl_rec is None:
end_splits = fetch_clipped_reads(bam, chrom, end-25, end+25)
end_splits.sort()
end_clusters = build_sr_clusters(end_splits)
end_breaks = [build_breakends(c) for c in end_clusters]
for be in end_breaks:
for b in be:
psl_rec = eval_break(b, 'left', chrom, start, end)
# Locate TSD if possible:
# may need to jiggle the start location for TSD search
tries = [[0,0], [1,1], [-1,-1], [0,-1], [-1,0], [1,0], [0,1]]
if psl_rec:
max_tsd = -1
best_tsd = []
while len(tries) > 0:
tsd_result = tsd(psl_rec, ref, b_left_init=tries[0][0], b_right_init=tries[0][1])
if tsd_result[1] - tsd_result[0] > max_tsd:
best_tsd = tsd_result
max_tsd = tsd_result[1] - tsd_result[0]
if len(tries) > 1:
tries = tries[1:]
else:
tries = []
l_tsd_start, l_tsd_end, r_tsd_start, r_tsd_end, tsd_seq = best_tsd
        l_altcount, l_refcount, l_vaf = getVAF(bam, chrom, (l_tsd_start, l_tsd_end))
        r_altcount, r_refcount, r_vaf = getVAF(bam, chrom, (r_tsd_start, r_tsd_end))
vaf = ['0.0']
if l_altcount + r_altcount + l_refcount + r_refcount > 0:
vaf = [str(float(l_altcount + r_altcount) / float(l_altcount + r_altcount + l_refcount + r_refcount))]
junc_5p = l_tsd_end
junc_3p = r_tsd_start
tsd_start_5p = l_tsd_start
tsd_end_5p = l_tsd_end
tsd_start_3p = r_tsd_start
tsd_end_3p = r_tsd_end
vaf_5p = l_vaf
vaf_3p = r_vaf
if orient == '-':
junc_5p, junc_3p = junc_3p, junc_5p
tsd_start_5p, tsd_start_3p = tsd_start_3p, tsd_start_5p
tsd_end_5p, tsd_end_3p = tsd_end_3p, tsd_end_5p
vaf_5p, vaf_3p = vaf_3p, vaf_5p
if args.persample is not None:
vaf = []
with open(args.persample) as samples:
for line in samples:
sbamfn, sname = line.strip().split()
sbam = pysam.AlignmentFile(sbamfn)
                    l_altcount, l_refcount, l_vaf = getVAF(sbam, chrom, (l_tsd_start, l_tsd_end))
                    r_altcount, r_refcount, r_vaf = getVAF(sbam, chrom, (r_tsd_start, r_tsd_end))
svaf = 0.0
if l_altcount + r_altcount + l_refcount + r_refcount > 0:
svaf = float(l_altcount + r_altcount) / float(l_altcount + r_altcount + l_refcount + r_refcount)
vaf.append('%s|%f' % (sname, svaf))
vaf = ','.join(vaf)
out = (chrom, start, end, orient, name, junc_5p, junc_3p, tsd_start_5p, tsd_end_5p, tsd_start_3p, tsd_end_3p, vaf, tsd_seq, psl_rec.cons)
return out
return None
def main(args):
logger.info('%s started with cmd: %s' % (sys.argv[0], ' '.join(sys.argv)))
p = start_blat_server(args.blatref)
print('\t'.join(header))
pool = mp.Pool(processes=int(args.procs))
results = []
with open(args.ins) as bed:
for line in bed:
chrom, start, end, orient, name = line.strip().split()[:5]
start = int(start)
end = int(end)
assert orient in ('+','-')
res = pool.apply_async(ref_ins, [args, chrom, start, end, orient, name])
results.append(res)
output = []
for res in results:
out = res.get()
if out is not None:
output.append(out)
for out in output:
print('\t'.join(map(str, out)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Find breakpoints, TSD, VAF, and read counts for reference insertions')
parser.add_argument('-b', '--bam', required=True, help='initial BAM for deletion discovery')
    parser.add_argument('-i', '--ins', required=True, help='insertion locations (five columns required: chrom, start, end, strand, annotation)')
parser.add_argument('-r', '--blatref', required=True, help='BLAT reference')
parser.add_argument('-f', '--fastaref', required=True, help='samtools faidx indexed genome fasta')
parser.add_argument('-p', '--procs', default=1, help='split work across multiple processes')
parser.add_argument('--port', default=9999)
parser.add_argument('--persample', default=None, help='List of files (2 column: BAM, Name) for per-sample information')
args = parser.parse_args()
main(args)
|
adamewing/tebreak
|
scripts/refelts.py
|
Python
|
mit
| 27,200
|
[
"pysam"
] |
a4933b5572575ee74df45d9d30b68ff6905525cd0d9e6f06d2025f20c047fda7
|
"""
Compute Bra-ket averaged Taylor expansion integrals over trajectories
traveling on adiabataic potentials
"""
import numpy as np
import nomad.compiled.nuclear_gaussian as nuclear
# Determines the Hamiltonian symmetry
hermitian = True
# Returns functional form of bra function ('dirac_delta', 'gaussian')
basis = 'gaussian'
def elec_overlap(t1, t2):
""" Returns < Psi | Psi' >, the electronic overlap integral of two trajectories"""
return float(t1.state == t2.state)
def nuc_overlap(t1, t2):
""" Returns < Chi | Chi' >, the nuclear overlap integral of two trajectories"""
return nuclear.overlap(t1.phase(),t1.widths(),t1.x(),t1.p(),
t2.phase(),t2.widths(),t2.x(),t2.p())
def traj_overlap(t1, t2):
""" Returns < Chi | Chi' >, the total overlap integral of two trajectories"""
return elec_overlap(t1,t2) * nuclear.overlap(t1.phase(),t1.widths(),t1.x(),t1.p(),
t2.phase(),t2.widths(),t2.x(),t2.p())
def s_integral(t1, t2, nuc_ovrlp, elec_ovrlp):
""" Returns < Psi | Psi' >, the overlap of the nuclear
component of the wave function only"""
return nuc_ovrlp * elec_ovrlp
def t_integral(t1, t2, kecoef, nuc_ovrlp, elec_ovrlp):
"""Returns kinetic energy integral over trajectories."""
if elec_ovrlp == 0.:
return 0.j
ke = nuclear.deld2x(nuc_ovrlp,t1.widths(),t1.x(),t1.p(),
t2.widths(),t2.x(),t2.p())
return -np.dot(ke, kecoef)
def sdot_integral(t1, t2, nuc_ovrlp, elec_ovrlp):
"""Returns the matrix element <Psi_1 | d/dt | Psi_2>."""
if elec_ovrlp == 0.:
return elec_ovrlp
deldx = nuclear.deldx(nuc_ovrlp,t1.widths(),t1.x(),t1.p(),
t2.widths(),t2.x(),t2.p())
deldp = nuclear.deldp(nuc_ovrlp,t1.widths(),t1.x(),t1.p(),
t2.widths(),t2.x(),t2.p())
sdot = (np.dot(deldx,t2.velocity()) + np.dot(deldp,t2.force()) +
1j * t2.phase_dot() * nuc_ovrlp)
return sdot
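# Note (not part of the original module): reading nuclear.deldx/deldp as the overlap
# derivatives with respect to the second trajectory's centre position and momentum,
# sdot_integral evaluates the chain-rule expansion
#     <chi_1| d/dt |chi_2> = xdot_2 . <chi_1| d/dx |chi_2>
#                          + pdot_2 . <chi_1| d/dp |chi_2>
#                          + i * gammadot_2 * <chi_1|chi_2>
# with xdot_2 = t2.velocity(), pdot_2 = t2.force() and gammadot_2 = t2.phase_dot().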
|
mschuurman/FMSpy
|
nomad/integrals/fms.py
|
Python
|
lgpl-3.0
| 2,060
|
[
"Gaussian"
] |
86797c56b773f39de1f00baa90924e6acf95243841c72d7464500060221d8928
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2006 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id$
"""Tools/Analysis and Exploration/Compare Individual Events"""
#------------------------------------------------------------------------
#
# python modules
#
#------------------------------------------------------------------------
import os
from collections import defaultdict
#------------------------------------------------------------------------
#
# GNOME/GTK modules
#
#------------------------------------------------------------------------
from gi.repository import Gtk
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.filters import GenericFilter, rules
from gramps.gui.filters import build_filter_model
from gramps.gen.sort import Sort
from gramps.gen.utils.file import get_unicode_path_from_file_chooser
from gramps.gui.utils import ProgressMeter
from gramps.gen.utils.docgen import ODSTab
from gramps.gen.const import CUSTOM_FILTERS, URL_MANUAL_PAGE
from gramps.gen.errors import WindowActiveError
from gramps.gen.datehandler import get_date
from gramps.gui.dialog import WarningDialog
from gramps.gui.plug import tool
from gramps.gen.plug.report import utils as ReportUtils
from gramps.gui.display import display_help
from gramps.gui.managedwindow import ManagedWindow
from gramps.gen.ggettext import sgettext as _
from gramps.gui.glade import Glade
from gramps.gui.editors import FilterEditor
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
WIKI_HELP_PAGE = '%s_-_Tools' % URL_MANUAL_PAGE
WIKI_HELP_SEC = _('manual|Compare_Individual_Events...')
#------------------------------------------------------------------------
#
# EventCmp
#
#------------------------------------------------------------------------
class TableReport(object):
"""
This class provides an interface for the spreadsheet table
used to save the data into the file.
"""
def __init__(self,filename,doc):
self.filename = filename
self.doc = doc
def initialize(self,cols):
self.doc.open(self.filename)
self.doc.start_page()
def finalize(self):
self.doc.end_page()
self.doc.close()
def write_table_data(self,data,skip_columns=[]):
self.doc.start_row()
index = -1
for item in data:
index += 1
if index not in skip_columns:
self.doc.write_cell(item)
self.doc.end_row()
def set_row(self,val):
self.row = val + 2
def write_table_head(self, data):
self.doc.start_row()
map(self.doc.write_cell, data)
self.doc.end_row()
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class EventComparison(tool.Tool,ManagedWindow):
def __init__(self, dbstate, uistate, options_class, name, callback=None):
self.dbstate = dbstate
self.uistate = uistate
tool.Tool.__init__(self,dbstate, options_class, name)
ManagedWindow.__init__(self, uistate, [], self)
self.qual = 0
self.filterDialog = Glade(toplevel="filters")
self.filterDialog.connect_signals({
"on_apply_clicked" : self.on_apply_clicked,
"on_editor_clicked" : self.filter_editor_clicked,
"on_help_clicked" : self.on_help_clicked,
"destroy_passed_object" : self.close,
"on_write_table" : self.__dummy,
})
window = self.filterDialog.toplevel
window.show()
self.filters = self.filterDialog.get_object("filter_list")
self.label = _('Event comparison filter selection')
self.set_window(window,self.filterDialog.get_object('title'),
self.label)
self.on_filters_changed('Person')
uistate.connect('filters-changed', self.on_filters_changed)
self.show()
    def __dummy(self, obj):
        """Dummy callback, needed because the widget is in the same glade file
        as another widget, so callbacks must be defined to avoid warnings.
"""
pass
def on_filters_changed(self, name_space):
if name_space == 'Person':
all_filter = GenericFilter()
all_filter.set_name(_("Entire Database"))
all_filter.add_rule(rules.person.Everyone([]))
self.filter_model = build_filter_model('Person', [all_filter])
self.filters.set_model(self.filter_model)
self.filters.set_active(0)
def on_help_clicked(self, obj):
"""Display the relevant portion of GRAMPS manual"""
display_help(webpage=WIKI_HELP_PAGE, section=WIKI_HELP_SEC)
def build_menu_names(self, obj):
return (_("Filter selection"),_("Event Comparison tool"))
def filter_editor_clicked(self, obj):
try:
FilterEditor('Person',CUSTOM_FILTERS,
self.dbstate,self.uistate)
except WindowActiveError:
pass
def on_apply_clicked(self, obj):
cfilter = self.filter_model[self.filters.get_active()][1]
progress_bar = ProgressMeter(_('Comparing events'),'')
progress_bar.set_pass(_('Selecting people'),1)
plist = cfilter.apply(self.db,
self.db.iter_person_handles())
progress_bar.step()
progress_bar.close()
self.options.handler.options_dict['filter'] = self.filters.get_active()
# Save options
self.options.handler.save_options()
if len(plist) == 0:
WarningDialog(_("No matches were found"))
else:
DisplayChart(self.dbstate,self.uistate,plist,self.track)
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
def by_value(first,second):
return cmp(second[0],first[0])
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
def fix(line):
    l = line.strip().replace('&', '&amp;').replace('>', '&gt;')
    return l.replace('<', '&lt;').replace('"', '&quot;')
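# For illustration (assuming the intent of fix() is XML-style escaping of a
# stripped line): fix('  x < y & "z"  ') returns 'x &lt; y &amp; &quot;z&quot;'.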
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class DisplayChart(ManagedWindow):
def __init__(self,dbstate,uistate,people_list,track):
self.dbstate = dbstate
self.uistate = uistate
ManagedWindow.__init__(self, uistate, track, self)
self.db = dbstate.db
self.my_list = people_list
self.row_data = []
self.save_form = None
self.topDialog = Glade()
self.topDialog.connect_signals({
"on_write_table" : self.on_write_table,
"destroy_passed_object" : self.close,
"on_help_clicked" : self.on_help_clicked,
"on_apply_clicked" : self.__dummy,
"on_editor_clicked" : self.__dummy,
})
window = self.topDialog.toplevel
window.show()
self.set_window(window, self.topDialog.get_object('title'),
_('Event Comparison Results'))
self.eventlist = self.topDialog.get_object('treeview')
self.sort = Sort(self.db)
self.my_list.sort(self.sort.by_last_name)
self.event_titles = self.make_event_titles()
self.table_titles = [_("Person"),_("ID")]
for event_name in self.event_titles:
self.table_titles.append(_("%(event_name)s Date") %
{'event_name' :event_name}
)
self.table_titles.append('sort') # This won't be shown in a tree
self.table_titles.append(_("%(event_name)s Place") %
{'event_name' :event_name}
)
self.build_row_data()
self.draw_display()
self.show()
    def __dummy(self, obj):
        """Dummy callback, needed because the widget is in the same glade file
        as another widget, so callbacks must be defined to avoid warnings.
"""
pass
def on_help_clicked(self, obj):
"""Display the relevant portion of GRAMPS manual"""
display_help(webpage=WIKI_HELP_PAGE, section=WIKI_HELP_SEC)
def build_menu_names(self, obj):
return (_("Event Comparison Results"),None)
def draw_display(self):
model_index = 0
tree_index = 0
mylist = []
renderer = Gtk.CellRendererText()
for title in self.table_titles:
mylist.append(str)
if title == 'sort':
# This will override the previously defined column
self.eventlist.get_column(
tree_index-1).set_sort_column_id(model_index)
else:
column = Gtk.TreeViewColumn(title,renderer,text=model_index)
column.set_sort_column_id(model_index)
self.eventlist.append_column(column)
# This one numbers the tree columns: increment on new column
tree_index += 1
# This one numbers the model columns: always increment
model_index += 1
model = Gtk.ListStore(*mylist)
self.eventlist.set_model(model)
self.progress_bar.set_pass(_('Building display'),len(self.row_data))
for data in self.row_data:
model.append(row=list(data))
self.progress_bar.step()
self.progress_bar.close()
def build_row_data(self):
self.progress_bar = ProgressMeter(_('Comparing Events'),'')
self.progress_bar.set_pass(_('Building data'),len(self.my_list))
for individual_id in self.my_list:
individual = self.db.get_person_from_handle(individual_id)
name = individual.get_primary_name().get_name()
gid = individual.get_gramps_id()
the_map = defaultdict(list)
for ievent_ref in individual.get_event_ref_list():
ievent = self.db.get_event_from_handle(ievent_ref.ref)
event_name = str(ievent.get_type())
the_map[event_name].append(ievent_ref.ref)
first = True
done = False
while not done:
added = False
tlist = [name, gid] if first else ["", ""]
for ename in self.event_titles:
if ename in the_map and len(the_map[ename]) > 0:
event_handle = the_map[ename][0]
del the_map[ename][0]
date = place = ""
if event_handle:
event = self.db.get_event_from_handle(event_handle)
date = get_date(event)
sortdate = "%09d" % (
event.get_date_object().get_sort_value()
)
place_handle = event.get_place_handle()
if place_handle:
place = self.db.get_place_from_handle(
place_handle).get_title()
tlist += [date, sortdate, place]
added = True
else:
tlist += [""]*3
if first:
first = False
self.row_data.append(tlist)
elif not added:
done = True
else:
self.row_data.append(tlist)
self.progress_bar.step()
def make_event_titles(self):
"""
Create the list of unique event types, along with the person's
name, birth, and death.
        These should be the column titles of the report.
"""
the_map = defaultdict(int)
for individual_id in self.my_list:
individual = self.db.get_person_from_handle(individual_id)
for event_ref in individual.get_event_ref_list():
event = self.db.get_event_from_handle(event_ref.ref)
name = str(event.get_type())
if not name:
break
the_map[name] += 1
unsort_list = sorted([(d, k) for k,d in the_map.iteritems()],by_value)
sort_list = [ item[1] for item in unsort_list ]
## Presently there's no Birth and Death. Instead there's Birth Date and
## Birth Place, as well as Death Date and Death Place.
## # Move birth and death to the begining of the list
## if _("Death") in the_map:
## sort_list.remove(_("Death"))
## sort_list = [_("Death")] + sort_list
## if _("Birth") in the_map:
## sort_list.remove(_("Birth"))
## sort_list = [_("Birth")] + sort_list
return sort_list
def on_write_table(self, obj):
f = Gtk.FileChooserDialog(_("Select filename"),
action=Gtk.FileChooserAction.SAVE,
buttons=(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_SAVE,
Gtk.ResponseType.OK))
f.set_current_folder(os.getcwd())
status = f.run()
f.hide()
if status == Gtk.ResponseType.OK:
name = get_unicode_path_from_file_chooser(f.get_filename())
doc = ODSTab(len(self.row_data))
doc.creator(self.db.get_researcher().get_name())
spreadsheet = TableReport(name, doc)
new_titles = []
skip_columns = []
index = 0
for title in self.table_titles:
if title == 'sort':
skip_columns.append(index)
else:
new_titles.append(title)
index += 1
spreadsheet.initialize(len(new_titles))
spreadsheet.write_table_head(new_titles)
index = 0
for top in self.row_data:
spreadsheet.set_row(index%2)
index += 1
spreadsheet.write_table_data(top,skip_columns)
spreadsheet.finalize()
f.destroy()
#------------------------------------------------------------------------
#
#
#
#------------------------------------------------------------------------
class EventComparisonOptions(tool.ToolOptions):
"""
Defines options and provides handling interface.
"""
def __init__(self, name,person_id=None):
tool.ToolOptions.__init__(self, name,person_id)
# Options specific for this report
self.options_dict = {
'filter' : 0,
}
filters = ReportUtils.get_person_filters(None)
self.options_help = {
'filter' : ("=num","Filter number.",
[ filt.get_name() for filt in filters ],
True ),
}
|
arunkgupta/gramps
|
gramps/plugins/tool/eventcmp.py
|
Python
|
gpl-2.0
| 16,172
|
[
"Brian"
] |
e2d3ebf322181132dc72fd43ca1e5e430be45b2d83a09a5cf6f4161cbbffa9c9
|
import re
from geometry import Geometry, GeometryParser
from polyhedron import Polyhedron
from tvtk.api import tvtk
class CellType(object):
VTK_VERTEX = 1
VTK_POLY_VERTEX = 2
VTK_LINE = 3
VTK_POLY_LIN = 4
VTK_TRIANGLE = 5
VTK_TRIANGLE_STRIP = 6
VTK_POLYGON = 7
VTK_PIXEL = 8
VTK_QUAD = 9
VTK_TETRA = 10
VTK_VOXEL = 11
VTK_HEXAHEDRON = 12
VTK_WEDGE = 13
VTK_PYRAMID = 14
class VTKParser(GeometryParser):
SUPPORTED_CELL_TYPES = [
CellType.VTK_TRIANGLE,
CellType.VTK_QUAD,
CellType.VTK_PIXEL,
CellType.VTK_TETRA,
CellType.VTK_VOXEL,
]
def _build_geometry(self, points, cells):
geometry = Geometry()
for cell in cells:
cell_type, cell = cell[0], cell[1:]
            if cell_type == CellType.VTK_TRIANGLE:
                poly, name = self._build_triangle(cell, points)
            elif cell_type == CellType.VTK_QUAD:
                poly, name = self._build_quadrilateral(cell, points)
            elif cell_type == CellType.VTK_PIXEL:
                poly, name = self._build_pixel(cell, points)
            elif cell_type == CellType.VTK_TETRA:
                poly, name = self._build_tetrahedron(cell, points)
            elif cell_type == CellType.VTK_VOXEL:
                poly, name = self._build_voxel(cell, points)
geometry.add_named_polyhedron(poly, name, self.current_id)
self.current_id += 1
return geometry
def _get_cell_points(self, cell, points):
cell_points = tvtk.Points()
for idx in cell:
vertex = points[idx]
cell_points.insert_next_point(vertex)
return cell_points
def _build_triangle(self, cell, points):
faces = tvtk.CellArray()
polygon = tvtk.Polygon()
polygon.point_ids.number_of_ids = 3
polygon.point_ids.set_id(0, 0)
polygon.point_ids.set_id(1, 1)
polygon.point_ids.set_id(2, 2)
faces.insert_next_cell(polygon)
cell_points = self._get_cell_points(cell, points)
poly = Polyhedron(cell_points, faces)
name = 'Triang-{}-{}-{}'.format(*cell)
return poly, name
def _build_quadrilateral(self, cell, points):
faces = tvtk.CellArray()
polygon = tvtk.Polygon()
polygon.point_ids.number_of_ids = 4
polygon.point_ids.set_id(0, 0)
polygon.point_ids.set_id(1, 1)
polygon.point_ids.set_id(2, 2)
polygon.point_ids.set_id(3, 3)
faces.insert_next_cell(polygon)
cell_points = self._get_cell_points(cell, points)
poly = Polyhedron(cell_points, faces)
name = 'Quad-{}-{}-{}-{}'.format(*cell)
return poly, name
def _build_pixel(self, cell, points):
faces = tvtk.CellArray()
polygon = tvtk.Polygon()
polygon.point_ids.number_of_ids = 4
polygon.point_ids.set_id(0, 0)
polygon.point_ids.set_id(1, 1)
polygon.point_ids.set_id(2, 2)
polygon.point_ids.set_id(3, 3)
faces.insert_next_cell(polygon)
cell_points = self._get_cell_points(cell, points)
poly = Polyhedron(cell_points, faces)
name = 'Pixel-{}-{}-{}-{}'.format(*cell)
return poly, name
def _build_tetrahedron(self, cell, points):
faces = tvtk.CellArray()
for i0,i1,i2 in [(0,1,2), (0,3,1), (0,2,3), (1,3,2)]:
polygon = tvtk.Polygon()
polygon.point_ids.number_of_ids = 3
polygon.point_ids.set_id(0, i0)
polygon.point_ids.set_id(1, i1)
polygon.point_ids.set_id(2, i2)
faces.insert_next_cell(polygon)
cell_points = self._get_cell_points(cell, points)
poly = Polyhedron(cell_points, faces)
name = 'Tetra-{}-{}-{}-{}'.format(*cell)
return poly, name
def _build_voxel(self, cell, points):
faces = tvtk.CellArray()
for i0,i1,i2,i3 in [(0,1,3,2), (1,3,7,5), (5,7,6,4), (4,0,2,6),
(6,2,3,7), (0,1,5,4)]:
polygon = tvtk.Polygon()
polygon.point_ids.number_of_ids = 4
polygon.point_ids.set_id(0, i0)
polygon.point_ids.set_id(1, i1)
polygon.point_ids.set_id(2, i2)
polygon.point_ids.set_id(3, i3)
faces.insert_next_cell(polygon)
cell_points = self._get_cell_points(cell, points)
poly = Polyhedron(cell_points, faces)
name = 'Voxel-{}-{}-{}-{}-{}-{}-{}-{}'.format(*cell)
return poly, name
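    # Note on the face lists above (an assumption about the input, matching
    # the usual VTK conventions): VTK_VOXEL points are axis-aligned and ordered
    # with x varying fastest, then y, then z, which is why the voxel face
    # tuples differ from the winding used for a general VTK_HEXAHEDRON.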
def parse(self):
points = list()
cells = list()
point_regexp = 'POINTS\s+(\d+)\s+([a-zA-Z]+)'
cell_regexp = 'CELLS\s+(\d+)'
cell_types_regexp = 'CELL_TYPES\s+(\d+)'
with open(self.filename, 'r') as _file:
lines = _file.readlines()
i = k = 0
n_points = n_cells = 0
for line in lines:
line = line.strip()
if i == 0:
if line != '# vtk DataFile Version 2.0' and\
line != '# vtk DataFile Version 3.0':
raise Exception('Wrong format!')
i += 1
continue
if not line or line[0] == '#':
k += 1
continue
if i == 2:
if line != 'ASCII':
raise Exception('Binary input not supported!')
if i == 3:
if line != 'DATASET UNSTRUCTURED_GRID':
raise Exception('VTK dataset must be unstructured grid!')
if i == 4:
match = re.match(point_regexp, line)
if match is None:
raise Exception('Line {}: wrong point declaration format!'.format(i+k))
n_points, _ = match.groups()
try:
n_points = int(n_points)
except Exception:
raise Exception('Line {}: wrong number of points!'.format(i+k))
if i >= 5 and len(points) < n_points:
try:
values = map(float, line.split())
except Exception:
raise Exception('Line {}: invalid point coordinates!'.format(i+k))
points.append(tuple(values))
if i == n_points+5:
match = re.match(cell_regexp, line)
if match is None:
raise Exception('Line {}: wrong cell declaration format!'.format(i+k))
n_cells = match.groups()[0]
try:
n_cells = int(n_cells)
except Exception:
raise Exception('Line {}: wrong number of cells!'.format(i+k))
if i > n_points+5 and len(cells) < n_cells:
match = re.findall('\d+', line)
if len(match) == 0:
raise Exception('Line {}: wrong cell format!'.format(i+k))
n_points_in_cell = int(match[0])
if len(match) != 1+n_points_in_cell:
raise Exception('Line {}: wrong cell format!'.format(i+k))
cell = list(map(int, match[1:]))
cells.append(cell)
if i == n_points+n_cells+6:
match = re.match(cell_types_regexp, line)
if match is None:
raise Exception('Line {}: wrong cell types declaration format!'.format(i+k))
if i > n_points+n_cells+6:
match = re.findall('\d+', line)
if len(match) == 0:
raise Exception('Line {}: wrong cell type format!'.format(i+k))
cell_type = int(match[0])
if cell_type not in self.SUPPORTED_CELL_TYPES:
raise Exception('Line {}: cell type {} not supported!'.format(i+k, cell_type))
cells[i-n_points-n_cells-7].insert(0, cell_type)
i += 1
return self._build_geometry(points, cells)
|
lukius/mlab-tools
|
mlab_tools/vtk_parser.py
|
Python
|
mit
| 9,153
|
[
"VTK"
] |
f64071334e1cd2ebcda4d1ae633726e74b68038c3ded1afdb1c2a05baeaef9fd
|
# !usr/bin/env python2
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2016-12-08 14:24:58
# @Last modified by: Brian Cherinka
# @Last Modified time: 2017-06-23 13:55:29
from __future__ import print_function, division, absolute_import
|
sdss/marvin
|
tests/api/__init__.py
|
Python
|
bsd-3-clause
| 296
|
[
"Brian"
] |
0a204270684dac681074c0466fcf132dbe8d248cd95687ff24ba3ed6170ae6c1
|
# SPARTA - Stochastic PArallel Rarefied-gas Time-accurate Analyzer
# http://sparta.sandia.gov
# Steve Plimpton, sjplimp@sandia.gov, Michael Gallis, magalli@sandia.gov,
# Thomas Otahal, tjotaha@sandia.gov
# Sandia National Laboratories
# Copyright (2014) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# See the README file in the top-level SPARTA directory.
# Changelog:
# - March 2018: Modified by luohancfd to enable export binary tecplot file
import math
import argparse
import sys
import os
import vtk
import glob
import platform
import multiprocessing as mp
import vtk2tecplot
from vtk2tecplot import BinaryFile
import platform
def open_grid_file(filename):
gf = None
try:
if filename.lower().endswith('.gz'):
import gzip
gf = gzip.open(filename, "r")
else:
gf = open(filename, "r")
except IOError:
print ("Unable to open SPARTA grid file: "+ filename)
sys.exit(1)
return gf
def create_grid_from_grid_file(grid_desc):
gf = open_grid_file(grid_desc["read_grid"])
for line in gf:
s = clean_line(line)
if len(s.split()) == 5:
if int(s.split()[0]) == 1 and \
int(s.split()[1]) == 0:
grid_desc["create_grid"] = {}
grid_desc["create_grid"][1] = {}
grid_desc["create_grid"][1]["Cx"] = int(s.split()[2])
grid_desc["create_grid"][1]["Cy"] = int(s.split()[3])
grid_desc["create_grid"][1]["Cz"] = int(s.split()[4])
if grid_desc["create_grid"][1]["Cz"] == 1:
grid_desc["dimension"] = 2
else:
grid_desc["dimension"] = 3
else:
print ("Error reading SPARTA grid file")
print ("top level grid specification is invalid: "+ s)
sys.exit(1)
gf.close()
return
def get_chunk(dim, chunk_size):
dmod = divmod(dim, chunk_size)
c = []
if dmod[0] == 0:
c.append([1, dmod[1]])
elif dmod[0] == 1 and dmod[1] == 0:
c.append([1, chunk_size])
else:
for i in range(dmod[0]):
c.append([i*chunk_size+1, (i+1)*chunk_size])
if dmod[1] != 0:
c.append([(dmod[0])*chunk_size+1, (dmod[0])*chunk_size + dmod[1]])
return c
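# Illustrative examples of the chunking above (1-based, inclusive ranges):
#   get_chunk(7, 3)     -> [[1, 3], [4, 6], [7, 7]]
#   get_chunk(100, 100) -> [[1, 100]]
#   get_chunk(50, 100)  -> [[1, 50]]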
def find_chunking(chunks, grid_desc, args):
Cx = grid_desc["create_grid"][1]["Cx"]
Cy = grid_desc["create_grid"][1]["Cy"]
xc = get_chunk(Cx, args.xchunk)
yc = get_chunk(Cy, args.ychunk)
if grid_desc["dimension"] == 3:
Cz = grid_desc["create_grid"][1]["Cz"]
zc = get_chunk(Cz, args.zchunk)
for k in zc:
for j in yc:
for i in xc:
chunks.append({"x": i, "y": j, "z": k})
else:
for j in yc:
for i in xc:
chunks.append({"x": i, "y": j, "z": [1,1]})
def process_grid_chunk(chunk_id, chunk_info, num_chunks, \
grid_desc, time_steps_dict, output_file, do_tecplot,do_tecplot_bin,var_names):
xi = grid_desc["create_box"]["xhi"] - grid_desc["create_box"]["xlo"]
yi = grid_desc["create_box"]["yhi"] - grid_desc["create_box"]["ylo"]
zi = grid_desc["create_box"]["zhi"] - grid_desc["create_box"]["zlo"]
px = grid_desc["create_grid"][1]["Cx"]
py = grid_desc["create_grid"][1]["Cy"]
pz = grid_desc["create_grid"][1]["Cz"]
ug = None
spacing = [xi/float(px), yi/float(py), zi/float(pz)]
origin = [grid_desc["create_box"]["xlo"] + spacing[0]*(chunk_info["x"][0]-1), \
grid_desc["create_box"]["ylo"] + spacing[1]*(chunk_info["y"][0]-1), \
grid_desc["create_box"]["zlo"] + spacing[2]*(chunk_info["z"][0]-1)]
ndims = [chunk_info["x"][1] - chunk_info["x"][0] + 2, \
chunk_info["y"][1] - chunk_info["y"][0] + 2, \
chunk_info["z"][1] - chunk_info["z"][0] + 2]
if "read_grid" in grid_desc:
read_grid_file(grid_desc, chunk_info)
if "slice" in grid_desc:
grid_desc["slice_plane_indices"] = []
if grid_desc["dimension"] == 3:
ug = create_3d_amr_grids(grid_desc, 1, 0, 0, origin, spacing, ndims, chunk_info, "")
elif grid_desc["dimension"] == 2:
ug = create_2d_amr_grids(grid_desc, 1, 0, 0, origin, spacing, ndims, chunk_info, "")
if not ug:
return chunk_id, {}
filepaths = {}
id_hash = {}
gids = ug.GetCellData().GetArray("GlobalIds")
if gids:
for i in range(gids.GetNumberOfTuples()):
id_hash[int(gids.GetTuple1(i))] = i
ug.GetCellData().RemoveArray("GlobalIds")
if "slice" not in grid_desc:
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetInputData(ug)
for time in sorted(time_steps_dict.keys()):
read_time_step_data(time_steps_dict[time], ug, id_hash)
if do_tecplot or do_tecplot_bin:
time_dir = os.path.join(output_file, output_file + '_' + str(time))
try:
os.makedirs(time_dir)
except OSError:
if not os.path.isdir(time_dir):
raise
if do_tecplot:
filepath = os.path.join(time_dir, output_file + '_' + str(chunk_id) + '_' + str(time) + '.dat')
if do_tecplot_bin:
filepath = os.path.join(time_dir, output_file + '_' + str(chunk_id) + '_' + str(time) + '.plt')
else:
filepath = os.path.join(output_file, output_file + '_' + str(chunk_id) + '_' + str(time) + '.vtu')
if 0 not in filepaths:
filepaths[0] = {}
if time not in filepaths[0]:
filepaths[0][time] = {}
filepaths[0][time][chunk_id] = filepath
if os.path.exists(filepath):
print ("WARNING: File exists, skip:%s"%(filepath,))
else:
if do_tecplot:
fh = open(filepath, "w")
vtk2tecplot.write_tecplot_ascii(ug, fh, output_file + " at time = " + str(time), time, chunk_id,var_names)
fh.close()
elif do_tecplot_bin:
vtk2tecplot.write_tecplot_bin(ug, filepath, output_file + " at time = " + str(time), time, chunk_id, var_names)
else:
writer.SetFileName(filepath)
writer.Write()
else:
vp = vtk.vtkPlane()
writer = vtk.vtkXMLPolyDataWriter()
cut = vtk.vtkCutter()
cut.SetInputData(ug)
writer.SetInputConnection(cut.GetOutputPort())
for idx in grid_desc["slice_plane_indices"]:
plane = grid_desc["slice"][idx]
vp.SetOrigin(plane["px"], plane["py"], plane["pz"])
vp.SetNormal(plane["nx"], plane["ny"], plane["nz"])
cut.SetCutFunction(vp)
for time in sorted(time_steps_dict.keys()):
read_time_step_data(time_steps_dict[time], ug, id_hash)
filepath = os.path.join(output_file, output_file + '_' + str(idx) + '_' + str(chunk_id) + '_' + str(time) + '.vtp')
if idx not in filepaths:
filepaths[idx] = {}
if time not in filepaths[idx]:
filepaths[idx][time] = {}
filepaths[idx][time][chunk_id] = filepath
if os.path.exists(filepath):
print ("WARNING: File exists, skip:%s"%(filepath,))
else:
writer.SetFileName(filepath)
writer.Write()
return chunk_id, filepaths
def create_3d_amr_grids(grid_desc, level, parent_bit_mask, parent_id, \
origin, spacing, ndims, chunk_info, dashed_id):
if level_contains_refined_cells(level, grid_desc, dashed_id):
if level == 1:
Dx = grid_desc["create_grid"][1]["Cx"]
Dy = grid_desc["create_grid"][1]["Cy"]
Dz = grid_desc["create_grid"][1]["Cz"]
level_one_bit_mask = int(math.floor(math.log(int(Dx*Dy*Dz),2)) + 1)
xc = range(chunk_info["x"][0], chunk_info["x"][1]+1)
yc = range(chunk_info["y"][0], chunk_info["y"][1]+1)
zc = range(chunk_info["z"][0], chunk_info["z"][1]+1)
cell_info = {'Cx' : 1, 'Cy' : 1, 'Cz' : 1}
get_cell_size(level, grid_desc, cell_info)
Cx = cell_info['Cx']
Cy = cell_info['Cy']
Cz = cell_info['Cz']
xi = spacing[0]
yi = spacing[1]
zi = spacing[2]
r_spacing = [xi/float(Cx), yi/float(Cy), zi/float(Cz)]
n_spacing = [xi, yi, zi]
r_ndims = [Cx + 1, Cy + 1, Cz + 1]
n_ndims = [2, 2, 2]
num_of_cells = int( (ndims[0]-1)*(ndims[1]-1)*(ndims[2]-1) )
bit_mask = int(math.floor(math.log(num_of_cells,2)) + 1)
k_append = vtk.vtkAppendFilter()
k_append.MergePointsOn()
cell_index = 1
for k in range(ndims[2]-1):
if level == 1:
zindex = (zc[k]-1)*Dx*Dy
zl = origin[2] + k*spacing[2]
y_append = vtk.vtkAppendFilter()
y_append.MergePointsOn()
y_ug = None
for j in range(ndims[1]-1):
if level == 1:
yindex = (yc[j]-1)*Dx
yl = origin[1] + j*spacing[1]
x_append = vtk.vtkAppendFilter()
x_append.MergePointsOn()
x_ug = None
for i in range(ndims[0]-1):
xl = origin[0] + i*spacing[0]
if level == 1:
bit_mask = level_one_bit_mask
cell_index = xc[i] + yindex + zindex
refine = is_3d_cell_refined(level, xc[i], yc[j], zc[k], cell_index, \
grid_desc, dashed_id, xi, yi, zi, r_spacing, r_ndims)
else:
refine = is_3d_cell_refined(level, i+1, j+1, k+1, cell_index, grid_desc, \
dashed_id, xi, yi, zi, r_spacing, r_ndims)
refined_cell_index = cell_index*(2**parent_bit_mask) + parent_id
if refine:
if not dashed_id:
next_dashed_id = str(cell_index)
else:
next_dashed_id = dashed_id + "-" + str(cell_index)
r_ug = create_3d_amr_grids(grid_desc, level+1, bit_mask+parent_bit_mask, refined_cell_index, \
[xl, yl, zl], r_spacing, r_ndims, chunk_info, next_dashed_id)
else:
r_ug = build_3d_grid(-1, refined_cell_index, [xl, yl, zl], n_spacing, n_ndims, chunk_info, grid_desc)
if r_ug:
x_append.AddInputData(r_ug)
cell_index += 1
if x_append.GetInputList().GetNumberOfItems():
x_append.Update()
x_ug = x_append.GetOutput()
y_append.AddInputData(x_ug)
if y_append.GetInputList().GetNumberOfItems():
y_append.Update()
y_ug = y_append.GetOutput()
k_append.AddInputData(y_ug)
if k_append.GetInputList().GetNumberOfItems():
k_append.Update()
return k_append.GetOutput()
else:
return None
else:
return build_3d_grid(parent_bit_mask, parent_id, origin, spacing, ndims, chunk_info, grid_desc)
def read_grid_file(grid_desc, chunk_info):
gf = open_grid_file(grid_desc["read_grid"])
Dx = grid_desc["create_grid"][1]["Cx"]
Dy = grid_desc["create_grid"][1]["Cy"]
grid_desc["parent_grid"] = {}
for line in gf:
s = clean_line(line)
if len(s.split()) == 5:
id = s.split()[1].split('-')
if len(id) and int(id[0]) != 0:
index = int(id[0]) - 1
zloc = 0
if grid_desc["dimension"] == 3:
zloc = math.floor(index/(Dx*Dy))
yloc = math.floor((index - zloc*Dx*Dy)/Dx)
xloc = index - yloc*Dx - zloc*Dx*Dy
xloc += 1
yloc += 1
zloc += 1
if xloc >= chunk_info["x"][0] and xloc <= chunk_info["x"][1] and \
yloc >= chunk_info["y"][0] and yloc <= chunk_info["y"][1] and \
zloc >= chunk_info["z"][0] and zloc <= chunk_info["z"][1]:
cld = grid_desc["parent_grid"]
for pid in id:
if int(pid) in cld:
cld = cld[int(pid)]['np']
else:
cld[int(pid)] = {'px':int(s.split()[2]), 'py':int(s.split()[3]), 'pz':int(s.split()[4]), 'np':{}}
gf.close()
def level_contains_refined_cells(level, grid_desc, dashed_id):
if "parent_grid" in grid_desc:
if level == 1:
return bool(grid_desc["parent_grid"])
else:
s = dashed_id.split('-')
d = None
for id in s:
if not d:
if int(id) in grid_desc["parent_grid"]:
d = grid_desc["parent_grid"][int(id)]['np']
else:
return False
else:
if int(id) in d:
d = d[int(id)]['np']
else:
return False
return bool(d)
else:
return level+1 in grid_desc["create_grid"]
def is_3d_cell_refined(level, i, j, k, cell_index, grid_desc, dashed_id, xi, yi, zi, r_spacing, r_ndims):
if "parent_grid" not in grid_desc:
Px = grid_desc["create_grid"][level+1]["Px"]
Py = grid_desc["create_grid"][level+1]["Py"]
Pz = grid_desc["create_grid"][level+1]["Pz"]
return (i in Px and j in Py and k in Pz)
else:
if level == 1:
if cell_index in grid_desc["parent_grid"]:
Cx = grid_desc["parent_grid"][cell_index]['px']
Cy = grid_desc["parent_grid"][cell_index]['py']
Cz = grid_desc["parent_grid"][cell_index]['pz']
r_spacing[0] = xi/float(Cx)
r_spacing[1] = yi/float(Cy)
r_spacing[2] = zi/float(Cz)
r_ndims[0] = Cx + 1
r_ndims[1] = Cy + 1
r_ndims[2] = Cz + 1
return True
else:
return False
else:
s = dashed_id.split('-')
d = None
lc = 2
for id in s:
if not d:
d = grid_desc["parent_grid"][int(id)]['np']
else:
d = d[int(id)]['np']
if cell_index in d and lc == level:
Cx = d[cell_index]['px']
Cy = d[cell_index]['py']
Cz = d[cell_index]['pz']
r_spacing[0] = xi/float(Cx)
r_spacing[1] = yi/float(Cy)
r_spacing[2] = zi/float(Cz)
r_ndims[0] = Cx + 1
r_ndims[1] = Cy + 1
r_ndims[2] = Cz + 1
return True
lc += 1
return False
def is_2d_cell_refined(level, i, j, cell_index, grid_desc, dashed_id, xi, yi, r_spacing, r_ndims):
if "parent_grid" not in grid_desc:
Px = grid_desc["create_grid"][level+1]["Px"]
Py = grid_desc["create_grid"][level+1]["Py"]
return (i in Px and j in Py)
else:
if level == 1:
if cell_index in grid_desc["parent_grid"]:
Cx = grid_desc["parent_grid"][cell_index]['px']
Cy = grid_desc["parent_grid"][cell_index]['py']
r_spacing[0] = xi/float(Cx)
r_spacing[1] = yi/float(Cy)
r_ndims[0] = Cx + 1
r_ndims[1] = Cy + 1
return True
else:
return False
else:
s = dashed_id.split('-')
d = None
lc = 2
for id in s:
if not d:
d = grid_desc["parent_grid"][int(id)]['np']
else:
d = d[int(id)]['np']
if cell_index in d and lc == level:
Cx = d[cell_index]['px']
Cy = d[cell_index]['py']
r_spacing[0] = xi/float(Cx)
r_spacing[1] = yi/float(Cy)
r_ndims[0] = Cx + 1
r_ndims[1] = Cy + 1
return True
lc += 1
return False
def get_cell_size(level, grid_desc, cell_info):
if "parent_grid" not in grid_desc:
cell_info['Cx'] = grid_desc["create_grid"][level+1]["Cx"]
cell_info['Cy'] = grid_desc["create_grid"][level+1]["Cy"]
if grid_desc["dimension"] == 3:
cell_info['Cz'] = grid_desc["create_grid"][level+1]["Cz"]
def find_2d_intersected_cells(intersecting_planes, parent_bit_mask, parent_id, origin, \
spacing, ndims, chunk_info, grid_desc):
append = vtk.vtkAppendFilter()
append.MergePointsOn()
a1 = spacing[0]/2.0
a2 = spacing[1]/2.0
index = 1
for j in range(ndims[1]-1):
p2 = a2 + origin[1] + j*spacing[1]
for i in range(ndims[0]-1):
p1 = a1 + origin[0] + i*spacing[0]
for plane in intersecting_planes:
n1 = plane["nx"]
n2 = plane["ny"]
n3 = plane["nz"]
p01 = plane["px"]
p02 = plane["py"]
p03 = plane["pz"]
d = math.fabs(n1*(p1-p01) + n2*(p2-p02) + n3*(-p03))
rhs = a1*math.fabs(n1) + a2*math.fabs(n2)
if d < rhs or math.fabs(d-rhs) < 0.000001:
ug = vtk.vtkUnstructuredGrid()
points = vtk.vtkPoints()
for pj in range(2):
j_offset = pj*spacing[1]
for pi in range(2):
points.InsertNextPoint(p1 - a1 + pi*spacing[0], \
p2 - a2 + j_offset, \
0.0)
ug.SetPoints(points)
quad = vtk.vtkQuad()
quad.GetPointIds().SetId(0, 0)
quad.GetPointIds().SetId(1, 1)
quad.GetPointIds().SetId(2, 3)
quad.GetPointIds().SetId(3, 2)
ug.InsertNextCell(quad.GetCellType(), quad.GetPointIds())
gids = vtk.vtkIdTypeArray()
gids.SetName("GlobalIds")
if parent_id == 0:
Dx = grid_desc["create_grid"][1]["Cx"]
xc = range(chunk_info["x"][0], chunk_info["x"][1]+1)
yc = range(chunk_info["y"][0], chunk_info["y"][1]+1)
yindex = (yc[j]-1)*Dx
gids.InsertNextTuple1(xc[i]+yindex)
else:
if parent_bit_mask == -1:
gids.InsertNextTuple1(parent_id)
else:
gids.InsertNextTuple1(index*(2**parent_bit_mask) + parent_id)
ug.GetCellData().AddArray(gids)
ug.GetCellData().SetActiveGlobalIds("GlobalIds")
append.AddInputData(ug)
break
index += 1
if append.GetInputList().GetNumberOfItems():
append.Update()
return append.GetOutput()
else:
return None
def find_3d_intersected_cells(intersecting_planes, parent_bit_mask, parent_id, origin, \
spacing, ndims, chunk_info, grid_desc):
append = vtk.vtkAppendFilter()
append.MergePointsOn()
a1 = spacing[0]/2.0
a2 = spacing[1]/2.0
a3 = spacing[2]/2.0
index = 1
for k in range(ndims[2]-1):
p3 = a3 + origin[2] + k*spacing[2]
for j in range(ndims[1]-1):
p2 = a2 + origin[1] + j*spacing[1]
for i in range(ndims[0]-1):
p1 = a1 + origin[0] + i*spacing[0]
for plane in intersecting_planes:
n1 = plane["nx"]
n2 = plane["ny"]
n3 = plane["nz"]
p01 = plane["px"]
p02 = plane["py"]
p03 = plane["pz"]
d = math.fabs(n1*(p1-p01) + n2*(p2-p02) + n3*(p3-p03))
rhs = a1*math.fabs(n1) + a2*math.fabs(n2) + a3*math.fabs(n3)
if d < rhs or math.fabs(d-rhs) < 0.000001:
ug = vtk.vtkUnstructuredGrid()
points = vtk.vtkPoints()
for pk in range(2):
k_offset = pk*spacing[2]
for pj in range(2):
j_offset = pj*spacing[1]
for pi in range(2):
points.InsertNextPoint(p1 - a1 + pi*spacing[0], \
p2 - a2 + j_offset, \
p3 - a3 + k_offset)
ug.SetPoints(points)
hex = vtk.vtkHexahedron()
hex.GetPointIds().SetId(0, 0)
hex.GetPointIds().SetId(1, 1)
hex.GetPointIds().SetId(2, 3)
hex.GetPointIds().SetId(3, 2)
hex.GetPointIds().SetId(4, 4)
hex.GetPointIds().SetId(5, 5)
hex.GetPointIds().SetId(6, 7)
hex.GetPointIds().SetId(7, 6)
ug.InsertNextCell(hex.GetCellType(), hex.GetPointIds())
gids = vtk.vtkIdTypeArray()
gids.SetName("GlobalIds")
if parent_id == 0:
Dx = grid_desc["create_grid"][1]["Cx"]
Dy = grid_desc["create_grid"][1]["Cy"]
xc = range(chunk_info["x"][0], chunk_info["x"][1]+1)
yc = range(chunk_info["y"][0], chunk_info["y"][1]+1)
zc = range(chunk_info["z"][0], chunk_info["z"][1]+1)
zindex = (zc[k]-1)*Dx*Dy
yindex = (yc[j]-1)*Dx
gids.InsertNextTuple1(xc[i]+yindex+zindex)
else:
if parent_bit_mask == -1:
gids.InsertNextTuple1(parent_id)
else:
gids.InsertNextTuple1(index*(2**parent_bit_mask) + parent_id)
ug.GetCellData().AddArray(gids)
ug.GetCellData().SetActiveGlobalIds("GlobalIds")
append.AddInputData(ug)
break
index += 1
if append.GetInputList().GetNumberOfItems():
append.Update()
return append.GetOutput()
else:
return None
def cells_on_slice_planes(parent_bit_mask, parent_id, origin, spacing, ndims, chunk_info, grid_desc):
intersecting_planes = []
for idx, plane in enumerate(grid_desc["slice"]):
a1 = ((ndims[0]-1)*spacing[0])/2.0
a2 = ((ndims[1]-1)*spacing[1])/2.0
a3 = ((ndims[2]-1)*spacing[2])/2.0
p1 = a1 + origin[0]
p2 = a2 + origin[1]
p3 = a3 + origin[2]
n1 = plane["nx"]
n2 = plane["ny"]
n3 = plane["nz"]
p01 = plane["px"]
p02 = plane["py"]
p03 = plane["pz"]
d = math.fabs(n1*(p1-p01) + n2*(p2-p02) + n3*(p3-p03))
rhs = a1*math.fabs(n1) + a2*math.fabs(n2) + a3*math.fabs(n3)
if d < rhs or math.fabs(d-rhs) < 0.000001:
intersecting_planes.append(plane)
if idx not in grid_desc["slice_plane_indices"]:
grid_desc["slice_plane_indices"].append(idx)
if intersecting_planes:
if grid_desc["dimension"] == 3:
return find_3d_intersected_cells(intersecting_planes, parent_bit_mask, parent_id, origin, \
spacing, ndims, chunk_info, grid_desc)
else:
return find_2d_intersected_cells(intersecting_planes, parent_bit_mask, parent_id, origin, \
spacing, ndims, chunk_info, grid_desc)
else:
return None
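# The slice tests above use the standard axis-aligned box/plane overlap check:
# with cell half-extents (a1, a2, a3), cell centre c, and a plane through p0
# with unit normal n, the cell is kept when
#   |n . (c - p0)| <= a1*|n1| + a2*|n2| + a3*|n3|
# (plus a small tolerance), i.e. the plane passes within the cell's projected
# half-width along its own normal.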
def build_3d_grid(parent_bit_mask, parent_id, origin, spacing, ndims, chunk_info, grid_desc):
if "slice" in grid_desc:
return cells_on_slice_planes(parent_bit_mask, parent_id, origin, \
spacing, ndims, chunk_info, grid_desc)
ug = vtk.vtkUnstructuredGrid()
points = vtk.vtkPoints()
for k in range(ndims[2]):
k_offset = k*spacing[2]
for j in range(ndims[1]):
j_offset = j*spacing[1]
for i in range(ndims[0]):
points.InsertNextPoint(origin[0] + i*spacing[0], \
origin[1] + j_offset, \
origin[2] + k_offset)
ug.SetPoints(points)
hex = vtk.vtkHexahedron()
for k in range(ndims[2]-1):
kl_offset = k*ndims[1]*ndims[0]
ku_offset = (k+1)*ndims[1]*ndims[0]
for j in range(ndims[1]-1):
jl_offset = j*ndims[0]
ju_offset = (j + 1)*ndims[0]
ll = jl_offset + kl_offset
uu = ju_offset + ku_offset
lu = jl_offset + ku_offset
ul = ju_offset + kl_offset
for i in range(ndims[0]-1):
hex.GetPointIds().SetId(0, i + ll)
hex.GetPointIds().SetId(1, i + 1 + ll)
hex.GetPointIds().SetId(2, i + 1 + ul)
hex.GetPointIds().SetId(3, i + ul)
hex.GetPointIds().SetId(4, i + lu)
hex.GetPointIds().SetId(5, i + 1 + lu)
hex.GetPointIds().SetId(6, i + 1 + uu)
hex.GetPointIds().SetId(7, i + uu)
ug.InsertNextCell(hex.GetCellType(), hex.GetPointIds())
gids = vtk.vtkIdTypeArray()
gids.SetName("GlobalIds")
if parent_id == 0:
Dx = grid_desc["create_grid"][1]["Cx"]
Dy = grid_desc["create_grid"][1]["Cy"]
xc = range(chunk_info["x"][0], chunk_info["x"][1]+1)
yc = range(chunk_info["y"][0], chunk_info["y"][1]+1)
zc = range(chunk_info["z"][0], chunk_info["z"][1]+1)
for k in range(ndims[2]-1):
zindex = (zc[k]-1)*Dx*Dy
for j in range(ndims[1]-1):
yindex = (yc[j]-1)*Dx
for i in range(ndims[0]-1):
gids.InsertNextTuple1(xc[i]+yindex+zindex)
else:
for i in range(ug.GetNumberOfCells()):
if parent_bit_mask == -1:
gids.InsertNextTuple1(parent_id)
else:
gids.InsertNextTuple1((i+1)*(2**parent_bit_mask) + parent_id)
ug.GetCellData().AddArray(gids)
ug.GetCellData().SetActiveGlobalIds("GlobalIds")
return ug
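# GlobalIds encoding used above (and in build_2d_grid below): a cell's id packs
# its local 1-based index into the bits above parent_bit_mask and the parent's
# id into the low bits, i.e. gid = index * 2**parent_bit_mask + parent_id,
# which keeps ids unique across refinement levels.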
def build_2d_grid(parent_bit_mask, parent_id, origin, spacing, ndims, chunk_info, grid_desc):
if "slice" in grid_desc:
return cells_on_slice_planes(parent_bit_mask, parent_id, origin, \
spacing, ndims, chunk_info, grid_desc)
ug = vtk.vtkUnstructuredGrid()
points = vtk.vtkPoints()
for j in range(ndims[1]):
j_offset = j*spacing[1]
for i in range(ndims[0]):
points.InsertNextPoint(origin[0] + i*spacing[0], \
origin[1] + j_offset, \
0.0)
ug.SetPoints(points)
quad = vtk.vtkQuad()
for j in range(ndims[1]-1):
jl_offset = j*ndims[0]
ju_offset = (j+1)*ndims[0]
for i in range(ndims[0]-1):
quad.GetPointIds().SetId(0, i + jl_offset)
quad.GetPointIds().SetId(1, i + 1 + jl_offset)
quad.GetPointIds().SetId(2, i + 1 + ju_offset)
quad.GetPointIds().SetId(3, i + ju_offset)
ug.InsertNextCell(quad.GetCellType(), quad.GetPointIds())
gids = vtk.vtkIdTypeArray()
gids.SetName("GlobalIds")
if parent_id == 0:
Dx = grid_desc["create_grid"][1]["Cx"]
xc = range(chunk_info["x"][0], chunk_info["x"][1]+1)
yc = range(chunk_info["y"][0], chunk_info["y"][1]+1)
for j in range(ndims[1]-1):
yindex = (yc[j]-1)*Dx
for i in range(ndims[0]-1):
gids.InsertNextTuple1(xc[i]+yindex)
else:
for i in range(ug.GetNumberOfCells()):
if parent_bit_mask == -1:
gids.InsertNextTuple1(parent_id)
else:
gids.InsertNextTuple1((i+1)*(2**parent_bit_mask) + parent_id)
ug.GetCellData().AddArray(gids)
ug.GetCellData().SetActiveGlobalIds("GlobalIds")
return ug
def create_2d_amr_grids(grid_desc, level, parent_bit_mask, parent_id, \
origin, spacing, ndims, chunk_info, dashed_id):
if level_contains_refined_cells(level, grid_desc, dashed_id):
if level == 1:
Dx = grid_desc["create_grid"][1]["Cx"]
Dy = grid_desc["create_grid"][1]["Cy"]
level_one_bit_mask = int(math.floor(math.log(int(Dx*Dy),2)) + 1)
xc = range(chunk_info["x"][0], chunk_info["x"][1]+1)
yc = range(chunk_info["y"][0], chunk_info["y"][1]+1)
cell_info = {'Cx' : 1, 'Cy' : 1}
get_cell_size(level, grid_desc, cell_info)
Cx = cell_info['Cx']
Cy = cell_info['Cy']
num_of_cells = int( (ndims[0]-1)*(ndims[1]-1) )
bit_mask = int(math.floor(math.log(num_of_cells,2)) + 1)
zl = 0.0
xi = spacing[0]
yi = spacing[1]
zi = 0.0
r_spacing = [xi/float(Cx), yi/float(Cy), 0.0]
n_spacing = [xi, yi, zi]
r_ndims = [Cx + 1, Cy + 1, 1]
n_ndims = [2, 2, 2]
y_append = vtk.vtkAppendFilter()
y_append.MergePointsOn()
cell_index = 1
for j in range(ndims[1]-1):
if level == 1:
yindex = (yc[j]-1)*Dx
yl = origin[1] + j*spacing[1]
x_append = vtk.vtkAppendFilter()
x_append.MergePointsOn()
x_ug = None
for i in range(ndims[0]-1):
xl = origin[0] + i*spacing[0]
if level == 1:
bit_mask = level_one_bit_mask
cell_index = xc[i] + yindex
refine = is_2d_cell_refined(level, xc[i], yc[j], cell_index, \
grid_desc, dashed_id, xi, yi, r_spacing, r_ndims)
else:
refine = is_2d_cell_refined(level, i+1, j+1, cell_index, grid_desc, \
dashed_id, xi, yi, r_spacing, r_ndims)
refined_cell_index = cell_index*(2**parent_bit_mask) + parent_id
if refine:
if not dashed_id:
next_dashed_id = str(cell_index)
else:
next_dashed_id = dashed_id + "-" + str(cell_index)
r_ug = create_2d_amr_grids(grid_desc, level+1, bit_mask+parent_bit_mask, refined_cell_index, \
[xl, yl, zl], r_spacing, r_ndims, chunk_info, next_dashed_id)
else:
r_ug = build_2d_grid(-1, refined_cell_index, [xl, yl, zl], n_spacing, n_ndims, chunk_info, grid_desc)
x_append.AddInputData(r_ug)
cell_index += 1
if x_append.GetInputList().GetNumberOfItems():
x_append.Update()
x_ug = x_append.GetOutput()
y_append.AddInputData(x_ug)
if y_append.GetInputList().GetNumberOfItems():
y_append.Update()
return y_append.GetOutput()
else:
return None
else:
return build_2d_grid(parent_bit_mask, parent_id, origin, spacing, ndims, chunk_info, grid_desc)
def clean_line(line):
line = line.partition('#')[0]
return line.strip()
def create_parent_ranges(item, level, index, parent, grid_desc):
if item == "*":
grid_desc["create_grid"][level][index] = range(1, parent + 1)
elif item[0] == "*":
rb = int(item.split('*')[1])
grid_desc["create_grid"][level][index] = range(1, rb + 1)
elif item[-1:] == "*":
lb = int(item.split('*')[0])
grid_desc["create_grid"][level][index] = range(lb, parent + 1)
elif len(item.split('*')) == 1:
grid_desc["create_grid"][level][index] = [int(item)]
elif len(item.split('*')) == 2:
b = item.split('*')
grid_desc["create_grid"][level][index] = range(int(b[0]), int(b[1]) + 1)
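# Illustrative examples of the parent-range syntax handled above, assuming
# parent = 8:
#   "*"    -> range(1, 9)    (all parents)
#   "*4"   -> range(1, 5)
#   "3*"   -> range(3, 9)
#   "2*5"  -> range(2, 6)
#   "7"    -> [7]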
def read_grid_levels(gl_array, grid_desc, level):
if len(gl_array) % 8 or \
gl_array[0].lower() != "level" or \
int(gl_array[1]) != level:
print ("Error reading SPARTA grid description file")
print ("create_grid specification is invalid: "+ ' '.join(gl_array))
sys.exit(1)
grid_desc["create_grid"][level] = {}
Px = grid_desc["create_grid"][level - 1]["Cx"]
Py = grid_desc["create_grid"][level - 1]["Cy"]
Pz = grid_desc["create_grid"][level - 1]["Cz"]
create_parent_ranges(gl_array[2], level, "Px", Px, grid_desc)
create_parent_ranges(gl_array[3], level, "Py", Py, grid_desc)
create_parent_ranges(gl_array[4], level, "Pz", Pz, grid_desc)
grid_desc["create_grid"][level]["Cx"] = int(gl_array[5])
grid_desc["create_grid"][level]["Cy"] = int(gl_array[6])
grid_desc["create_grid"][level]["Cz"] = int(gl_array[7])
if len(gl_array) > 8:
read_grid_levels(gl_array[8:], grid_desc, level + 1)
def read_grid_description_file(sif, grid_desc):
for line in sif:
s = clean_line(line)
if s.lower()[:9] == "dimension" and len(s.split()) == 2:
dimension = int(s.split()[1])
if dimension != 2 and dimension != 3:
print ("Error reading SPARTA grid description file")
print ("dimension must be either 2 or 3: "+str( dimension))
sys.exit(1)
else:
grid_desc["dimension"] = dimension
elif s.lower()[:5] == "slice" and len(s.split()) == 7:
p = {}
p["nx"] = float(s.split()[1])
p["ny"] = float(s.split()[2])
p["nz"] = float(s.split()[3])
norm = math.sqrt(math.pow(p["nx"],2) + math.pow(p["ny"],2) + math.pow(p["nz"],2))
p["nx"] = p["nx"]/norm
p["ny"] = p["ny"]/norm
p["nz"] = p["nz"]/norm
p["px"] = float(s.split()[4])
p["py"] = float(s.split()[5])
p["pz"] = float(s.split()[6])
if "slice" not in grid_desc:
grid_desc["slice"] = []
grid_desc["slice"].append(p)
elif s.lower()[:10] == "create_box" and len(s.split()) == 7:
grid_desc["create_box"] = {}
            if float(s.split()[1]) < float(s.split()[2]) and \
               float(s.split()[3]) < float(s.split()[4]) and \
               float(s.split()[5]) < float(s.split()[6]):
grid_desc["create_box"]["xlo"] = float(s.split()[1])
grid_desc["create_box"]["xhi"] = float(s.split()[2])
grid_desc["create_box"]["ylo"] = float(s.split()[3])
grid_desc["create_box"]["yhi"] = float(s.split()[4])
grid_desc["create_box"]["zlo"] = float(s.split()[5])
grid_desc["create_box"]["zhi"] = float(s.split()[6])
else:
print ("Error reading SPARTA grid description file")
print ("create_box specification is invalid: ", s)
sys.exit(1)
elif s.lower()[:11] == "create_grid" and len(s.split()) > 3:
grid_desc["create_grid"] = {}
if int(s.split()[1]) > 0 and \
int(s.split()[2]) > 0 and \
int(s.split()[3]) > 0:
grid_desc["create_grid"][1] = {}
grid_desc["create_grid"][1]["Cx"] = int(s.split()[1])
grid_desc["create_grid"][1]["Cy"] = int(s.split()[2])
grid_desc["create_grid"][1]["Cz"] = int(s.split()[3])
if len(s.split()) > 4:
read_grid_levels(s.split()[4:], grid_desc, 2)
else:
print ("Error reading SPARTA grid description file")
                print ("create_grid specification is invalid: ", s)
                sys.exit(1)
elif s.lower()[:9] == "read_grid" and len(s.split()) == 2:
filename = s.split()[1]
if not os.path.isfile(filename):
print ("Error reading SPARTA grid description file")
print ("read_grid filename is not available: ", filename)
sys.exit(1)
else:
grid_desc["read_grid"] = filename
elif len(s):
print ("Error reading SPARTA grid description file")
print ("File contains unrecognized keyword: ", s)
sys.exit(1)
def read_time_steps(result_file_list, time_steps_dict):
for f in result_file_list:
try:
fh = open(f, "r")
except IOError:
print ("Unable to open SPARTA result file: ", f)
sys.exit(1)
line = 'a' #dummy value
while line:
line = fh.readline()
s = clean_line(line)
if s.lower().replace(" ", "") == "item:timestep":
time = int(fh.readline())
if time in time_steps_dict.keys():
time_steps_dict[time].append(f)
else:
time_steps_dict[time] = [f]
break
fh.close()
def read_time_step_data(time_step_file_list, ug, id_hash):
for f in time_step_file_list:
try:
fh = open(f, "r")
except IOError:
print ("Unable to open SPARTA result file: ", f)
return
array_names = []
for line in fh:
s = clean_line(line)
if s.lower().replace(" ", "")[:10] == "item:cells":
for name in s.split()[2:]:
array_names.append(name)
break
id_index = 0
try:
id_index = array_names.index('id')
except ValueError:
print ("Error reading SPARTA result file: ", f)
print ("id column not given in file.")
return
if not ug.GetCellData().GetNumberOfArrays():
for name in array_names:
array = vtk.vtkDoubleArray()
array.SetName(name)
array.SetNumberOfComponents(1)
array.SetNumberOfTuples(ug.GetNumberOfCells())
array.FillComponent(0, 0.0)
ug.GetCellData().AddArray(array)
if ug.GetCellData().GetNumberOfArrays() != len(array_names):
print ("Error reading SPARTA result file: ", f)
print ("Expected data columns: ", ug.GetCellData().GetNumberOfArrays())
print ("Found data columns: ", len(array_names))
return
arrays = []
for val in array_names:
arrays.append(ug.GetCellData().GetArray(val))
cells_read = 0
for line in fh:
s = clean_line(line)
sl = s.split()
if len(sl) == len(array_names):
index = int(sl[id_index])
if index not in id_hash:
continue
for idx, val in enumerate(array_names):
arrays[idx].SetValue(id_hash[index], float(sl[idx]))
cells_read += 1
if cells_read == ug.GetNumberOfCells():
break
else:
print ("Error reading SPARTA result file: ", f)
print ("Flow data line cannot be processed: ", line)
return
fh.close()
def write_pvd_file(time_steps, file_name, chunking, index):
fh = open(file_name + ".pvd", "w")
fh.write('<?xml version="1.0"?>\n')
fh.write('<VTKFile type="Collection" version="0.1"\n')
fh.write(' byte_order="LittleEndian"\n')
fh.write(' compressor="vtkZLibDataCompressor">\n')
fh.write(' <Collection> \n')
for time in time_steps:
for chunk in range(len(chunking)):
if chunk in report_chunk_complete.filepaths[index][time]:
fh.write(' <DataSet timestep="' + str(time) + '" group="" part="' + str(chunk) + '" \n')
fh.write(' file="' + report_chunk_complete.filepaths[index][time][chunk] + '"/>\n')
fh.write(' </Collection> \n')
fh.write('</VTKFile> \n')
fh.close()
def report_chunk_complete(rv):
chunk_id = rv[0]
for s in rv[1]:
for t in rv[1][s]:
for c in rv[1][s][t]:
if s not in report_chunk_complete.filepaths:
report_chunk_complete.filepaths[s] = {}
if t not in report_chunk_complete.filepaths[s]:
report_chunk_complete.filepaths[s][t] = {}
report_chunk_complete.filepaths[s][t][c] = rv[1][s][t][c]
report_chunk_complete.count += 1
rem = report_chunk_complete.num_chunks - report_chunk_complete.count
print ("Completed grid chunk number: "+ str(chunk_id)+","+str(rem)+" chunk(s) remaining" )
report_chunk_complete.count = 0
report_chunk_complete.num_chunks = 0
report_chunk_complete.filepaths = {}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("sparta_grid_description_file", help="SPARTA grid description input file name")
parser.add_argument("paraview_output_file", help="ParaView output file name")
group1 = parser.add_mutually_exclusive_group()
group1.add_argument('-t', '--tecplot', action='store_true',
                        default=False, help="Create TecPlot ASCII dat files instead of VTK vtu (slice output unavailable as TecPlot)")
group1.add_argument('-tb', '--tecplot_binary', action='store_true',
                        default=False, help="Create TecPlot Binary plt files instead of VTK vtu (slice output unavailable as TecPlot)")
group = parser.add_mutually_exclusive_group()
group.add_argument('-r', '--result', help="Optional list of SPARTA dump result files", nargs='+')
group.add_argument('-rf','--resultfile', help="Optional filename containing path names of SPARTA dump result files")
parser.add_argument('-rn','--rname', default=None,help="Name of variables in SPARTA dump result files (only use with [-r | -rf] and [-t|-tb])")
parser.add_argument('-xc', '--xchunk', \
help="Optional x grid chunk size (positive integer; default 100)", \
default=100, type=int )
parser.add_argument('-yc', '--ychunk', \
help="Optional y grid chunk size (positive integer; default 100)", \
default=100, type=int )
parser.add_argument('-zc', '--zchunk', \
help="Optional z grid chunk size (positive integer; default 100)", \
default=100, type=int )
# test parser
# args = parser.parse_args(['in.post','Steady','-rb','-r','1.restart','-rn','namelist.txt'])
args = parser.parse_args()
if args.tecplot and args.tecplot_binary:
args.tecplot = False
print ("WARNING: Only TecPlot Binary plt files will be created")
if args.rname:
if os.path.exists(args.rname):
with open(args.rname,"r") as f:
rname = [i.strip() for i in f.readlines() if i.strip()[0] != '#']
else:
            print("Error: File " + args.rname + " is not found")
sys.exit(1)
else:
rname = None
try:
gdf = open(args.sparta_grid_description_file, "r")
except IOError:
        print ("Unable to open SPARTA grid description file: ", args.sparta_grid_description_file)
sys.exit(1)
if not args.tecplot and not args.tecplot_binary:
if os.path.isdir(args.paraview_output_file):
print ("ParaView output directory exists: "+ args.paraview_output_file)
            print ("Can't output data in ParaView format, try TecPlot")
sys.exit(1)
if args.xchunk < 1:
print ("Invalid xchunk size given: ", args.xchunk)
sys.exit(1)
if args.ychunk < 1:
print ("Invalid ychunk size given: ", args.ychunk)
sys.exit(1)
if args.zchunk < 1:
print ("Invalid zchunk size given: ", args.zchunk)
sys.exit(1)
grid_desc = {}
read_grid_description_file(gdf, grid_desc)
gdf.close()
if "dimension" not in grid_desc:
print ("Error: grid description file does not have a dimension statement: ", args.sparta_grid_description_file)
sys.exit(1)
if "create_box" not in grid_desc:
print ("Error: grid description file does not have a create_box statement: ", args.sparta_grid_description_file)
sys.exit(1)
if "read_grid" not in grid_desc and "create_grid" not in grid_desc:
print ("Error: grid description file does not have a read_grid or a create_grid statement: ", args.sparta_grid_description_file)
sys.exit(1)
if "slice" in grid_desc and (args.tecplot or args.tecplot_binary):
print ("Error: Grid slice output not available with TecPlot file output.")
sys.exit(1)
if "slice" not in grid_desc:
if os.path.isfile(args.paraview_output_file + '.pvd'):
print ("ParaView output file exists: "+ args.paraview_output_file + '.pvd')
sys.exit(1)
else:
for idx, slice in enumerate(grid_desc["slice"]):
file_name = args.paraview_output_file + '_slice' + str(idx) + "-" +\
str(round(slice["nx"],4)) + "_" + \
str(round(slice["ny"],4)) + "_" + \
str(round(slice["nz"],4))
if os.path.isfile(file_name + '.pvd'):
print ("ParaView output file exists: "+ file_name + '.pvd')
sys.exit(1)
if "read_grid" in grid_desc:
create_grid_from_grid_file(grid_desc)
time_steps_dict = {}
time_steps_file_list = []
if args.result:
for f in args.result:
time_steps_file_list.extend(glob.glob(f))
elif args.resultfile:
try:
rf = open(args.resultfile, "r")
for name in rf:
time_steps_file_list.append(name.rstrip())
rf.close()
except IOError:
            print ("Unable to open SPARTA result file input list file: "+ args.resultfile)
sys.exit(1)
if not time_steps_file_list:
time_steps_dict[0] = []
read_time_steps(time_steps_file_list, time_steps_dict)
chunking = []
find_chunking(chunking, grid_desc, args)
sys.stdin = open(os.devnull)
print ("Processing "+str(len(chunking))+ " grid chunk(s).")
report_chunk_complete.num_chunks = len(chunking)
if os.path.isdir (args.paraview_output_file):
print("WARNING: root directory %s exists: "%(args.paraview_output_file,))
else:
os.mkdir(args.paraview_output_file)
if platform.system() == 'Linux' or platform.system() == 'Darwin':
import multiprocessing as mp
pool = mp.Pool()
for idx, chunk in enumerate(chunking):
pool.apply_async(process_grid_chunk, \
args=(idx, chunk, len(chunking), grid_desc,
time_steps_dict, args.paraview_output_file, args.tecplot, args.tecplot_binary,rname), \
callback = report_chunk_complete)
pool.close()
pool.join()
else:
for idx, chunk in enumerate(chunking):
res = process_grid_chunk(idx, chunk, len(chunking), grid_desc,
time_steps_dict, args.paraview_output_file, args.tecplot,args.tecplot_binary,rname)
report_chunk_complete(res)
if not (args.tecplot or args.tecplot_binary):
if "slice" not in grid_desc:
write_pvd_file(sorted(time_steps_dict.keys()), args.paraview_output_file, chunking, 0)
else:
for idx, slice in enumerate(report_chunk_complete.filepaths):
file_name = args.paraview_output_file + '_slice' + str(idx) + "-" + \
str(round(grid_desc["slice"][slice]["nx"],4)) + "_" + \
str(round(grid_desc["slice"][slice]["ny"],4)) + "_" + \
str(round(grid_desc["slice"][slice]["nz"],4))
write_pvd_file(sorted(time_steps_dict.keys()), file_name, chunking, slice)
print ("Done.")
|
luohancfd/FluidDynamicTools
|
SPARTA_Tools/grid2paraview.py
|
Python
|
gpl-3.0
| 43,697
|
[
"ParaView",
"VTK"
] |
7fbf60167f5c98e9752cd3ef5cba58ef418f61f6f1716e6a6bc5750fb76ce6fb
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Copyright (C) 2005-2013 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., or visit: http://www.gnu.org/.
##
## Author(s): Stoq Team <stoq-devel@async.com.br>
##
"""Payment groups, a set of payments
The five use cases for payment groups are:
- Sale
- Purchase
- Renegotiation
- Stockdecrease
- Lonely payments
All of them contain a set of payments and they behave slightly
differently
"""
# pylint: enable=E1101
from kiwi.currency import currency
from storm.expr import And, In, Not
from storm.references import Reference
from zope.interface import implementer
from stoqlib.database.properties import IdCol
from stoqlib.domain.base import Domain
from stoqlib.domain.interfaces import IContainer
from stoqlib.domain.payment.payment import Payment
from stoqlib.lib.translation import stoqlib_gettext
_ = stoqlib_gettext
@implementer(IContainer)
class PaymentGroup(Domain):
"""A set of |payments|, all related to the same
|sale|, |purchase|, |paymentrenegotiation| or |stockdecrease|.
The set of payments can also be lonely, eg not associated with one of
objects mentioned above.
A payer is paying the recipient who's receiving the |payments|.
"""
__storm_table__ = 'payment_group'
payer_id = IdCol(default=None)
#: the |person| who is paying this group
payer = Reference(payer_id, 'Person.id')
recipient_id = IdCol(default=None)
#: the |person| who is receiving this group
recipient = Reference(recipient_id, 'Person.id')
# XXX: Rename to renegotiated
renegotiation_id = IdCol(default=None)
    #: the payment renegotiation this group belongs to
renegotiation = Reference(renegotiation_id, 'PaymentRenegotiation.id')
#: The |sale| if this group is part of one
sale = Reference('id', 'Sale.group_id', on_remote=True)
#: The |purchase| if this group is part of one
purchase = Reference('id', 'PurchaseOrder.group_id', on_remote=True)
    #: the payment renegotiation the |payments| of this group belong to
_renegotiation = Reference('id', 'PaymentRenegotiation.group_id', on_remote=True)
#: The |stockdecrease| if this group is part of one
stock_decrease = Reference('id', 'StockDecrease.group_id', on_remote=True)
#
# IContainer implementation
#
def add_item(self, payment):
payment.group = self
def remove_item(self, payment):
assert payment.group == self, payment.group
payment.group = None
def get_items(self):
store = self.store
return store.find(Payment, group=self).order_by(
Payment.identifier)
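    # Rough lifecycle sketch (illustrative only; the real callers are Stoq's
    # sale, purchase, renegotiation and stock-decrease flows):
    #   group = PaymentGroup(store=store)
    #   group.add_item(payment)   # attach an existing |payment| to the group
    #   group.confirm()           # preview payments become pending
    #   group.pay()               # pending payments are marked as paid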
#
# Properties
#
@property
def payments(self):
"""Returns all payments of this group
:returns: a list of |payments|
"""
return self.get_items()
@property
def installments_number(self):
"""The number of installments(|payments|) that are part of this group."""
return self.payments.count()
#
# Private
#
def _get_paid_payments(self):
return self.store.find(Payment,
And(Payment.group_id == self.id,
In(Payment.status,
[Payment.STATUS_PAID,
Payment.STATUS_REVIEWING,
Payment.STATUS_CONFIRMED])))
def _get_preview_payments(self):
return self.store.find(Payment,
status=Payment.STATUS_PREVIEW,
group=self)
def _get_payments_sum(self, payments, attr):
in_payments_value = payments.find(
Payment.payment_type == Payment.TYPE_IN).sum(attr) or 0
out_payments_value = payments.find(
Payment.payment_type == Payment.TYPE_OUT).sum(attr) or 0
if self.sale or self._renegotiation:
return currency(in_payments_value - out_payments_value)
elif self.purchase:
return currency(out_payments_value - in_payments_value)
# FIXME: Is this right for payments not linked to a
# sale/purchase/renegotiation?
return currency(payments.sum(attr) or 0)
#
# Public API
#
def confirm(self):
"""Confirms all |payments| in this group
Confirming the payment group means that the customer has
confirmed the payments. All individual payments are set to
pending.
"""
for payment in self._get_preview_payments():
payment.set_pending()
def pay(self):
"""Pay all |payments| in this group
"""
for payment in self.get_valid_payments():
if payment.is_paid():
continue
payment.pay()
def pay_method_payments(self, method_name):
"""Pay all |payments| of a method in this group
:param method_name: the method of the payments to be paid
"""
for payment in self.get_valid_payments():
if payment.is_of_method(method_name) and not payment.is_paid():
payment.pay()
def cancel(self):
"""Cancel all pending |payments| in this group
"""
for payment in self.get_pending_payments():
if not payment.is_cancelled():
payment.cancel()
def get_total_paid(self):
"""Returns the sum of all paid |payment| values within this group.
:returns: the total paid value
"""
return self._get_payments_sum(self._get_paid_payments(),
Payment.value)
def get_total_value(self):
"""Returns the sum of all |payment| values.
This will consider all payments ignoring just the cancelled ones.
If you want to ignore preview payments too, use
:meth:`.get_total_confirmed_value` instead
:returns: the total payment value or zero.
"""
return self._get_payments_sum(self.get_valid_payments(),
Payment.value)
def get_total_to_pay(self):
"""Returns the total amount to be paid to have the group fully paid.
"""
payments = self.store.find(
Payment,
And(Payment.group_id == self.id,
Payment.status == Payment.STATUS_PENDING))
return self._get_payments_sum(payments, Payment.value)
def get_total_confirmed_value(self):
"""Returns the sum of all confirmed payments values
This will consider all payments ignoring cancelled and preview
ones, that is, if a payment is confirmed/reviewing/paid it will
be summed.
If you want to consider the preview ones too, use
:meth:`.get_total_value` instead
:returns: the total confirmed payments value
"""
payments = self.store.find(
Payment,
And(Payment.group_id == self.id,
Not(In(Payment.status,
[Payment.STATUS_CANCELLED, Payment.STATUS_PREVIEW]))))
return self._get_payments_sum(payments, Payment.value)
# FIXME: with proper database transactions we can probably remove this
def clear_unused(self):
"""Delete payments of preview status associated to the current
payment_group. It can happen if user open and cancel this wizard.
"""
for payment in self._get_preview_payments():
self.remove_item(payment)
payment.delete()
def get_description(self):
"""Returns a small description for the payment group which will be
used in payment descriptions
:returns: the description
"""
# FIXME: This is a hack which won't scale. But I don't know
# a better solution right now. Johan 2008-09-25
if self.sale:
return _(u'sale %s') % self.sale.identifier
elif self.purchase:
return _(u'order %s') % self.purchase.identifier
elif self._renegotiation:
return _(u'renegotiation %s') % self._renegotiation.identifier
elif self.stock_decrease:
return _(u'stock decrease %s') % self.stock_decrease.identifier
# FIXME: Add a proper description
else:
return u''
def get_pending_payments(self):
"""Returns a list of pending |payments|
:returns: list of |payments|
"""
return self.store.find(Payment, group=self,
status=Payment.STATUS_PENDING)
def get_parent(self):
"""Return the |sale|, |purchase|, |paymentrenegotiation| or
|stockdecrease| this group is part of.
:returns: the object this group is part of or ``None``
"""
if self.sale:
return self.sale
elif self.purchase:
return self.purchase
elif self._renegotiation:
return self._renegotiation
elif self.stock_decrease:
return self.stock_decrease
return None
def get_total_discount(self):
"""Returns the sum of all |payment| discounts.
:returns: the total payment discount or zero.
"""
return self._get_payments_sum(self.get_valid_payments(),
Payment.discount)
def get_total_interest(self):
"""Returns the sum of all |payment| interests.
:returns: the total payment interest or zero.
"""
return self._get_payments_sum(self.get_valid_payments(),
Payment.interest)
def get_total_penalty(self):
"""Returns the sum of all |payment| penalties.
:returns: the total payment penalty or zero.
"""
return self._get_payments_sum(self.get_valid_payments(),
Payment.penalty)
def get_valid_payments(self):
"""Returns all |payments| that are not cancelled.
:returns: list of |payments|
"""
return self.store.find(Payment,
And(Payment.group_id == self.id,
Payment.status != Payment.STATUS_CANCELLED))
def get_payments_by_method_name(self, method_name):
"""Returns all |payments| of a specific |paymentmethod| within this group.
:param unicode method_name: the name of the method
:returns: list of |payments|
"""
from stoqlib.domain.payment.method import PaymentMethod
return self.store.find(
Payment,
And(Payment.group_id == self.id,
Payment.method_id == PaymentMethod.id,
PaymentMethod.method_name == method_name))
|
andrebellafronte/stoq
|
stoqlib/domain/payment/group.py
|
Python
|
gpl-2.0
| 11,462
|
[
"VisIt"
] |
f65129e46a67f4d438dc1aed8a6fca7f738b98e0661ebcd80ecd21ff6118835f
|
#!/usr/bin/env python
# add paths
import os, sys
for p in os.environ['PATH'].split(':'): sys.path.append(p)
# import modules
from re import findall
from itertools import product
from netCDF4 import Dataset as nc
from optparse import OptionParser
from numpy.ma import masked_array
from filespecs import BiasCorrectFile
from biascorrecter import BiasCorrecter
from os.path import split, splitext, sep
from numpy import intersect1d, zeros, ones
import ruamel.yaml
parser = OptionParser()
parser.add_option("-i", "--infile", dest = "infile", default = "", type = "string",
help = "Input aggregated file", metavar = "FILE")
parser.add_option("-r", "--reffile", dest = "reffile", default = "", type = "string",
help = "Reference data netcdf file", metavar = "FILE")
parser.add_option("-a", "--agglvl", dest = "agglvl", default = "gadm0", type = "string",
help = "Aggregation level (e.g., gadm0, fpu, kg)")
parser.add_option("-o", "--outdir", dest = "outdir", default = "", type = "string",
help = "Output directory to save results")
parser.add_option("-p", "--params", dest = "params", default = "", type = "string",
help = "YAML params file")
options, args = parser.parse_args()
infile = options.infile
reffile = options.reffile
agglvl = options.agglvl
outdir = options.outdir
params = ruamel.yaml.load(open(options.params, 'r'), ruamel.yaml.RoundTripLoader)
dt = params['dt']
mp = params['mp']
cr = params['cr']
ndt, nmp, ncr = len(dt), len(mp), len(cr)
crop = split(infile)[1].split('_')[3] # pull crop name from file name
with nc(reffile) as fref: # pull reference data
aref = fref.variables[agglvl][:]
aggunits = fref.variables[agglvl].units
agglongname = fref.variables[agglvl].long_name
tref = fref.variables['time'][:]
tref_units = fref.variables['time'].units
dtidx = fref.variables['dt'].long_name.split(', ').index('none')
mpidx = fref.variables['mp'].long_name.split(', ').index('true')
var = 'yield_' + crop
if var in fref.variables:
yield_ref = fref.variables[var][:, :, dtidx, mpidx]
else:
print 'Crop %s unavailable in reference file %s. Exiting . . .' % (crop, reffile)
sys.exit()
with nc(infile) as fin: # pull input data
ain = fin.variables[agglvl][:]
tin = fin.variables['time'][:]
tin_units = fin.variables['time'].units
scen = fin.variables['scen'].long_name.split(', ')
sum_idx = fin.variables['irr'].long_name.split(', ').index('sum')
var = 'yield_' + agglvl
if var in fin.variables:
yield_in = fin.variables[var][:, :, :, sum_idx]
else:
print 'Yield variable not found in file %s. Exiting . . .' % infile
sys.exit()
tref += int(findall(r'\d+', tref_units)[0]) # get reference time
tin += int(findall(r'\d+', tin_units)[0]) - 1 # get simulation time
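# note: the regex above pulls the first integer out of each 'time' units string
# (e.g., a units attribute like 'growing seasons since 1980-01-01' would yield
# 1980 -- an assumed format), converting relative years to absolute years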
aggs = intersect1d(ain, aref) # find common gadm indices
naggs, ntime, nscen = len(aggs), len(tin), len(scen)
if not naggs: raise Exception('No common aggregates')
yield_sim_common = masked_array(zeros((naggs, len(tin), nscen)), mask = ones((naggs, len(tin), nscen)))
yield_ref_common = masked_array(zeros((naggs, len(tref))), mask = ones((naggs, len(tref))))
for i in range(naggs):
yield_sim_common[i] = yield_in[list(ain).index(aggs[i])]
yield_ref_common[i] = yield_ref[list(aref).index(aggs[i])]
sh = (naggs, ntime, nscen, ndt, nmp, ncr)
yield_detr = masked_array(zeros(sh), mask = ones(sh))
yield_retr = masked_array(zeros(sh), mask = ones(sh))
for g, s in product(range(naggs), range(nscen)):
yref, ysim = yield_ref_common[g], yield_sim_common[g, :, s]
if not yref.mask.all() and not ysim.mask.all():
for d, m, c in product(range(ndt), range(nmp), range(ncr)):
bc = BiasCorrecter(dt[d], mp[m], cr[c])
detr, retr = bc.correct(ysim, yref, tin, tref)
yield_detr[g, :, s, d, m, c] = detr
yield_retr[g, :, s, d, m, c] = retr
fn = outdir + sep + splitext(split(infile)[1])[0] + '.biascorr.nc4' # create file
fout = BiasCorrectFile(fn, aggs, agglvl, aggunits, agglongname, tin, scen, dt, mp, cr)
fout.append('yield_detrend', yield_detr, (agglvl, 'time', 'scen', 'dt', 'mp', 'cr'), 't ha-1 yr-1', 'average detrended yield') # append to file
fout.append('yield_retrend', yield_retr, (agglvl, 'time', 'scen', 'dt', 'mp', 'cr'), 't ha-1 yr-1', 'average retrended yield')
|
RDCEP/ggcmi
|
bin/biascorr/biascorrect.py
|
Python
|
agpl-3.0
| 4,488
|
[
"NetCDF"
] |
a91a6bb1a1b721587da05fcdd7e68cc27484726645cc88a5e561d215263b1545
|
#pseudo-it.py v1.0.4
#iterative pseudoreference generation using BWA and the GATK
#Brice A. J. Sarver
#v0.1.0 completed 3 June 2015
#v0.8.0 release 18 Jan 2016; bug fixes, minor functionality improvements
#v1.0.1 release 8 May 2016; major functionality improvements, rewrite with Python 3.5+, bug fixes
#v1.1.0 release 8 June 2016; additional functionality (e.g., masking low-quality variants) and bug fixes
import sys
import os
import argparse
import itertools
import subprocess
import fnmatch
from Bio import SeqIO
parser = argparse.ArgumentParser(description="Iterative pseudoreference generation with BWA and the GATK", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('iterations', help='number of iterations. one iteration will not inject IUPAC ambiguities', type=int)
parser.add_argument('reference', help='path to the reference/contigs/scaffolds used for the first iteration')
parser.add_argument('prefix', help="prefix to use on output files; this is also added to the SM field with AddOrReplaceReadGroups")
required = parser.add_argument_group('required arguments')
required.add_argument('--PE1', '-1', dest='pe1', help="Data: PE1. PE, SE, OR PE+SE DATA IS REQUIRED", default=None)
required.add_argument('--PE2', '-2', dest='pe2', help="Data: PE2. PE, SE, OR PE+SE DATA IS REQUIRED", default=None)
required.add_argument('--SE', '-s', dest='se', help="Data: SE. PE, SE, OR PE+SE DATA IS REQUIRED", default=None)
parser.add_argument('--proc', '-np', dest='proc', type=int, help='number of cores to use for multithreaded applications', default=1)
parser.add_argument('--bed', '-b', dest='bed', help="a BED file of regions to call genotypes via GATK's -L", default=None)
parser.add_argument('--haplotype', dest='haplo', help="invoke to use HaplotypeCaller instead of UnifiedGenotyper. runtime will increase dramatically. indels are still ignored. HaplotypeCaller cannot be threaded", action='store_true')
parser.add_argument('--nocall', dest='nocall', help="identify nocall and low-quality sites and mask these in the final reference. has the effect of changing bases that cannot be called to N, by default. currently requires more than a single iteration; this functionality may be introduced in subsequent versions", action='store_true')
parser.add_argument('--nocall-filter', '-ncf', dest='ncf', help='additional filtering threshold for low-quality bases to be used for the masking step', default='--filterExpression "MQ < 30.0 || DP < 10 || DP > 60"')
parser.add_argument('--soft-masking', dest='soft', help='soft mask (i.e., replace with lowercase) instead of hard mask (i.e., replace with N). requires `--nocall`', action='store_true')
parser.add_argument('--iupac', dest='iupac', help='invoke to inject IUPAC ambiguity codes for heterozygotes into the final reference', action='store_true')
parser.add_argument('--keep-haploid-reference', dest='haploid', help="if using '--iupac', this argument also keeps a haploid reference; this reference is not masked", action='store_true')
parser.add_argument('--filter', '-f', dest='fil', help='overwrite the default filter used to select variants. you MUST specify --filterName and might want to consider selecting something meaningful if you plan to use the VCFs again. you can also specify multiple filters by passing multiple --filterExpression and --filterName arguments (will need a --filterExpression for each additional filter)', default='"MQ < 30.0 || DP < 5 || DP > 60" --filterName "mq30-5dp60"')
parser.add_argument('--nct', dest='nct', help="number of compute threads for the GATK's UnifiedGenotyper. total CPU usage is nct*nt", default=1)
parser.add_argument('--nt', dest='nt', help="number of data threads for the GATK's UnifiedGenotyper. total CPU usage is nct*nt", default=1)
#parser.add_argument('--resume', dest="resumeRun", help="resume a previously failed run [WORK IN PROGRESS]")
#parser.add_argument('--clean', dest="cleanRun", help="blow away all intermediate files")
args = parser.parse_args()
#argument sanity checks
if args.iterations == 1 and args.nocall:
sys.exit("One iteration and injection of nocalls is not currently supported. If iterations == 1, please do not invoke --nocall")
elif args.haploid and not args.iupac:
sys.exit("You cannot specify an additional haploid reference without --iupac (the process normally generates a haploid reference)")
elif not args.pe1 and not args.pe2 and not args.se:
sys.exit("You need to specify data with the --PE1, --PE2, and --SE options")
elif args.pe1 and not args.pe2:
sys.exit("You specified PE1 but not its mate (PE2)")
elif not args.pe1 and args.pe2:
sys.exit("You specified PE2 but not its mate (PE1)")
elif args.soft and not args.nocall:
sys.exit("You specified soft masking (--soft-masking) without specifying nocall masking (--nocall)")
#print(args)
##############################
#FUTURE TO-DO: have python attempt to locate the executables itself and pass these to subprocess.
# as-is, this script is not really portable because executable paths are hard-coded (but
# probably housed in /usr/local/bin on many systems. easy to change here with sed or equivalent)
# perhaps pass as a file (config.txt)?
#picardpath = ""
#gatkpath = ""
#bwapath = ""
#samtoolspath = ""
#gatkpath = ""
#picard = ""
#samtools = ""
#bwa = ""
#GATK = ""
#
# additionally, all of this was coded modularly on purpose so I can easily extend it to a true package
# especially, the contig renaming function needs to be kicked out as its own thing
# perhaps still get IDs and store that list as a global since the footprint can be large for eukaryotes?
#
# the haploid in addition to diploid reference output was an afterthought and can probably be
# implemented better...
##############################
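#a minimal sketch of the config.txt idea above (illustrative only and NOT wired into
#the calls below, which still hard-code paths under /usr/local/bin); it assumes lines
#of the form "picard=/usr/local/bin/picard.jar"
def load_tool_paths(config_file="config.txt"):
    paths = {}
    with open(config_file) as handle:
        for line in handle:
            line = line.strip()
            if line and not line.startswith("#"):
                key, _, value = line.partition("=")
                paths[key.strip()] = value.strip()
    return paths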
def make_indices(reference):
print("Generate dictionaries and indices (if needed)...")
#for ease of use, this picks everything before the first period
#consider symlinking to 'species.fa' or something if this is an issue (e.g., lots of '.')
dictprefix = os.path.splitext(reference)[0]
if not os.path.isfile('{}.dict'.format(dictprefix)):
subprocess.check_call('java -jar /usr/local/bin/picard.jar CreateSequenceDictionary R={} O={}.dict'.format(reference, dictprefix), shell=True)
if not os.path.isfile('{}.fai'.format(reference)):
print("faidx...")
subprocess.check_call('samtools faidx {}'.format(reference), shell=True)
if not os.path.isfile('{}.bwt'.format(reference)):
print("BWA index...")
subprocess.check_call('bwa index {}'.format(reference), shell=True)
def first_iteration(iterations, reference, prefix, proc, bed, haplo, fil, pe1, pe2, se, nct, nt):
finalseqs = []
#if bed file exists, define it as a variable now
if bed:
bedoption = "-L " + bed
else:
bedoption = ""
if nct:
nct = '-nct {}'.format(nct)
else:
nct = ""
if nt:
nt = '-nt {}'.format(nt)
else:
nt = ""
#create dictionaries, etc.
make_indices(reference)
#map PE and SE reads | convert to bam
if pe1 and pe2:
print("PE BWA map | convert to BAM...")
subprocess.check_call('bwa mem -M -t {} {} {} {} | samtools view -Sb - > {}.iteration1.pe.bam 2> {}.iteration1.pe.bam.stderr'.format(proc, reference, pe1, pe2, prefix, prefix), shell=True)
if se:
print("SE BWA map | conver to BAM...")
subprocess.check_call('bwa mem -M -t {} {} {} | samtools view -Sb - > {}.iteration1.se.bam 2> {}.iteration1.se.bam.stderr'.format(proc, reference, se, prefix, prefix), shell=True)
if pe1 and pe2 and se:
print("Merge BAMs...")
subprocess.check_call('java -jar /usr/local/bin/picard.jar MergeSamFiles I={}.iteration1.pe.bam I={}.iteration1.se.bam O={}.iteration1.merged.bam USE_THREADING=TRUE VALIDATION_STRINGENCY=LENIENT'.format(prefix, prefix, prefix), shell=True)
elif pe1 and pe2 and not se:
os.rename('{}.iteration1.pe.bam'.format(prefix), '{}.iteration1.merged.bam'.format(prefix))
elif se and not pe1 and not pe2:
os.rename('{}.iteration1.se.bam'.format(prefix), '{}.iteration1.merged.bam'.format(prefix))
#add readgroups and mark dups
print("AddOrReplaceReadGroups...")
subprocess.check_call('java -jar /usr/local/bin/picard.jar AddOrReplaceReadGroups I={}.iteration1.merged.bam O={}.iteration1.merged.RG.bam SO=coordinate LB=spret_exome PL=illumina PU=misc SM={} VALIDATION_STRINGENCY=LENIENT'.format(prefix, prefix, prefix), shell=True)
print("Mark duplicates...")
subprocess.check_call('java -jar /usr/local/bin/picard.jar MarkDuplicates I={}.iteration1.merged.RG.bam O={}.iteration1.merged.RG_dedup.bam VALIDATION_STRINGENCY=LENIENT M=iteration1.dup_metrics'.format(prefix, prefix), shell=True)
#indel realignment
print("Index BAMs...")
subprocess.check_call('samtools index {}.iteration1.merged.RG_dedup.bam'.format(prefix), shell=True)
print("Identify targets to realign...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T RealignerTargetCreator -R {} -I {}.iteration1.merged.RG_dedup.bam -o iteration1.indel_intervals.list {} -nt {}'.format(reference, prefix, bedoption, proc), shell=True)
print("Realign Indels...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T IndelRealigner -R {} -I {}.iteration1.merged.RG_dedup.bam -targetIntervals iteration1.indel_intervals.list -o {}.iteration1.realigned.bam --filter_bases_not_stored'.format(reference, prefix, prefix), shell=True)
#variant calling
if haplo:
print("HaplotypeCaller...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T HaplotypeCaller -R {} -I {}.iteration1.realigned.bam --genotyping_mode DISCOVERY -stand_emit_conf 10 -stand_call_conf 30 -o {}.iteration1.raw.vcf {}'.format(reference, prefix, prefix, bedoption), shell=True)
else:
print("UnifiedGenotyper...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T UnifiedGenotyper -R {} -I {}.iteration1.realigned.bam --genotyping_mode DISCOVERY -stand_emit_conf 10 -stand_call_conf 30 -o {}.iteration1.raw.vcf {} {} {}'.format(reference, prefix, prefix, bedoption, nct, nt), shell=True)
#selecting SNPs and filtering
print("Select SNPs from VCF...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T SelectVariants -R {} -V {}.iteration1.raw.vcf -o {}.iteration1.snps.vcf --selectTypeToInclude SNP'.format(reference, prefix, prefix), shell=True)
print("Filter variants...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T VariantFiltration -R {} -V {}.iteration1.snps.vcf --filterExpression {} -o {}.iteration1.filtered.vcf'.format(reference, prefix, fil, prefix), shell=True)
#consensus calling and filtering
print("Generate consensus from reference and variants...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T FastaAlternateReferenceMaker -R {} -o {}.gatk.iteration1.consensus.fa -V {}.iteration1.filtered.vcf'.format(reference, prefix, prefix), shell=True)
with open("{}.gatk.iteration1.consensus.fa".format(prefix), "rU") as consensus:
finalseqs = list(SeqIO.parse(consensus, "fasta"))
for i in range(0, len(ids), 1):
finalseqs[i].id = finalseqs[i].name = finalseqs[i].description = ids[i]
with open("{}.gatk.iteration1.consensus.fa".format(prefix), "w") as outfile:
SeqIO.write(finalseqs, outfile, "fasta")
def other_iterations(iterations, prefix, proc, totalIterations, bed, haplo, ncall, iupac, fil, pe1, pe2, se, nct, nt, haploid, ncf, soft):
finalseqs = []
#again, define BED argument
if bed:
bedoption = "-L " + bed
else:
bedoption = ""
if nct:
nct = '-nct {}'.format(nct)
else:
nct = ""
if nt:
nt = '-nt {}'.format(nt)
else:
nt = ""
previousIteration = iterations - 1
reference = '{}.gatk.iteration{}.consensus.fa'.format(prefix, previousIteration)
make_indices(reference)
if pe1 and pe2:
print("PE BWA map | covert to BAM...")
subprocess.check_call('bwa mem -M -t {} {} {} {} | samtools view -Sb - > {}.iteration{}.pe.bam 2> {}.iteration{}.pe.bam.stderr'.format(proc, reference, pe1, pe2, prefix, iterations, prefix, iterations), shell=True)
if se:
print("SE BWA map | convert to BAM...")
subprocess.check_call('bwa mem -M -t {} {} {} | samtools view -Sb - > {}.iteration{}.se.bam 2> {}.iteration{}.se.bam.stderr'.format(proc, reference, se, prefix, iterations, prefix, iterations), shell=True)
if pe1 and pe2 and se:
print("Merge BAMs...")
subprocess.check_call('java -jar /usr/local/bin/picard.jar MergeSamFiles I={}.iteration{}.pe.bam I={}.iteration{}.se.bam O={}.iteration{}.merged.bam USE_THREADING=TRUE VALIDATION_STRINGENCY=LENIENT'.format(prefix, iterations, prefix, iterations, prefix, iterations), shell=True)
elif pe1 and pe2 and not se:
os.rename('{}.iteration{}.pe.bam'.format(prefix, iterations), '{}.iteration{}.merged.bam'.format(prefix, iterations))
elif se and not pe1 and not pe2:
os.rename('{}.iteration{}.se.bam'.format(prefix, iterations), '{}.iteration{}.merged.bam'.format(prefix, iterations))
#sort bam
print("Sorting BAMs...")
subprocess.check_call('samtools sort -o {}.iteration{}.merged.sorted.bam -T hold.sorting -@ {} {}.iteration{}.merged.bam'.format(prefix, iterations, proc, prefix, iterations), shell=True)
#add readgroups and mark dups
print("AddOrReplaceReadGroups...")
subprocess.check_call('java -jar /usr/local/bin/picard.jar AddOrReplaceReadGroups I={}.iteration{}.merged.bam O={}.iteration{}.merged.RG.bam SO=coordinate LB=spret_exome PL=illumina PU=misc SM={} VALIDATION_STRINGENCY=LENIENT'.format(prefix, iterations, prefix, iterations, prefix), shell=True)
print("Mark duplicates...")
subprocess.check_call('java -jar /usr/local/bin/picard.jar MarkDuplicates I={}.iteration{}.merged.RG.bam O={}.iteration{}.merged.RG_dedup.bam VALIDATION_STRINGENCY=LENIENT M=iteration1.dup_metrics'.format(prefix, iterations, prefix, iterations), shell=True)
#indel realignment
print("Index BAMs...")
subprocess.check_call('samtools index {}.iteration{}.merged.RG_dedup.bam'.format(prefix, iterations), shell=True)
print("Identify targets to realign...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T RealignerTargetCreator -R {} -I {}.iteration{}.merged.RG_dedup.bam -o iteration{}.indel_intervals.list {} -nt {}'.format(reference, prefix, iterations, iterations, bedoption, proc), shell=True)
print("Realign Indels...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T IndelRealigner -R {} -I {}.iteration{}.merged.RG_dedup.bam -targetIntervals iteration{}.indel_intervals.list -o {}.iteration{}.realigned.bam --filter_bases_not_stored'.format(reference, prefix, iterations, iterations, prefix, iterations), shell=True)
#variant calling
if haplo:
print("HaplotypeCaller...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T HaplotypeCaller -R {} -I {}.iteration{}.realigned.bam --genotyping_mode DISCOVERY -stand_emit_conf 10 -stand_call_conf 30 -o {}.iteration{}.raw.vcf {}'.format(reference, prefix, iterations, prefix, iterations, bedoption), shell=True)
else:
print("UnifiedGenotyper...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T UnifiedGenotyper -R {} -I {}.iteration{}.realigned.bam --genotyping_mode DISCOVERY -stand_emit_conf 10 -stand_call_conf 30 -o {}.iteration{}.raw.vcf {} {} {}'.format(reference, prefix, iterations, prefix, iterations, bedoption, nct, nt), shell=True)
#selecting SNPs and filtering
print("Select SNPs from VCF...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T SelectVariants -R {} -V {}.iteration{}.raw.vcf -o {}.iteration{}.snps.vcf --selectTypeToInclude SNP'.format(reference, prefix, iterations, prefix, iterations), shell=True)
print("Filter variants...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T VariantFiltration -R {} -V {}.iteration{}.snps.vcf --filterExpression {} -o {}.iteration{}.filtered.vcf'.format(reference, prefix, iterations, fil, prefix, iterations), shell=True)
#consensus calling and filtering; write out IUPAC ambiguities on last iteration if indicated
if totalIterations == iterations:
if ncall:
if iupac:
print("Generate consensus from reference and variants...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T FastaAlternateReferenceMaker -R {} -o {}.gatk.iteration{}.consensus.FINAL.fa -V {}.iteration{}.filtered.vcf -IUPAC {}'.format(reference, prefix, iterations, prefix, iterations, prefix), shell=True)
if haploid:
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T FastaAlternateReferenceMaker -R {} -o {}.gatk.iteration{}.consensus.FINAL.haploid.fa -V {}.iteration{}.filtered.vcf'.format(reference, prefix, iterations, prefix, iterations), shell=True)
else:
print("Generate consensus from reference and variants...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T FastaAlternateReferenceMaker -R {} -o {}.gatk.iteration{}.consensus.FINAL.fa -V {}.iteration{}.filtered.vcf'.format(reference, prefix, iterations, prefix, iterations), shell=True)
with open("{}.gatk.iteration{}.consensus.FINAL.fa".format(prefix, iterations), "rU") as consensus:
finalseqs = list(SeqIO.parse(consensus, "fasta"))
for i in range(0, len(ids), 1):
finalseqs[i].id = finalseqs[i].name = finalseqs[i].description = ids[i]
with open("{}.gatk.iteration{}.consensus.FINAL.fa".format(prefix, iterations), "w") as outfile:
SeqIO.write(finalseqs, outfile, "fasta")
print("Emitting all sites...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T UnifiedGenotyper -R {} -I {}.iteration{}.realigned.bam --genotyping_mode DISCOVERY --output_mode EMIT_ALL_SITES -stand_emit_conf 10 -stand_call_conf 30 -o {}.allcalls.vcf {} {}'.format(reference, prefix, iterations, prefix, nct, nt), shell=True)
#I remove logging at the ERROR level because we expect filtering to fail on './.' calls
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T VariantFiltration -R {} -V {}.allcalls.vcf {} --filterName "allcallfilter" -o {}.allcalls.filtered.vcf -l ERROR'.format(reference, prefix, ncf, prefix), shell=True)
print("filtering of nocalls...")
#whip up a quick BED from the VCF using awk and throw it to bedtools. of all tests, this remains fastest
subprocess.check_call('''awk '(/\.\/\./ || /allcallfilter/) && !/^\#/ {{OFS="\t"; print $1, $2-1, $2}}' {}.allcalls.filtered.vcf | bedtools merge -i - > all_positions_to_mask.bed'''.format(prefix), shell=True)
if soft:
subprocess.check_call("bedtools maskfasta -fi {}.gatk.iteration{}.consensus.FINAL.fa -fo {}.masked.fa -bed all_positions_to_mask.bed -soft".format(prefix, iterations, prefix), shell=True)
else:
subprocess.check_call("bedtools maskfasta -fi {}.gatk.iteration{}.consensus.FINAL.fa -fo {}.masked.fa -bed all_positions_to_mask.bed".format(prefix, iterations, prefix), shell=True)
elif iupac:
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T FastaAlternateReferenceMaker -R {} -o {}.gatk.iteration{}.consensus.FINAL.fa -V {}.iteration{}.filtered.vcf -IUPAC {}'.format(reference, prefix, iterations, prefix, iterations, prefix), shell=True)
if haploid:
print("Generate consensus from reference and variants...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T FastaAlternateReferenceMaker -R {} -o {}.gatk.iteration{}.consensus.FINAL.haploid.fa -V {}.iteration{}.filtered.vcf'.format(reference, prefix, iterations, prefix, iterations), shell=True)
with open("{}.gatk.iteration{}.consensus.FINAL.fa".format(prefix, iterations), "rU") as consensus:
finalseqs = list(SeqIO.parse(consensus, "fasta"))
for i in range(0, len(ids), 1):
finalseqs[i].id = finalseqs[i].name = finalseqs[i].description = ids[i]
with open("{}.gatk.iteration{}.consensus.FINAL.fa".format(prefix, iterations), "w") as outfile:
SeqIO.write(finalseqs, outfile, "fasta")
else:
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T FastaAlternateReferenceMaker -R {} -o {}.gatk.iteration{}.consensus.FINAL.fa -V {}.iteration{}.filtered.vcf'.format(reference, prefix, iterations, prefix, iterations), shell=True)
with open("{}.gatk.iteration{}.consensus.FINAL.fa".format(prefix, iterations), "rU") as consensus:
finalseqs = list(SeqIO.parse(consensus, "fasta"))
for i in range(0, len(ids), 1):
finalseqs[i].id = finalseqs[i].name = finalseqs[i].description = ids[i]
with open("{}.gatk.iteration{}.consensus.FINAL.fa".format(prefix, iterations), "w") as outfile:
SeqIO.write(finalseqs, outfile, "fasta")
else:
print("Generate consensus from reference and variants...")
subprocess.check_call('java -jar /usr/local/bin/GenomeAnalysisTK.jar -T FastaAlternateReferenceMaker -R {} -o {}.gatk.iteration{}.consensus.fa -V {}.iteration{}.filtered.vcf'.format(reference, prefix, iterations, prefix, iterations), shell=True)
with open("{}.gatk.iteration{}.consensus.fa".format(prefix, iterations), "rU") as consensus:
finalseqs = list(SeqIO.parse(consensus, "fasta"))
for i in range(0, len(ids), 1):
finalseqs[i].id = finalseqs[i].name = finalseqs[i].description = ids[i]
with open("{}.gatk.iteration{}.consensus.fa".format(prefix, iterations), "w") as outfile:
SeqIO.write(finalseqs, outfile, "fasta")
#if present, rename the haploid references, too
if os.path.isfile('{}.gatk.iteration{}.consensus.FINAL.haploid.fa'.format(prefix, iterations)):
with open("{}.gatk.iteration{}.consensus.FINAL.haploid.fa".format(prefix, iterations), "rU") as consensus:
hapfinalseqs = list(SeqIO.parse(consensus, "fasta"))
for i in range(0, len(ids), 1):
hapfinalseqs[i].id = hapfinalseqs[i].name = hapfinalseqs[i].description = ids[i]
with open("{}.gatk.iteration{}.consensus.FINAL.haploid.fa".format(prefix, iterations), "w") as outfile:
SeqIO.write(hapfinalseqs, outfile, "fasta")
#iteration 1 function call
#FastaAlternateReferenceMaker in the GATK renames the contigs. 'ids' facilitates renaming after injection
ids = []
with open(args.reference) as ref:
for record in SeqIO.parse(ref, "fasta"):
ids.append(record.id)
first_iteration(iterations=args.iterations, reference=args.reference, prefix=args.prefix, proc=args.proc, bed=args.bed, haplo=args.haplo, fil=args.fil, pe1=args.pe1, pe2=args.pe2, se=args.se, nct=args.nct, nt=args.nt)
#and the other iterations
if args.iterations > 1:
for i in range(2, args.iterations + 1, 1):
other_iterations(iterations=i, prefix=args.prefix, proc=args.proc, totalIterations=args.iterations, bed=args.bed, haplo=args.haplo, ncall=args.nocall, iupac=args.iupac, fil=args.fil, pe1=args.pe1, pe2=args.pe2, se=args.se, nct=args.nct, nt=args.nt, haploid=args.haploid, ncf=args.ncf, soft=args.soft)
|
bricesarver/pseudo-it
|
pseudo-it.py
|
Python
|
mit
| 24,135
|
[
"BWA"
] |
f22209b7cd47dd0a2e6353fc648f8c2d51d3bcab6d5023194fb2e430d09c8a1a
|
#
# Copyright 2018-2021 Jan Griesser (U. Freiburg)
# 2014, 2020-2021 Lars Pastewka (U. Freiburg)
# 2014 James Kermode (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2014) James Kermode, King's College London
# Lars Pastewka, Karlsruhe Institute of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ======================================================================
import random
import pytest
import sys
import numpy as np
from numpy.linalg import norm
from scipy.linalg import eigh
import ase
import ase.io as io
import ase.constraints
from ase.optimize import FIRE
from ase.units import GPa
from ase.lattice.cubic import FaceCenteredCubic
from matscipy.calculators.pair_potential import PairPotential, LennardJonesQuadratic, LennardJonesLinear
from matscipy.elasticity import fit_elastic_constants, elastic_moduli, full_3x3x3x3_to_Voigt_6x6, measure_triclinic_elastic_constants
from matscipy.calculators.calculator import MatscipyCalculator
from matscipy.numerical import numerical_hessian, numerical_forces, numerical_stress
###
def measure_triclinic_elastic_constants_2nd(a, delta=0.001):
r0 = a.positions.copy()
cell = a.cell.copy()
volume = a.get_volume()
e0 = a.get_potential_energy()
C = np.zeros((3, 3, 3, 3), dtype=float)
for i in range(3):
for j in range(3):
a.set_cell(cell, scale_atoms=True)
a.set_positions(r0)
e = np.zeros((3, 3))
e[i, j] += 0.5*delta
e[j, i] += 0.5*delta
F = np.eye(3) + e
a.set_cell(np.matmul(F, cell.T).T, scale_atoms=True)
ep = a.get_potential_energy()
e = np.zeros((3, 3))
e[i, j] -= 0.5*delta
e[j, i] -= 0.5*delta
F = np.eye(3) + e
a.set_cell(np.matmul(F, cell.T).T, scale_atoms=True)
em = a.get_potential_energy()
C[:, :, i, j] = (ep + em - 2*e0) / (delta ** 2)
a.set_cell(cell, scale_atoms=True)
a.set_positions(r0)
return C
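# The loop above is a central second difference in strain:
# C[:, :, i, j] ~= (E(+delta) + E(-delta) - 2*E0) / delta**2 for each symmetrised
# strain component e_ij; note the result is not divided by the cell volume here
# (the `volume` variable is computed but unused).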
def test_forces():
"""
Test the computation of forces for a crystal and a glass
"""
calc = {(1, 1): LennardJonesQuadratic(1, 1, 2.5)}
atoms = FaceCenteredCubic('H', size=[2,2,2], latticeconstant=1.0)
atoms.rattle(0.01)
b = PairPotential(calc)
atoms.calc = b
f = atoms.get_forces()
fn = numerical_forces(atoms, d=0.0001)
np.testing.assert_allclose(f, fn, atol=1e-2)
calc = {(1, 1): LennardJonesQuadratic(1, 1, 2.5),
(1, 2): LennardJonesQuadratic(1.5, 0.8, 2.0),
(2, 2): LennardJonesQuadratic(0.5, 0.88, 2.2)}
atoms = io.read('glass_min.xyz')
atoms.rattle(0.01)
b = PairPotential(calc)
atoms.calc = b
f = atoms.get_forces()
fn = numerical_forces(atoms, d=0.0001)
np.testing.assert_allclose(f, fn, atol=1e-2)
@pytest.mark.parametrize('a0', [1.0, 1.5, 2.0, 2.5, 3.0])
def test_crystal_stress(a0):
"""
Test the computation of stresses for a crystal
"""
calc = {(1, 1): LennardJonesQuadratic(1, 1, 2.5)}
atoms = FaceCenteredCubic('H', size=[2,2,2], latticeconstant=a0)
b = PairPotential(calc)
atoms.calc = b
s = atoms.get_stress()
sn = numerical_stress(atoms, d=0.0001)
np.testing.assert_allclose(s, sn, atol=1e-4)
def test_amorphous_stress():
"""
Test the computation of stresses for a glass
"""
calc = {(1, 1): LennardJonesQuadratic(1, 1, 2.5),
(1, 2): LennardJonesQuadratic(1.5, 0.8, 2.0),
(2, 2): LennardJonesQuadratic(0.5, 0.88, 2.2)}
atoms = io.read('glass_min.xyz')
b = PairPotential(calc)
atoms.calc = b
s = atoms.get_stress()
sn = numerical_stress(atoms, d=0.0001)
np.testing.assert_allclose(s, sn, atol=1e-4)
def test_hessian():
"""
Test the computation of the Hessian matrix
"""
calc = {(1, 1): LennardJonesQuadratic(1, 1, 2.5),
(1, 2): LennardJonesQuadratic(1.5, 0.8, 2.0),
(2, 2): LennardJonesQuadratic(0.5, 0.88, 2.2)}
atoms = io.read("glass_min.xyz")
b = PairPotential(calc)
atoms.calc = b
FIRE(atoms, logfile=None).run(fmax=1e-5)
H_numerical = numerical_hessian(atoms, dx=1e-5, indices=None)
H_numerical = H_numerical.todense()
H_analytical = b.get_hessian(atoms, "dense")
np.testing.assert_allclose(H_analytical, H_numerical, atol=1e-4)
H_analytical = b.get_hessian(atoms, "sparse")
H_analytical = H_analytical.todense()
np.testing.assert_allclose(H_analytical, H_numerical, atol=1e-4)
def test_symmetry_dense():
"""
Test the symmetry of the dense Hessian matrix
"""
calc = {(1, 1): LennardJonesQuadratic(1, 1, 2.5),
(1, 2): LennardJonesQuadratic(1.5, 0.8, 2.0),
(2, 2): LennardJonesQuadratic(0.5, 0.88, 2.2)}
a = io.read('glass_min.xyz')
b = PairPotential(calc)
a.calc = b
FIRE(a, logfile=None).run(fmax=1e-5)
H = b.get_hessian(a, "dense")
np.testing.assert_allclose(np.sum(np.abs(H-H.T)), 0, atol=1e-10)
def test_symmetry_sparse():
"""
Test the symmetry of the sparse Hessian matrix
"""
calc = {(1, 1): LennardJonesQuadratic(1, 1, 2.5),
(1, 2): LennardJonesQuadratic(1.5, 0.8, 2.0),
(2, 2): LennardJonesQuadratic(0.5, 0.88, 2.2)}
a = io.read('glass_min.xyz')
b = PairPotential(calc)
a.calc = b
FIRE(a, logfile=None).run(fmax=1e-5)
H = b.get_hessian(a, "sparse")
H = H.todense()
np.testing.assert_allclose(np.sum(np.abs(H-H.T)), 0, atol=1e-10)
def test_hessian_divide_by_masses():
"""
Test the computation of the Dynamical matrix
"""
calc = {(1, 1): LennardJonesQuadratic(1, 1, 2.5),
(1, 2): LennardJonesQuadratic(1.5, 0.8, 2.0),
(2, 2): LennardJonesQuadratic(0.5, 0.88, 2.2)}
atoms = io.read("glass_min.xyz")
b = PairPotential(calc)
atoms.calc = b
FIRE(atoms, logfile=None).run(fmax=1e-5)
masses_n = np.random.randint(1, 10, size=len(atoms))
atoms.set_masses(masses=masses_n)
D_analytical = b.get_hessian(atoms, "sparse", divide_by_masses=True)
D_analytical = D_analytical.todense()
H_analytical = b.get_hessian(atoms, "sparse", divide_by_masses=False)
H_analytical = H_analytical.todense()
masses_p = masses_n.repeat(3)
H_analytical /= np.sqrt(masses_p.reshape(-1, 1) * masses_p.reshape(1, -1))
np.testing.assert_allclose(H_analytical, D_analytical, atol=1e-4)
D_analytical = b.get_hessian(atoms, "dense", divide_by_masses=True)
np.testing.assert_allclose(H_analytical, D_analytical, atol=1e-4)
def test_non_affine_forces_glass():
"""
Test the computation of the non-affine forces
"""
calc = {(1, 1): LennardJonesQuadratic(1, 1, 2.5),
(1, 2): LennardJonesQuadratic(1.5, 0.8, 2.0),
(2, 2): LennardJonesQuadratic(0.5, 0.88, 2.2)}
atoms = io.read("glass_min.xyz")
b = PairPotential(calc)
atoms.calc = b
FIRE(atoms, logfile=None).run(fmax=1e-5)
naForces_num = b.get_numerical_non_affine_forces(atoms, d=1e-5)
naForces_ana = b.get_nonaffine_forces(atoms)
np.testing.assert_allclose(naForces_num, naForces_ana, atol=0.1)
@pytest.mark.parametrize('a0', [1.0, 1.5, 2.0, 2.5, 3.0])
def test_crystal_birch_elastic_constants(a0):
"""
Test the Birch elastic constants for a crystalline system
"""
calc = {(1, 1): LennardJonesLinear(1, 1, 2.5)}
atoms = FaceCenteredCubic('H', size=[2,2,2], latticeconstant=a0)
b = PairPotential(calc)
atoms.calc = b
FIRE(ase.constraints.UnitCellFilter(atoms, mask=[0, 0, 0, 1, 1, 1]), logfile=None).run(fmax=1e-5)
C_num, Cerr = fit_elastic_constants(atoms, symmetry="triclinic", N_steps=7, delta=1e-4, optimizer=None, verbose=False)
C_ana = full_3x3x3x3_to_Voigt_6x6(b.get_birch_coefficients(atoms))
np.testing.assert_allclose(C_num, C_ana, atol=0.1)
def test_amorphous_birch_elastic_constants():
"""
Test the Birch elastic constants for an amorphous system
"""
calc = {(1, 1): LennardJonesQuadratic(1, 1, 2.5),
(1, 2): LennardJonesQuadratic(1.5, 0.8, 2.0),
(2, 2): LennardJonesQuadratic(0.5, 0.88, 2.2)}
atoms = io.read("glass_min.xyz")
b = PairPotential(calc)
atoms.calc = b
FIRE(ase.constraints.UnitCellFilter(atoms, mask=[1, 1, 1, 1, 1, 1]), logfile=None).run(fmax=1e-5)
C_num, Cerr = fit_elastic_constants(atoms, symmetry="triclinic", N_steps=7, delta=1e-4, optimizer=None, verbose=False)
C_ana = full_3x3x3x3_to_Voigt_6x6(b.get_birch_coefficients(atoms))
np.testing.assert_allclose(C_num, C_ana, atol=0.1)
@pytest.mark.parametrize('a0', [1.0, 1.5, 2.0, 2.5, 3.0])
def test_non_affine_elastic_constants_crystal(a0):
"""
Test the computation of Birch elastic constants and correction due to non-affine displacements
"""
calc = {(1, 1): LennardJonesLinear(1, 1, 2.5)}
atoms = FaceCenteredCubic('H', size=[3,3,3], latticeconstant=a0)
b = PairPotential(calc)
atoms.calc = b
FIRE(ase.constraints.UnitCellFilter(atoms, mask=[0, 0, 0, 1, 1, 1]), logfile=None).run(fmax=1e-5)
C_num, Cerr = fit_elastic_constants(atoms, symmetry="triclinic", N_steps=7, delta=1e-4, optimizer=FIRE, fmax=1e-5, verbose=False)
anaC_na = full_3x3x3x3_to_Voigt_6x6(b.get_non_affine_contribution_to_elastic_constants(atoms))
anaC_af = full_3x3x3x3_to_Voigt_6x6(b.get_birch_coefficients(atoms))
np.testing.assert_allclose(C_num, anaC_af + anaC_na, atol=0.1)
def test_non_affine_elastic_constants_glass():
"""
Test the computation of Birch elastic constants and correction due to non-affine displacements
"""
calc = {(1, 1): LennardJonesQuadratic(1, 1, 2.5),
(1, 2): LennardJonesQuadratic(1.5, 0.8, 2.0),
(2, 2): LennardJonesQuadratic(0.5, 0.88, 2.2)}
atoms = io.read("glass_min.xyz")
b = PairPotential(calc)
atoms.calc = b
FIRE(ase.constraints.UnitCellFilter(atoms, mask=[1, 1, 1, 1, 1, 1]), logfile=None).run(fmax=1e-5)
C_num, Cerr = fit_elastic_constants(atoms, symmetry="triclinic", N_steps=5, delta=1e-4, optimizer=FIRE, fmax=1e-5, verbose=False)
Cana_af = full_3x3x3x3_to_Voigt_6x6(b.get_birch_coefficients(atoms))
Cana_na = full_3x3x3x3_to_Voigt_6x6(b.get_non_affine_contribution_to_elastic_constants(atoms))
np.testing.assert_allclose(C_num, Cana_na + Cana_af, atol=0.1)
H_nn = b.get_hessian(atoms, "sparse").todense()
eigenvalues, eigenvectors = eigh(H_nn, subset_by_index=[3,3*len(atoms)-1])
Cana2_na = full_3x3x3x3_to_Voigt_6x6(b.get_non_affine_contribution_to_elastic_constants(atoms, eigenvalues, eigenvectors))
np.testing.assert_allclose(C_num, Cana2_na + Cana_af, atol=0.1)
def test_elastic_born_crystal_stress():
class TestPotential():
def __init__(self, cutoff):
self.cutoff = cutoff
def __call__(self, r):
# Return function value (potential energy).
return r - self.cutoff
#return np.ones_like(r)
def get_cutoff(self):
return self.cutoff
def first_derivative(self, r):
return np.ones_like(r)
#return np.zeros_like(r)
def second_derivative(self, r):
return np.zeros_like(r)
def derivative(self, n=1):
if n == 1:
return self.first_derivative
elif n == 2:
return self.second_derivative
else:
raise ValueError(
"Don't know how to compute {}-th derivative.".format(n))
for calc in [{(1, 1): LennardJonesQuadratic(1.0, 1.0, 2.5)}]:
#for calc in [{(1, 1): TestPotential(2.5)}]:
b = PairPotential(calc)
atoms = FaceCenteredCubic('H', size=[6,6,6], latticeconstant=1.2)
# Randomly deform the cell
strain = np.random.random([3, 3]) * 0.02
atoms.set_cell(np.matmul(np.identity(3) + strain, atoms.cell), scale_atoms=True)
atoms.calc = b
FIRE(ase.constraints.StrainFilter(atoms, mask=[1,1,1,0,0,0]), logfile=None).run(fmax=1e-5)
Cnum, Cerr_num = fit_elastic_constants(atoms, symmetry="triclinic", N_steps=11, delta=1e-4, optimizer=None, verbose=False)
Cnum2_voigt = full_3x3x3x3_to_Voigt_6x6(measure_triclinic_elastic_constants(atoms), tol=10)
#Cnum3_voigt = full_3x3x3x3_to_Voigt_6x6(measure_triclinic_elastic_constants_2nd(atoms), tol=10)
Cana = b.get_birch_coefficients(atoms)
Cana_voigt = full_3x3x3x3_to_Voigt_6x6(Cana, tol=10)
#print(atoms.get_stress())
#print(Cnum)
#print(Cana_voigt)
np.set_printoptions(precision=3)
#print("Stress: \n", atoms.get_stress())
#print("Numeric (fit_elastic_constants): \n", Cnum)
#print("Numeric (measure_triclinic_elastic_constants): \n", Cnum2_voigt)
#print("Numeric (measure_triclinic_elastic_constants_2nd): \n", Cnum3_voigt)
#print("Analytic: \n", Cana_voigt)
#print("Absolute Difference (fit_elastic_constants): \n", Cnum-Cana_voigt)
#print("Absolute Difference (measure_triclinic_elastic_constants): \n", Cnum2_voigt-Cana_voigt)
#print("Difference between numeric results: \n", Cnum-Cnum2_voigt)
np.testing.assert_allclose(Cnum, Cana_voigt, atol=10)
|
libAtoms/matscipy
|
tests/test_pair_potential_calculator.py
|
Python
|
lgpl-2.1
| 14,819
|
[
"ASE",
"CRYSTAL",
"Matscipy"
] |
77b4a47719286b51faf27bbeadcfa97a414b4c40b49858ea7dfdd7a4cea113e8
|
from __future__ import (absolute_import, division, print_function)
import numpy as np
import scipy.constants
# Constants
avogadro = scipy.constants.N_A
cm3_to_angstroms3 = 1e24
avogadro_term = avogadro / cm3_to_angstroms3
PRECISION = 5
def is_int(value):
"""Checks if `value` is an integer
:param value: Input value to check if integer
:type value: Any
:return: If value is an integer
:rtype: bool
"""
is_number = True
try:
int(value)
except ValueError:
is_number = False
return is_number
def is_float(value):
"""Checks if `value` is a float
:param value: Input value to check if float
:type value: Any
:return: If value is a float
:rtype: bool
"""
is_number = True
try:
float(value)
except ValueError:
is_number = False
return is_number
def is_number(value):
"""Checks if `value` is a float
:param value: Input value to check if float
:type value: Any
:return: If value is an float
:rtype: bool
"""
return is_float(value)
def oneAndOnlyOneTrue(iterable):
"""Determine if iterable (ie list) has one and only one `True` value
:param iterable:
:type iterable: list
:return: If there is one and only one True
:rtype: bool
"""
try:
iterator = iter(iterable)
has_true = any(iterator)
has_another_true = any(iterator)
return has_true and not has_another_true
except Exception as e:
print(e)
raise
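# Illustrative behaviour (doctest-style, not part of the original module):
# >>> oneAndOnlyOneTrue([False, True, False])
# True
# >>> oneAndOnlyOneTrue([True, False, True])
# False
# >>> oneAndOnlyOneTrue([])
# False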
def volume_of_cylinder(radius=np.NaN, height=np.NaN):
"""Computes volume of a cylinder
:param radius: Radius of cylinder (in units of length)
:type radius: float
:param height: Height of cylinder (in units of length)
:type height: float
:return: Volume of the cylinder in (in units of :math:`length^{3}`)
:rtype: float
"""
return np.float(np.pi) * np.float(radius)**2 * np.float(height)
def volume_of_sphere(radius=np.NaN):
"""Computes volume of a sphere
:param radius: Radius of sphere (in units of length)
:type radius: float
:return: Volume of the sphere in (in units of :math:`length^{3}`)
:rtype: float
"""
return (4. * np.pi * np.float(radius)**3 / np.float(3))
def volume_of_hollow_cylinder(
inner_radius=np.NaN,
outer_radius=np.NaN,
height=np.NaN):
"""Computes volume of a hollow cylinder
:param inner_radius: Inner radius of cylinder (in units of length)
:type inner_radius: float
:param outer_radius: Outer radius of cylinder (in units of length)
:type outer_radius: float
:param height: Height of cylinder (in units of length)
:type height: float
:return: Volume of the cylinder in (in units of :math:`length^{3}`)
:rtype: float
"""
inner_cylinder = volume_of_cylinder(radius=inner_radius, height=height)
outer_cylinder = volume_of_cylinder(radius=outer_radius, height=height)
return outer_cylinder - inner_cylinder
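# Quick sanity check (illustrative values): inner_radius=0.2, outer_radius=0.3,
# height=1.0 gives pi * (0.3**2 - 0.2**2) * 1.0 ~= 0.15708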
def get_volume_from_geometry(dictionary):
"""calculate volume given a geometry dictionary of the given shape in example below
:examples:
>>> dictionary = {'Shape': "Cylinder", 'Radius': 0.25, 'Radius2': 'N/A', 'Height': 1.0 }
>>> volume = get_volume_from_geometry(dictionary)
:param dictionary: Geometry JSON
:type dictionary: dict
:return: Volume of the geometry
:rtype: float
"""
if dictionary['Shape'].lower() == 'cylinder':
radius = dictionary['Radius']
height = dictionary['Height']
volume = volume_of_cylinder(radius=radius, height=height)
elif dictionary['Shape'].lower() == 'sphere':
radius = dictionary['Radius']
volume = volume_of_sphere(radius=radius)
elif dictionary['Shape'].lower() == 'hollow cylinder':
inner_radius = dictionary['Radius']
outer_radius = dictionary['Radius2']
height = dictionary['Height']
volume = volume_of_hollow_cylinder(
inner_radius=inner_radius,
outer_radius=outer_radius,
height=height
)
else:
raise Exception("Passed unkown shape into get_volume_from_geometry")
return volume
def mass_density2number_density(mass_density, natoms, molecular_mass):
"""Converts from mass_density (:math:`g/cm^{3}`) to number density (atoms/:math:`\\AA^{3}`)
:param mass_density: mass density in (:math:`g/cm^{3}`)
:type mass_density: float
:param natoms: total number of atoms
:type natoms: float
:param molecular_mass: molecular mass in (:math:`g/mol`)
:type molecular_mass: float
:return: number density in (atoms/:math:`\\AA^{3}`)
:rtype: float
"""
number_density = mass_density * avogadro_term * natoms / molecular_mass
return number_density
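# Worked example (illustrative values): liquid water at mass_density ~ 1.0 g/cm^3
# with natoms = 3 and molecular_mass ~ 18.015 g/mol gives
# 1.0 * (6.022e23 / 1e24) * 3 / 18.015 ~= 0.100 atoms/angstrom^3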
def number_density2mass_density(number_density, natoms, molecular_mass):
"""Converts from number density (atoms/:math:`\\AA^{3}`) to mass_density (:math:`g/cm^{3}`)
:param number_density: number density in (atoms/:math:`\\AA^{3}`)
:type number_density: float
:param natoms: total number of atoms
:type natoms: float
:param molecular_mass: molecular mass in (:math:`g/mol`)
:type molecular_mass: float
:return: mass density in (:math:`g/cm^{3}`)
:rtype: float
"""
mass_density = number_density * molecular_mass / natoms / avogadro_term
return mass_density
def mass2mass_density(mass, volume):
"""Converts mass (:math:`g`) and volume (:math:`cm^{3}`) to mass_density (:math:`g/cm^{3}`)
:param mass: mass in (:math:`g`)
:type mass: float
:param volume: volume in (:math:`cm^{3}`)
:type volume: float
:return: mass density in (:math:`g/cm^{3}`)
:rtype: float
"""
mass_density = mass / volume
return mass_density
def mass2number_density(mass, volume, natoms, molecular_mass):
"""Converts mass (:math:`g`) and volume (:math:`cm^{3}`) to number density (atoms/:math:`\\AA^{3}`)
:param mass: mass in (:math:`g`)
:type mass: float
:param volume: volume in (:math:`cm^{3}`)
:type volume: float
:param natoms: total number of atoms
:type natoms: float
:param molecular_mass: molecular mass in (:math:`g/mol`)
:type molecular_mass: float
:return: number density in (atoms/:math:`\\AA^{3}`)
:rtype: float
"""
mass_density = mass2mass_density(mass, volume)
number_density = mass_density2number_density(
mass_density, natoms, molecular_mass)
return number_density
def mass_density2mass(mass_density, volume):
"""Converts from mass_density (:math:`g/cm^{3}`) to mass (:math:`g`)
:param mass_density: mass density in (:math:`g/cm^{3}`)
:type mass_density: float
:param volume: volume in (:math:`cm^{3}`)
:type volume: float
:return: mass in (:math:`g`)
:rtype: float
"""
mass = mass_density * volume
return mass
def number_density2mass(number_density, volume, natoms, molecular_mass):
"""Converts from number density (atoms/:math:`\\AA^{3}`) to mass (:math:`g`)
:param number_density: number density in (atoms/:math:`\\AA^{3}`)
:type number_density: float
:param volume: volume in (:math:`cm^{3}`)
:type volume: float
:param natoms: total number of atoms
:type natoms: float
:param molecular_mass: molecular mass in (:math:`g/mol`)
:type molecular_mass: float
:return: mass in (:math:`g`)
:rtype: float
"""
mass_density = number_density2mass_density(
number_density, natoms, molecular_mass)
mass = mass_density2mass(mass_density, volume)
return mass
|
neutrons/FastGR
|
addie/utilities/math_tools.py
|
Python
|
mit
| 7,624
|
[
"Avogadro"
] |
ee4e724d7e3fffee286c94164b00a763070c9ffa97d2ab67c83787ee159ed61b
|
from __future__ import print_function
from numpy import *
'''
Authors: Kai Liao, Adri Agnello (UCSB and UCLA)
Phil Marshall (Stanford)
Started: Liao, Aug.2014
Description: Convert Adri's Mathematica version into Python.
Given the survey imaging conditions and an object ID in OM10,
paint it in chosen band (g,r,i,z). Current python version
adapted on doubles, must be refactored to work on quads and
systems with any number of point-sources in image-plane.
'''
def bs(n):
return (2.*n-1.)/3.
def Sersic(R, n):
return exp(-bs(n)*R**(1./n))
def flase(x, y, flat, pa, n):
return Sersic((flat*(x*cos(pa)+y*sin(pa))**2.+flat**(-1.0)*(-sin(pa)*x+cos(pa)*y)**2.)**0.5, n)
def G(x, dx):
return exp(-0.5*x**2./dx**2.)/((2*pi)**0.5*dx)
def GG(x, dx):
return G(abs(x)**0.5, dx)
def Gint(x, y, dx, dy):
return (9./16)*G(x,dx)*G(y,dy)+(3./32)*(G(x+1.,dx)*G(y,dy)+G(x-1.,dx)*G(y,dy)+\
G(x,dx)*G(y+1.,dy)+G(x,dx)*G(y-1.,dy))+(1/64.)*(G(x-1.,dx)*G(y-1.,dy)+\
G(x-1.,dx)*G(y+1.,dy)+G(x+1.,dx)*G(y-1.,dy)+G(x+1.,dx)*G(y+1.,dy))
#Gint is useful to interpolate the Gaussian psf on 3*3 grid, i.e. sharing PSF fluxes among neighbouring pixels.
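#the weights above sum to one (9/16 + 4*3/32 + 4*1/64 = 1), so the interpolation conserves the PSF flux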
#SDSS
pixscale = 0.4
meanIQ = 1.4/2
meanIQ = meanIQ/(log(2.)*2.**0.5) #the log is log_e!
meandepth = 20.8 #magnitudes per arcsecond
errdepth = 0.3
#more specific: band fluxes and fluctuations
gmean = 21.9
egd = 0.3
gsky = pixscale**2.*10.**(9.-0.4*gmean)
rmean = 20.9
erd = 0.3
rsky = pixscale**2.*10.**(9.-0.4*rmean)
imean = 20.2
eid = 0.4
isky = pixscale**2.*10.**(9.-0.4*imean)
zmean = 18.9
ezd = 0.5
zsky = pixscale**2.*10.**(9.-0.4*zmean)
#psf width distributions
mgIQ = 1.65/(2.*2.**0.5*log(2.))
dgIQ = 0.4/(2.*2.**0.5*log(2.))
moIQ = 1.4/(2.*2.**0.5*log(2.))
doIQ = 0.3/(2.*2.**0.5*log(2.)) #psf width in the other bands
dr = pixscale**2.*10.**(9.-0.4*meandepth)/5. #five sigma detection of deepest source
expo = (log(10.)*erd/(2.5*(2*pi)**0.5))/dr**2.
dg = (log(10.)*egd/(2.5*(2*pi)**0.5))**0.5/expo**0.5
di = (log(10.)*eid/(2.5*(2*pi)**0.5))**0.5/expo**0.5
dz = (log(10.)*ezd/(2.5*(2*pi)**0.5))**0.5/expo**0.5
|
drphilmarshall/OM10
|
om10/imagenew.py
|
Python
|
mit
| 2,115
|
[
"Gaussian"
] |
c08283850a141b33a73c400c1b9677d21143b95a2c62b9a687e5e316671d8574
|
# -*- coding: utf-8 -*-
'''
utilities.py
'''
import os
import tarfile
from scipy.spatial import cKDTree
import numpy as np
from shutil import rmtree, copyfile
from .pycompat import iteritems, SafeConfigParser
from netCDF4 import Dataset
from logging import getLogger
from .log import LOG_NAME
from .share import TIMESTAMPFORM, RPOINTER, EARTHRADIUS, METERSPERMILE
from .share import METERS2PERACRE, METERSPERKM, VALID_CHARS
from .config import read_config
from .pycompat import pyzip
# -------------------------------------------------------------------- #
# create logger
log = getLogger(LOG_NAME)
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# find x y coordinates
def latlon2yx(plats, plons, glats, glons):
'''find y x coordinates '''
# use astronomical conventions for longitude
# (i.e. negative longitudes to the east of 0)
if (glons.max() > 180):
posinds = np.nonzero(glons > 180)
glons[posinds] -= 360
log.info('adjusted grid lon to astronomical conventions')
if (plons.max() > 180):
posinds = np.nonzero(plons > 180)
plons[posinds] -= 360
log.info('adjusted point lon to astronomical conventions')
if glons.ndim == 1 or glats.ndim == 1:
glons, glats = np.meshgrid(glons, glats)
combined = np.dstack(([glats.ravel(), glons.ravel()]))[0]
points = list(np.vstack((np.array(plats), np.array(plons))).transpose())
mytree = cKDTree(combined)
indexes = mytree.query(points, k=1)[1]
y, x = np.unravel_index(indexes, glons.shape)
return y, x
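# Illustrative example (assumed values, not part of the original module):
# >>> glats, glons = np.array([0., 1.]), np.array([0., 1.])
# >>> latlon2yx(np.array([1.0]), np.array([0.9]), glats, glons)
# (array([1]), array([1]))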
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# Search neighboring grid cells for channel
def search_for_channel(source_area, routys, routxs, search=1, tol=10):
'''Search neighboring grid cells for channel'''
log.debug('searching for channel, tol: %f, search: %i', tol, search)
new_ys = np.copy(routys)
new_xs = np.copy(routxs)
ysize, xsize = source_area.shape
for i, (y, x) in enumerate(pyzip(routys, routxs)):
area0 = source_area[y, x]
for j in range(search + 1):
ymin = np.clip(y - j, 0, ysize)
ymax = np.clip(y + j + 1, 0, ysize)
xmin = np.clip(x - j, 0, xsize)
xmax = np.clip(x + j + 1, 0, xsize)
search_area = source_area[ymin:ymax, xmin:xmax]
if np.any(search_area / area0 > tol):
sy, sx = np.unravel_index(search_area.argmax(),
search_area.shape)
new_ys[i] = np.clip(y + sy - j, 0, ysize)
new_xs[i] = np.clip(x + sx - j, 0, xsize)
log.debug('Moving pour point to channel y: %s->%s, x: %s->%s',
y, new_ys[i], x, new_xs[i])
log.debug('Source Area has increased from %s to %s',
area0, source_area[new_ys[i], new_xs[i]])
break
return new_ys, new_xs
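# Illustrative example (assumed values, not part of the original module): a pour
# point whose eastern neighbour drains a much larger area is moved onto that cell:
# >>> area = np.array([[1., 1., 1.], [1., 1., 50.], [1., 1., 1.]])
# >>> search_for_channel(area, np.array([1]), np.array([1]), search=1, tol=10)
# (array([1]), array([2]))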
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# Write rpointer file
def write_rpointer(restart_dir, restart_file, timestamp):
''' Write a configuration file with restart file and time '''
rpointer_file = os.path.join(restart_dir, RPOINTER)
config = SafeConfigParser()
config.optionxform = str
time_str = timestamp.strftime(TIMESTAMPFORM)
config.add_section('RESTART')
config.set('RESTART', 'FILE_NAME', os.path.join(restart_dir, restart_file))
config.set('RESTART', 'TIMESTAMP', time_str)
with open(rpointer_file, 'w') as configfile:
config.write(configfile)
return
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# A helper function to read a netcdf file
def read_netcdf(nc_file, variables=None, coords=None):
'''
Read data from input netCDF. Will read all variables if none provided.
Will also return all variable attributes.
    Data and attributes are returned as dictionaries keyed by variable name.
'''
f = Dataset(nc_file, 'r')
if not variables:
variables = list(f.variables.keys())
if not coords:
coords = slice(None)
log.debug('Reading input data variables: %s, from file: %s', variables,
nc_file)
d = {}
a = {}
g = {}
for var in variables:
d[var] = f.variables[var][coords]
a[var] = f.variables[var].__dict__
for attr in f.ncattrs():
g[attr] = getattr(f, attr)
f.close()
return d, a, g
# -------------------------------------------------------------------- #
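# -------------------------------------------------------------------- #
# Illustrative sketch (not part of the original module): reading two variables
# from a hypothetical file.  The file and variable names are made up.
def _example_read_netcdf():  # pragma: no cover - illustration only
    data, var_atts, glob_atts = read_netcdf('domain.nc',
                                            variables=['mask', 'area'])
    # data['area'] holds the array, var_atts['area'] its attribute dict, and
    # glob_atts the file-level attributes.
    return data, var_atts, glob_atts
# -------------------------------------------------------------------- #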
# -------------------------------------------------------------------- #
# Check to make sure all the expected variables are present in the dictionary
def check_ncvars(config_section, nckeys):
'''
Make sure the variables listed in the config file are present in the netcdf
'''
for key, value in iteritems(config_section):
if key.endswith('var'):
if value not in nckeys:
log.error('%s (%s) not in %s', value, key,
config_section['FILE_NAME'])
                raise NameError('Check that the netCDF variable names match'
                                ' those in the configuration file')
return
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# Find the index of the nearest value
def find_nearest(array, value):
''' Find the index location in (array) with value nearest to (value)'''
return np.abs(array - value).argmin()
# -------------------------------------------------------------------- #
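# -------------------------------------------------------------------- #
# Illustration only: find_nearest(np.array([0.0, 0.5, 1.0]), 0.4) returns 1,
# the index of the element closest to 0.4.
# -------------------------------------------------------------------- #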
# -------------------------------------------------------------------- #
# Delete all the files in a directory
def clean_dir(directory):
''' Clean all files in a directory'''
for file_name in os.listdir(directory):
file_path = os.path.join(directory, file_name)
try:
if os.path.isfile(file_path):
os.unlink(file_path)
except Exception:
log.exception('Error cleaning file: %s', file_path)
return
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# Delete a particular file
def clean_file(file_name):
''' Delete the file'''
try:
if os.path.isfile(file_name):
os.unlink(file_name)
except Exception:
log.exception('Error cleaning file: %s', file_name)
return
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# Make a set of directories
def make_directories(rundir, subdir_names):
'''Make rvic directory structure'''
if not os.path.exists(rundir):
os.makedirs(rundir)
paths = {}
for s in subdir_names:
paths[s] = os.path.join(rundir, s)
if not os.path.exists(paths[s]):
os.makedirs(paths[s])
return paths
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
# Move all the input files to a central location
def copy_inputs(config_file, inputs_dir):
config_dict = read_config(config_file)
config = SafeConfigParser()
config.optionxform = str
config.read(config_file)
new_config = os.path.join(inputs_dir, os.path.split(config_file)[1])
# ---------------------------------------------------------------- #
# copy the inputs
for key, section in iteritems(config_dict):
if 'FILE_NAME' in list(section.keys()):
new_file_name = os.path.join(
inputs_dir, os.path.split(section['FILE_NAME'])[1])
copyfile(section['FILE_NAME'], new_file_name)
# update the config file for an easy restart
config.set(key, 'FILE_NAME',
os.path.join(inputs_dir,
os.path.split(section['FILE_NAME'])[1]))
# update the config_dict with the new value
config_dict[key]['FILE_NAME'] = new_file_name
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# write the new configuration file
with open(new_config, 'w') as configfile:
config.write(configfile)
# ---------------------------------------------------------------- #
return config_dict
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
def tar_inputs(inputs, suffix='', tar_type='tar'):
    ''' Tar the inputs directory or file at the end of a run'''
# ---------------------------------------------------------------- #
# Make the TarFile
if tar_type == 'tar':
end = '.tar'
mode = 'w:'
    elif tar_type in ['tgz', 'tar.gz', 'gunzip']:
        end = '.tgz'
        mode = 'w:gz'
    else:
        log.warning('Unknown tar_type: %s, proceeding with gzipped mode',
                    tar_type)
        end = '.tgz'
        mode = 'w:gz'
tar_file = inputs + suffix + end
log.info('tarfile: %s', tar_file)
if os.path.isdir(inputs):
arcname = os.path.basename(os.path.normpath(inputs))
else:
arcname = os.path.split(inputs)[1]
with tarfile.open(tar_file, mode) as tar:
tar.add(inputs, arcname=arcname)
# ---------------------------------------------------------------- #
# Check to make sure the TarFile exists before deleting the sources
if os.path.exists(tar_file):
# ------------------------------------------------------------ #
# Remove the inputs
if os.path.isdir(inputs):
rmtree(inputs)
elif os.path.isfile(inputs):
os.unlink(inputs)
# ------------------------------------------------------------ #
else:
log.error('Problem removing inputs: %s', inputs)
# ---------------------------------------------------------------- #
return tar_file
# -------------------------------------------------------------------- #
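# -------------------------------------------------------------------- #
# Illustration only (not part of the original module): archiving a
# hypothetical inputs directory; once the archive exists the originals are
# removed by tar_inputs itself.
#
#   tar_file = tar_inputs('./inputs', suffix='.after_run', tar_type='tgz')
#   log.info('inputs archived to %s', tar_file)
# -------------------------------------------------------------------- #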
# -------------------------------------------------------------------- #
# Read the domain
def read_domain(domain_dict, lat0_is_min=False):
'''
Read the domain file and return all the variables and attributes.
Area is returned in m2
'''
dom_data, dom_vatts, dom_gatts = read_netcdf(domain_dict['FILE_NAME'])
check_ncvars(domain_dict, list(dom_data.keys()))
# ---------------------------------------------------------------- #
# Create the cell_ids variable
dom_mask = domain_dict['LAND_MASK_VAR']
temp = np.arange(dom_data[dom_mask].size)
dom_data['cell_ids'] = temp.reshape(dom_data[dom_mask].shape)
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Make sure the longitude / latitude vars are 2d
dom_lat = domain_dict['LATITUDE_VAR']
dom_lon = domain_dict['LONGITUDE_VAR']
dom_data['cord_lons'] = dom_data[dom_lon][:]
dom_data['cord_lats'] = dom_data[dom_lat][:]
if dom_data[dom_lon].ndim == 1:
# ------------------------------------------------------------- #
# Check latitude order, flip if necessary.
if (dom_data[dom_lat][-1] > dom_data[dom_lat][0]) != lat0_is_min:
log.debug('Domain Inputs came in upside down, flipping everything '
'now.')
var_list = list(dom_data.keys())
var_list.remove(dom_lon)
for var in var_list:
dom_data[var] = np.flipud(dom_data[var])
# ------------------------------------------------------------ #
# ------------------------------------------------------------- #
# Make 2d coordinate vars
dom_data[dom_lon], dom_data[dom_lat] = np.meshgrid(dom_data[dom_lon],
dom_data[dom_lat])
# ------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# ---------------------------------------------------------------- #
# Make sure the area is in m2
dom_area = domain_dict['AREA_VAR']
area_units = dom_vatts[dom_area]['units']
if area_units in ['rad2', 'radians2', 'radian2', 'radian^2', 'rad^2',
'radians^2', 'rads^2', 'radians squared',
'square-radians']:
dom_data[dom_area] = dom_data[dom_area] * EARTHRADIUS * EARTHRADIUS
elif area_units in ['m2', 'm^2', 'meters^2', 'meters2', 'square-meters',
'meters squared']:
dom_data[dom_area] = dom_data[dom_area]
elif area_units in ['km2', 'km^2', 'kilometers^2', 'kilometers2',
'square-kilometers', 'kilometers squared']:
dom_data[dom_area] = dom_data[dom_area] * METERSPERKM * METERSPERKM
elif area_units in ['mi2', 'mi^2', 'miles^2', 'miles', 'square-miles',
'miles squared']:
dom_data[dom_area] = dom_data[dom_area] * METERSPERMILE * METERSPERMILE
elif area_units in ['acres', 'ac', 'ac.']:
dom_data[dom_area] = dom_data[dom_area] * METERS2PERACRE
else:
        log.warning('WARNING: UNKNOWN AREA units (%s), ASSUMING THEY ARE IN '
                    'SQUARE METERS', area_units)
# ---------------------------------------------------------------- #
return dom_data, dom_vatts, dom_gatts
# -------------------------------------------------------------------- #
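# -------------------------------------------------------------------- #
# Illustrative sketch (not part of the original module): the keys below are
# the ones read_domain looks up; the file and variable names are hypothetical.
#
#   domain_dict = {'FILE_NAME': 'domain.lnd.nc',
#                  'LONGITUDE_VAR': 'lon',
#                  'LATITUDE_VAR': 'lat',
#                  'LAND_MASK_VAR': 'mask',
#                  'AREA_VAR': 'area'}
#   dom_data, dom_vatts, dom_gatts = read_domain(domain_dict)
# -------------------------------------------------------------------- #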
# -------------------------------------------------------------------- #
def strip_non_ascii(in_string):
''' Returns the string without non ASCII characters'''
stripped = (c for c in in_string if 0 < ord(c) < 127)
return ''.join(stripped)
# -------------------------------------------------------------------- #
# -------------------------------------------------------------------- #
def strip_invalid_char(in_string):
''' Returns the string without invalid characters for filenames'''
return ''.join(c for c in in_string if c in VALID_CHARS)
# -------------------------------------------------------------------- #
|
wietsefranssen/RVIC
|
rvic/core/utilities.py
|
Python
|
gpl-3.0
| 14,758
|
[
"NetCDF"
] |
2cc5ca1c8e10caf745d0fc619f731ce6f8743c17edd124d6de9e9bcd0e5706fc
|
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
*********************************************
**espressopp.analysis.ConfigsParticleDecomp**
*********************************************
.. function:: espressopp.analysis.ConfigsParticleDecomp(system)
:param system:
:type system:
.. function:: espressopp.analysis.ConfigsParticleDecomp.clear()
:rtype:
.. function:: espressopp.analysis.ConfigsParticleDecomp.compute()
:rtype:
.. function:: espressopp.analysis.ConfigsParticleDecomp.gather()
:rtype:
.. function:: espressopp.analysis.ConfigsParticleDecomp.gatherFromFile(filename)
:param filename:
:type filename:
:rtype:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from _espressopp import analysis_ConfigsParticleDecomp
class ConfigsParticleDecompLocal(analysis_ConfigsParticleDecomp):
def __init__(self, system):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
cxxinit(self, analysis_ConfigsParticleDecomp, system)
def gather(self):
return self.cxxclass.gather(self)
def gatherFromFile(self, filename):
return self.cxxclass.gatherFromFile(self, filename)
def clear(self):
return self.cxxclass.clear(self)
def __iter__(self):
return self.cxxclass.all(self).__iter__()
def compute(self):
return self.cxxclass.compute(self)
if pmi.isController:
class ConfigsParticleDecomp(object):
__metaclass__ = pmi.Proxy
pmiproxydefs = dict(
            cls = 'espressopp.analysis.ConfigsParticleDecompLocal',
pmicall = [ "gather", "gatherFromFile", "clear", "compute" ],
localcall = ["__getitem__", "all"],
pmiproperty = ["size"]
)
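# Illustrative sketch only (not part of the module): on the controller the
# proxy would typically be driven roughly like this; the system, integrator
# and loop bounds are made up, and concrete analyses usually subclass
# ConfigsParticleDecomp rather than instantiate it directly.
#
#   conf = espressopp.analysis.ConfigsParticleDecomp(system)
#   for step in range(10):
#       integrator.run(100)
#       conf.gather()
#   result = conf.compute()
#   conf.clear()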
|
junghans/espressopp
|
src/analysis/ConfigsParticleDecomp.py
|
Python
|
gpl-3.0
| 2,571
|
[
"ESPResSo"
] |
2b669962967023f73f2d5727b9512d81f49df0f6c3e26778fdb8398757c91f30
|
# BEGIN_COPYRIGHT
#
# Copyright (C) 2014 CRS4.
#
# This file is part of blast-python.
#
# blast-python is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# blast-python is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# blast-python. If not, see <http://www.gnu.org/licenses/>.
#
# END_COPYRIGHT
"""
Test blaster speed by comparison with NCBI blastall.
"""
import sys, os, time, random, optparse, logging
import ncbi_toolkit, BlastPython
GID_BASE = 1000
STRAND = ncbi_toolkit.strand.both
def make_seq(n):
return ''.join([random.choice(['A', 'C', 'G', 'T']) for i in xrange(n)])
def make_fasta(seq, id, title):
return '>gi|%d %s\n%s\n' % (id, title, seq)
def generate_db_fasta(fname, nseq, seq_len_ave, seq_len_sigma):
fp = open(fname, 'w')
for i in xrange(nseq):
l = random.gauss(seq_len_ave, seq_len_sigma)
l = max(int(l), int(seq_len_ave - 3*seq_len_sigma))
s = make_seq(l)
        fp.write(make_fasta(s, GID_BASE + i, 'title %d' % i))
fp.close()
def formatdb(dbname):
os.system('formatdb -V -o T -p F -i %s' % dbname)
def str_db(dbname):
fsio = BlastPython.FS_IO(dbname, 100000)
fs = BlastPython.fasta_stream_from_stream(fsio)
of = open(dbname + '.str', 'w')
for f in fs:
h, t = f.split('\n')[0:2]
i = (h.split()[0]).split("|")[1]
of.write('%s %s\n' % (i, t))
of.close()
def run_blastall(pname, dbname, qname, e):
os.system('blastall -p %s -d %s -i %s -e %s -o blastall.out' % (
pname, dbname, qname, e))
def run_blaster_stream(pname, query, subjects, e):
kwds = {'Program' : pname}
b = BlastPython.blaster(query, **kwds)
counter = 0
for subject in subjects:
_, result = b.blast(subject)
hit_list = result[0]
if hit_list is not None:
hsp_list = hit_list[0]
for hsp in hsp_list:
if hsp.evalue < e:
counter += 1
return counter
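# Illustration only (not part of the original script): run_blaster_stream
# pairs a single query against an iterable of subject sequences and counts
# HSPs whose e-value falls below the cutoff, e.g. (names hypothetical):
#
#   n_hits = run_blaster_stream(ncbi_toolkit.EProgram.eBlastn,
#                               query_seq, subject_seqs, 1e-5)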
def run_pyblast_fasta(dbname, qname, pname, e):
tot_count = 0
q_factory = BlastPython.seq_factory_from_fasta(STRAND)
q_fsio = BlastPython.FS_IO(qname, 1000000)
q_fs = BlastPython.fasta_stream_from_stream(q_fsio)
q_ss = BlastPython.blast_seq_stream(q_factory, q_fs)
db_factory = BlastPython.seq_factory_from_fasta(STRAND)
db_fsio = BlastPython.FS_IO(dbname, 1000000)
db_fs = BlastPython.fasta_stream_from_stream(db_fsio)
db_ss = BlastPython.blast_seq_stream(db_factory, db_fs)
subjects = list(db_ss)
for query in q_ss:
tot_count += run_blaster_stream(pname, query, subjects, e)
logging.info('run_pyblast_fasta: %d hits' % tot_count)
def run_pyblast_str(dbname, qname, pname, e):
tot_count = 0
q_factory = BlastPython.seq_factory_from_str(STRAND)
q_fsio = BlastPython.FS_IO(qname+'.str', 1000000)
q_ss = BlastPython.blast_seq_stream(q_factory, q_fsio)
db_factory = BlastPython.seq_factory_from_str(STRAND)
db_fsio = BlastPython.FS_IO(dbname+'.str', 1000000)
db_ss = BlastPython.blast_seq_stream(db_factory, db_fsio)
subjects = list(db_ss)
for i, query in enumerate(q_ss):
tot_count += run_blaster_stream(pname, query, subjects, e)
logging.info('run_pyblast_str: %d hits' % tot_count)
def run_pyblast_str_no_blast(dbname, qname, pname, e):
fsio = BlastPython.FS_IO(dbname + '.str', 1000000)
factory_str = BlastPython.seq_factory_from_str(STRAND)
factory_fasta = BlastPython.seq_factory_from_fasta(STRAND)
fasta = open(qname).read()
query = factory_fasta.make(fasta)
ss = BlastPython.blast_seq_stream(factory_str, fsio)
for s in ss:
pass
def run_all_blast(dbname, qname, pname, e):
formatdb(dbname)
start = time.time()
run_blastall(pname, dbname, qname, e)
return (time.time() - start)
def run_all_pyblast_fasta(dbname, qname, pname, e):
start = time.time()
run_pyblast_fasta(dbname, qname, pname, e)
return (time.time() - start)
def run_all_pyblast_str(dbname, qname, pname, e):
start = time.time()
run_pyblast_str(dbname, qname, pname, e)
return (time.time() - start)
def run_all_pyblast_str_multiple(dbname, qname, pname, e):
q_fsio = BlastPython.FS_IO(qname + '.str', 1000000)
db_fsio = BlastPython.FS_IO(dbname + '.str', 1000000)
factory = BlastPython.seq_factory_from_str(STRAND)
queries = list(BlastPython.blast_seq_stream(factory, q_fsio))
subjects = list(BlastPython.blast_seq_stream(factory, db_fsio))
kwds = {'Program' : pname}
b = BlastPython.blaster(queries, **kwds)
start = time.time()
_, res = b.blast(subjects)
counter = 0
for hit_list in res:
if hit_list is None:
continue
for hsp_list in hit_list:
for hsp in hsp_list:
if hsp.evalue < e:
counter += 1
logging.info('run_all_pyblast_str_multiple: %d hits' % counter)
return (time.time() - start)
def make_parser():
usage = "%prog [OPTIONS] NQUERIES NSUBJECTS SEQ_LEN_AVE SEQ_LEN_STD"
parser = optparse.OptionParser(usage)
parser.set_description(__doc__.lstrip())
parser.add_option("-p", type="choice", dest="program", metavar="STRING",
help="blast program to use",
choices = ["blastn", "tblastx"])
parser.add_option("-v", action="store_const", const=logging.INFO,
dest="loglevel", help="display verbose info")
parser.set_defaults(program="blastn", loglevel=logging.WARNING)
o = parser.get_option("-p")
o.help += " %r" % o.choices
return parser
def main(argv):
parser = make_parser()
opt, args = parser.parse_args(argv)
logging.basicConfig(level=opt.loglevel, format='%(message)s')
try:
nseqs_q = int(args[1])
nseqs_db = int(args[2])
seq_len_ave = int(args[3])
seq_len_std = int(args[4])
except IndexError:
parser.print_help()
sys.exit(2)
except ValueError, e:
print "ERROR: all arguments beyond the first one must be integers"
sys.exit(str(e))
dbname = 'tdb'
qname = 'query.fa'
e = 1e10
generate_db_fasta(qname, nseqs_q, seq_len_ave, seq_len_std)
generate_db_fasta(dbname, nseqs_db, seq_len_ave, seq_len_std)
str_db(dbname)
str_db(qname)
dt1 = run_all_blast(dbname, qname, opt.program, e)
if opt.program == 'tblastx':
pname = ncbi_toolkit.EProgram.eTblastx
elif opt.program == 'blastn':
pname = ncbi_toolkit.EProgram.eBlastn
else:
pname = ncbi_toolkit.EProgram.eTblastx
dt2 = run_all_pyblast_fasta(dbname, qname, pname, e)
dt3 = run_all_pyblast_str(dbname, qname, pname, e)
dt4 = run_all_pyblast_str_multiple(dbname, qname, pname, e)
logging.info("\t".join(["program", "nseqs_q", "nseqs_db", "seq_len_ave",
"seq_len_std", "blastall", "fasta", "str",
"str_multi"]))
print "\t".join(map(str, [opt.program, nseqs_q, nseqs_db, seq_len_ave,
seq_len_std, dt1, dt2, dt3, dt4]))
if __name__ == "__main__":
main(sys.argv)
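# Example invocation (illustrative; the numbers are arbitrary):
#
#   python test_blaster_speed.py -v -p blastn 5 50 400 40
#
# generates 5 random queries and 50 random subjects with lengths drawn around
# 400 +/- 40, then times blastall against the three BlastPython code paths.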
|
crs4/blast-python
|
test/test_blaster_speed.py
|
Python
|
gpl-3.0
| 7,600
|
[
"BLAST"
] |
b89c2cd049b973f1144baf904551dee5124d24dca5985089cdde7aa4a1b96a43
|