code stringlengths 1 1.49M | vector listlengths 0 7.38k | snippet listlengths 0 7.38k |
|---|---|---|
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
class DataRow(object):
    """A single row of parsed test data.

    Splits the raw cells into actual data cells and trailing comment cells,
    collapses internal whitespace, and offers helpers for recognizing row
    types (continuation rows, for loops, settings, metadata, ...).
    """
    _row_continuation_marker = '...'
    # Raw string so '\s' is an explicit regex escape, not a string escape.
    _whitespace_regexp = re.compile(r'\s+')
    _ye_olde_metadata_prefix = 'meta:'

    def __init__(self, cells):
        self.cells, self.comments = self._parse(cells)

    def _parse(self, row):
        """Split `row` into (data cells, comment cells).

        Everything from the first cell starting with '#' onwards is comment
        content; the leading '#' itself is dropped.
        """
        data = []
        comments = []
        for cell in row:
            cell = self._collapse_whitespace(cell)
            if cell.startswith('#') and not comments:
                comments.append(cell[1:])
            elif comments:
                comments.append(cell)
            else:
                data.append(cell)
        return self._purge_empty_cells(data), self._purge_empty_cells(comments)

    def _collapse_whitespace(self, cell):
        # Runs of whitespace become a single space; both ends are stripped.
        return self._whitespace_regexp.sub(' ', cell).strip()

    def _purge_empty_cells(self, row):
        # Only trailing empty cells are removed; inner empties matter.
        while row and not row[-1]:
            row.pop()
        # Cells with only a single backslash are considered empty
        return [cell if cell != '\\' else '' for cell in row]

    @property
    def head(self):
        return self.cells[0] if self.cells else None

    @property
    def _tail(self):
        return self.cells[1:] if self.cells else None

    @property
    def all(self):
        return self.cells

    @property
    def data(self):
        """Data cells with a possible continuation marker ('...') removed."""
        if self.is_continuing():
            index = self.cells.index(self._row_continuation_marker) + 1
            return self.cells[index:]
        return self.cells

    def dedent(self):
        """Return a new DataRow with the first cell dropped."""
        datarow = DataRow([])
        datarow.cells = self._tail
        datarow.comments = self.comments
        return datarow

    def startswith(self, value):
        # Bug fix: `head` is a property, not a method; `self.head()` raised
        # TypeError ('str' object is not callable) on every call.
        return self.head == value

    def handle_old_style_metadata(self):
        if self._is_metadata_with_olde_prefix(self.head):
            self.cells = self._convert_to_new_style_metadata()

    def _is_metadata_with_olde_prefix(self, value):
        # Guard against empty rows where `head` is None.
        return bool(value) and value.lower().startswith(self._ye_olde_metadata_prefix)

    def _convert_to_new_style_metadata(self):
        return ['Metadata'] + [self.head.split(':', 1)[1].strip()] + self._tail

    def starts_for_loop(self):
        if self.head and self.head.startswith(':'):
            return self.head.replace(':', '').replace(' ', '').upper() == 'FOR'
        return False

    def starts_test_or_user_keyword_setting(self):
        head = self.head
        return head and head[0] == '[' and head[-1] == ']'

    def test_or_user_keyword_setting_name(self):
        return self.head[1:-1].strip()

    def is_indented(self):
        return self.head == ''

    def is_continuing(self):
        # True when the continuation marker is preceded only by empty cells.
        for cell in self.cells:
            if cell == self._row_continuation_marker:
                return True
            if cell:
                return False
        return False

    def is_commented(self):
        return bool(not self.cells and self.comments)

    def __nonzero__(self):
        return bool(self.cells or self.comments)

    # Python 3 compatible truth value; harmless under Python 2.
    __bool__ = __nonzero__
| [
[
1,
0,
0.1339,
0.0089,
0,
0.66,
0,
540,
0,
1,
0,
0,
540,
0,
0
],
[
3,
0,
0.5804,
0.8482,
0,
0.66,
1,
207,
0,
20,
0,
0,
186,
0,
29
],
[
14,
1,
0.1696,
0.0089,
1,
0.... | [
"import re",
"class DataRow(object):\n _row_continuation_marker = '...'\n _whitespace_regexp = re.compile('\\s+')\n _ye_olde_metadata_prefix = 'meta:'\n\n def __init__(self, cells):\n self.cells, self.comments = self._parse(cells)",
" _row_continuation_marker = '...'",
" _whitespace_r... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datarow import DataRow
from model import (TestCaseFile, TestDataDirectory, ResourceFile,
TestCase, UserKeyword)
| [
[
1,
0,
0.8824,
0.0588,
0,
0.66,
0,
496,
0,
1,
0,
0,
496,
0,
0
],
[
1,
0,
0.9706,
0.1176,
0,
0.66,
1,
722,
0,
5,
0,
0,
722,
0,
0
]
] | [
"from datarow import DataRow",
"from model import (TestCaseFile, TestDataDirectory, ResourceFile,\n TestCase, UserKeyword)"
] |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import os
from docutils.core import publish_cmdline
from htmlreader import HtmlReader
# Ignore custom sourcecode directives at least we use in reST sources.
# See e.g. ug2html.py for an example how custom directives are created.
from docutils.parsers.rst import directives
# Register a no-op handler so 'sourcecode' directives are silently dropped.
ignorer = lambda *args: []
# Mark the directive as having content so docutils parses (and discards) it.
ignorer.content = 1
directives.register_directive('sourcecode', ignorer)
# The registration is the only thing needed; clean the module namespace.
del directives, ignorer
class RestReader(HtmlReader):
    """Reads test data from reStructuredText files.

    The reST source is first rendered with docutils into a temporary HTML
    file that is then parsed using the inherited HtmlReader logic. The
    temporary file is always removed afterwards.
    """

    def read(self, rstfile, rawdata):
        html_path = self._rest_to_html(rstfile.name)
        html_file = None
        try:
            html_file = open(html_path, 'rb')
            return HtmlReader.read(self, html_file, rawdata)
        finally:
            # Close (if opening succeeded) and always remove the temp file.
            if html_file:
                html_file.close()
            os.remove(html_path)

    def _rest_to_html(self, rstpath):
        """Render `rstpath` to a fresh temporary HTML file; return its path."""
        handle, html_path = tempfile.mkstemp('.html')
        os.close(handle)
        publish_cmdline(writer_name='html', argv=[rstpath, html_path])
        return html_path
| [
[
1,
0,
0.3077,
0.0192,
0,
0.66,
0,
516,
0,
1,
0,
0,
516,
0,
0
],
[
1,
0,
0.3269,
0.0192,
0,
0.66,
0.125,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.3654,
0.0192,
0,
0... | [
"import tempfile",
"import os",
"from docutils.core import publish_cmdline",
"from htmlreader import HtmlReader",
"from docutils.parsers.rst import directives",
"ignorer = lambda *args: []",
"ignorer.content = 1",
"directives.register_directive('sourcecode', ignorer)",
"class RestReader(HtmlReader):... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import HTMLParser
import sys
from htmlentitydefs import entitydefs
# Entities missing from htmlentitydefs that we still want to resolve.
extra_entitydefs = {'nbsp': ' ', 'apos': "'", 'tilde': '~'}
class HtmlReader(HTMLParser.HTMLParser):
    """Parses HTML test data and feeds table rows to a populator.

    The parser is a small state machine: IGNORE outside tables, INITIAL on
    a table's first row (which decides whether the table is relevant) and
    PROCESS inside a recognized table.
    """
    IGNORE = 0
    INITIAL = 1
    PROCESS = 2

    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        # Default encoding; may be overridden by a <meta> tag or an XML PI.
        self._encoding = 'ISO-8859-1'
        self._handlers = { 'table_start' : self.table_start,
                           'table_end' : self.table_end,
                           'tr_start' : self.tr_start,
                           'tr_end' : self.tr_end,
                           'td_start' : self.td_start,
                           'td_end' : self.td_end,
                           'th_start' : self.td_start,
                           'th_end' : self.td_end,
                           'br_start' : self.br_start,
                           'meta_start' : self.meta_start }

    def read(self, htmlfile, populator):
        """Feeds `htmlfile` line by line through the parser to `populator`."""
        self.populator = populator
        self.state = self.IGNORE
        self.current_row = None
        self.current_cell = None
        for line in htmlfile.readlines():
            self.feed(line)
        # Calling close is required by the HTMLParser but may cause problems
        # if the same instance of our HtmlParser is reused. Currently it's
        # used only once so there's no problem.
        self.close()
        self.populator.eof()

    def handle_starttag(self, tag, attrs):
        handler = self._handlers.get(tag+'_start')
        if handler is not None:
            handler(attrs)

    def handle_endtag(self, tag):
        handler = self._handlers.get(tag+'_end')
        if handler is not None:
            handler()

    def handle_data(self, data, decode=True):
        # Character data is collected only inside a cell of a relevant table.
        if self.state == self.IGNORE or self.current_cell is None:
            return
        if decode:
            data = data.decode(self._encoding)
        self.current_cell.append(data)

    def handle_entityref(self, name):
        value = self._handle_entityref(name)
        self.handle_data(value, decode=False)

    def _handle_entityref(self, name):
        # `in` instead of the deprecated dict.has_key.
        if name in extra_entitydefs:
            return extra_entitydefs[name]
        try:
            value = entitydefs[name]
        except KeyError:
            # Unknown entity: keep the original markup untouched.
            return '&'+name+';'
        if value.startswith('&#'):
            return unichr(int(value[2:-1]))
        return value.decode('ISO-8859-1')

    def handle_charref(self, number):
        value = self._handle_charref(number)
        self.handle_data(value, decode=False)

    def _handle_charref(self, number):
        try:
            return unichr(int(number))
        except ValueError:
            return '&#'+number+';'

    def handle_pi(self, data):
        encoding = self._get_encoding_from_pi(data)
        if encoding:
            self._encoding = encoding

    def unknown_decl(self, data):
        # Ignore everything even if it's invalid. This kind of stuff comes
        # at least from MS Excel
        pass

    def table_start(self, attrs=None):
        self.state = self.INITIAL
        self.current_row = None
        self.current_cell = None

    def table_end(self):
        if self.current_row is not None:
            self.tr_end()
        self.state = self.IGNORE

    def tr_start(self, attrs=None):
        if self.current_row is not None:
            self.tr_end()
        self.current_row = []

    def tr_end(self):
        if self.current_row is None:
            return
        if self.current_cell is not None:
            self.td_end()
        if self.state == self.INITIAL:
            # The first non-empty row decides whether the table is processed.
            if len(self.current_row) > 0:
                if self.populator.start_table(self.current_row):
                    self.state = self.PROCESS
                else:
                    self.state = self.IGNORE
            else:
                self.state = self.IGNORE
        elif self.state == self.PROCESS:
            self.populator.add(self.current_row)
        self.current_row = None

    def td_start(self, attrs=None):
        if self.current_cell is not None:
            self.td_end()
        if self.current_row is None:
            # Implicit row start for malformed tables.
            self.tr_start()
        self.current_cell = []

    def td_end(self):
        if self.current_cell is not None and self.state != self.IGNORE:
            cell = ''.join(self.current_cell)
            self.current_row.append(cell)
        self.current_cell = None

    def br_start(self, attrs=None):
        if self.current_cell is not None and self.state != self.IGNORE:
            self.current_cell.append('\n')

    def meta_start(self, attrs):
        encoding = self._get_encoding_from_meta(attrs)
        if encoding:
            self._encoding = encoding

    def _get_encoding_from_meta(self, attrs):
        """Returns charset from <meta http-equiv="Content-Type">, else None."""
        valid_http_equiv = False
        encoding = None
        for name, value in attrs:
            name = name.lower()
            if name == 'http-equiv' and value.lower() == 'content-type':
                valid_http_equiv = True
            if name == 'content':
                for token in value.split(';'):
                    token = token.strip()
                    if token.lower().startswith('charset='):
                        encoding = token[8:]
        return valid_http_equiv and encoding or None

    def _get_encoding_from_pi(self, data):
        """Returns the encoding from an <?xml ... encoding=...?> PI, else None."""
        data = data.strip()
        if not data.lower().startswith('xml '):
            return None
        if data.endswith('?'):
            data = data[:-1]
        for token in data.split():
            if token.lower().startswith('encoding='):
                encoding = token[9:]
                if encoding.startswith("'") or encoding.startswith('"'):
                    encoding = encoding[1:-1]
                return encoding
        return None

    # Workaround for following bug in Python 2.6: http://bugs.python.org/issue3932
    if sys.version_info[:2] > (2, 5):
        def unescape_from_py25(self, s):
            # Bug fix: the entity names below had been corrupted into their
            # already-unescaped characters, turning the replacements into
            # no-ops (and making the quote line a syntax error). Restored the
            # Python 2.5 behavior of unescaping the five standard entities.
            if '&' not in s:
                return s
            s = s.replace("&lt;", "<")
            s = s.replace("&gt;", ">")
            s = s.replace("&apos;", "'")
            s = s.replace("&quot;", '"')
            s = s.replace("&amp;", "&")  # Must be last
            return s
        HTMLParser.HTMLParser.unescape = unescape_from_py25
| [
[
1,
0,
0.0792,
0.005,
0,
0.66,
0,
217,
0,
1,
0,
0,
217,
0,
0
],
[
1,
0,
0.0842,
0.005,
0,
0.66,
0.2,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0891,
0.005,
0,
0.66,
... | [
"import HTMLParser",
"import sys",
"from htmlentitydefs import entitydefs",
"extra_entitydefs = {'nbsp': ' ', 'apos': \"'\", 'tilde': '~'}",
"class HtmlReader(HTMLParser.HTMLParser):\n IGNORE = 0\n INITIAL = 1\n PROCESS = 2\n\n def __init__(self):\n HTMLParser.HTMLParser.__init__(self)\n... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Various tasks
# - Rename this module so that it is not confused with standard json module
# - Consider moving under utils
# - Cleanup
def encode_basestring(string):
    """Return `string` as a double-quoted JSON string literal.

    Characters with dedicated short escapes get them; all other control and
    non-ASCII characters are emitted as \\uXXXX escapes.
    """
    # Backslash must be replaced first so it does not re-escape the rest.
    for plain, escaped in (('\\', '\\\\'), ('"', '\\"'), ('\b', '\\b'),
                           ('\f', '\\f'), ('\n', '\\n'), ('\r', '\\r'),
                           ('\t', '\\t')):
        string = string.replace(plain, escaped)
    # TODO: Our log doesn't contain all these control chars
    def escape_char(char):
        code = ord(char)
        return char if 31 < code < 127 else '\\u' + hex(code)[2:].rjust(4, '0')
    return '"%s"' % ''.join(escape_char(char) for char in string)
def json_dump(data, output, mappings=None):
    """Serialize `data` as JSON into the writable stream `output`.

    `mappings` is an optional dict whose keys, when found in `data`, are
    written out verbatim as pre-rendered JSON instead of being encoded.

    Supported value types: None, dict, list/tuple, int/long and strings;
    anything else raises an Exception.

    NOTE: written for Python 2 (`long`, `basestring`).
    """
    if data is None:
        output.write('null')
    elif isinstance(data, dict):
        output.write('{')
        # Keys are sorted to make the output deterministic.
        for index, key in enumerate(sorted(data)):
            json_dump(key, output, mappings)
            output.write(':')
            json_dump(data[key], output, mappings)
            if index < len(data)-1:
                output.write(',')
        output.write('}')
    elif isinstance(data, (list, tuple)):
        output.write('[')
        for index, item in enumerate(data):
            json_dump(item, output, mappings)
            if index < len(data)-1:
                output.write(',')
        output.write(']')
    elif mappings and data in mappings:
        # Pre-rendered replacement takes precedence over normal encoding.
        output.write(mappings[data])
    elif isinstance(data, (int, long)):
        output.write(str(data))
    elif isinstance(data, basestring):
        output.write(encode_basestring(data))
    else:
        raise Exception('Data type (%s) serialization not supported' % type(data))
| [
[
2,
0,
0.4531,
0.2344,
0,
0.66,
0,
207,
0,
1,
1,
0,
0,
0,
12
],
[
2,
1,
0.3906,
0.0781,
1,
0.47,
0,
553,
0,
1,
1,
0,
0,
0,
3
],
[
14,
2,
0.375,
0.0156,
2,
0.36,
... | [
"def encode_basestring(string):\n def get_matching_char(c):\n val = ord(c)\n if val < 127 and val > 31:\n return c\n return '\\\\u' + hex(val)[2:].rjust(4,'0')\n # TODO: Our log doesn't contain all these control chars\n string = string.replace('\\\\', '\\\\\\\\')",
" de... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot import utils
class XUnitWriter:
    """Provides an xUnit-compatible result file.

    Attempts to adhere to the de facto schema guessed by Peter Reilly, see:
    http://marc.info/?l=ant-dev&m=123551933508682
    """

    def __init__(self, output):
        self._writer = utils.XmlWriter(output)
        self._root_suite = None
        self._detail_serializer = _NopSerializer()

    def close(self):
        self._writer.close()

    def start_suite(self, suite):
        # Only the first (root) suite produces a <testsuite> element.
        if self._root_suite:
            return
        self._root_suite = suite
        self._writer.start('testsuite',
                           {'name': suite.name,
                            'tests': str(suite.get_test_count()),
                            'errors': '0',
                            'failures': str(suite.all_stats.failed),
                            'skip': '0'})

    def end_suite(self, suite):
        if suite is self._root_suite:
            self._writer.end('testsuite')

    def start_test(self, test):
        self._writer.start('testcase',
                           {'classname': test.parent.get_long_name(),
                            'name': test.name,
                            'time': self._time_as_seconds(test.elapsedtime)})
        # Failed tests get a dedicated serializer for their <failure> details.
        if test.status == 'FAIL':
            self._detail_serializer = _FailedTestSerializer(self._writer, test)

    def _time_as_seconds(self, millis):
        # Round to the nearest full second before converting.
        return str(int(round(millis, -3) / 1000))

    def end_test(self, test):
        self._detail_serializer.end_test()
        self._detail_serializer = _NopSerializer()
        self._writer.end('testcase')

    def start_keyword(self, kw):
        pass

    def end_keyword(self, kw):
        pass

    def message(self, msg):
        self._detail_serializer.message(msg)
class _FailedTestSerializer:
"""Specific policy to serialize a failed test case details"""
def __init__(self, writer, test):
self._writer = writer
self._writer.start('failure',
{'message': test.message, 'type': 'AssertionError'})
def end_test(self):
self._writer.end('failure')
def message(self, msg):
"""Populates the <failure> section, normally only with a 'Stacktrace'.
There is a weakness here because filtering is based on message level:
- DEBUG level is used by RF for 'Tracebacks' (what is expected here)
- INFO and TRACE are used for keywords and arguments (not errors)
- first FAIL message is already reported as <failure> attribute
"""
if msg.level == 'DEBUG':
self._writer.content(msg.message)
class _NopSerializer:
"""Default policy when there's no detail to serialize"""
def end_test(self):
pass
def message(self, msg):
pass
| [
[
1,
0,
0.1524,
0.0095,
0,
0.66,
0,
735,
0,
1,
0,
0,
735,
0,
0
],
[
3,
0,
0.4333,
0.5143,
0,
0.66,
0.3333,
553,
0,
10,
0,
0,
0,
0,
19
],
[
8,
1,
0.2095,
0.0476,
1,
... | [
"from robot import utils",
"class XUnitWriter:\n \"\"\"Provides an xUnit-compatible result file.\n\n Attempts to adhere to the de facto schema guessed by Peter Reilly, see:\n http://marc.info/?l=ant-dev&m=123551933508682\n \"\"\"\n\n def __init__(self, output):",
" \"\"\"Provides an xUnit-comp... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.common import Statistics
from robot.output import LOGGER, process_outputs
from outputwriter import OutputWriter
from xunitwriter import XUnitWriter
from builders import LogBuilder, ReportBuilder, XUnitBuilder, OutputBuilder
import jsparser
class ResultWriter(object):
    """Coordinates writing log, report, xUnit and output files."""

    def __init__(self, settings):
        self._xml_result = None
        self._suite = None
        self._settings = settings
        self._data_sources = None
        # Bug fix: initialize here so the `data_model` property is safe to
        # access before `write_robot_results` has run (was AttributeError).
        self._data_model = None

    def write_robot_results(self, data_source):
        """Creates log, report and xUnit files based on `data_source`."""
        self._data_sources = [data_source]
        self._data_model = None
        LogBuilder(self).build()
        ReportBuilder(self).build()
        XUnitBuilder(self).build()

    @property
    def data_model(self):
        # Lazily created from the first data source.
        if self._data_model is None:
            self._data_model = jsparser.create_datamodel_from(self._data_sources[0], self._settings['SplitLog'])
        return self._data_model

    @property
    def settings(self):
        return self._settings

    @property
    def result_from_xml(self):
        # Lazily processed combined result of all data sources.
        if self._xml_result is None:
            self._suite, errs = process_outputs(self._data_sources, self._settings)
            self._suite.set_options(self._settings)
            self._xml_result = ResultFromXML(self._suite, errs, self._settings)
        return self._xml_result

    def write_rebot_results(self, *data_sources):
        """Combines `data_sources`, writes all result files, returns the suite."""
        self._data_sources = data_sources
        builder = OutputBuilder(self)
        self.write_robot_results(builder.build())
        builder.finalize()
        return self._suite
class ResultFromXML(object):
    """Result model built from output XML, with serialization helpers."""

    def __init__(self, suite, exec_errors, settings=None):
        self.suite = suite
        self.exec_errors = exec_errors
        # Statistics options are picked from settings when available.
        keys = ('SuiteStatLevel', 'TagStatInclude', 'TagStatExclude',
                'TagStatCombine', 'TagDoc', 'TagStatLink')
        params = tuple(settings[key] for key in keys) if settings else ()
        self.statistics = Statistics(suite, *params)
        self._generator = 'Robot'

    def serialize_output(self, path, log=True):
        """Writes suite, statistics and errors as output XML to `path`."""
        if path == 'NONE':
            return
        writer = OutputWriter(path)
        self.suite.serialize(writer)
        self.statistics.serialize(writer)
        self.exec_errors.serialize(writer)
        writer.close()
        if log:
            LOGGER.output_file('Output', path)

    def serialize_xunit(self, path):
        """Writes an xUnit-compatible result file to `path`."""
        if path == 'NONE':
            return
        writer = XUnitWriter(path)
        try:
            self.suite.serialize(writer)
        finally:
            writer.close()
        LOGGER.output_file('XUnit', path)
| [
[
1,
0,
0.1515,
0.0101,
0,
0.66,
0,
355,
0,
1,
0,
0,
355,
0,
0
],
[
1,
0,
0.1616,
0.0101,
0,
0.66,
0.1429,
596,
0,
2,
0,
0,
596,
0,
0
],
[
1,
0,
0.1818,
0.0101,
0,
... | [
"from robot.common import Statistics",
"from robot.output import LOGGER, process_outputs",
"from outputwriter import OutputWriter",
"from xunitwriter import XUnitWriter",
"from builders import LogBuilder, ReportBuilder, XUnitBuilder, OutputBuilder",
"import jsparser",
"class ResultWriter(object):\n\n ... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zlib
import base64
from operator import itemgetter
from robot import utils
class _Handler(object):
    """Base SAX-style handler: collects child data and dispatches subelements.

    Subclasses override `get_handler_for` and/or `end_element` to shape the
    data they pass up to their parent handler.
    """
    def __init__(self, context, attrs=None):
        self._context = context
        self._data_from_children = []
        # Maps element names to the handler class used for that subelement.
        self._handlers = {
            'robot' : _RobotHandler,
            'suite' : _SuiteHandler,
            'test' : _TestHandler,
            'statistics' : _StatisticsHandler,
            'stat' : _StatItemHandler,
            'errors' : _Handler,
            'doc' : _HtmlTextHandler,
            'kw' : _KeywordHandler,
            'arg' : _ArgumentHandler,
            'arguments' : _ArgumentsHandler,
            'tag' : _TextHandler,
            'tags' : _Handler,
            'msg' : _MsgHandler,
            'status' : _StatusHandler,
            'metadata' : _MetadataHandler,
            'item' : _MetadataItemHandler,
            }
    def get_handler_for(self, name, attrs):
        # A new handler instance per element; KeyError means unknown element.
        return self._handlers[name](self._context, attrs)
    def add_child_data(self, data):
        self._data_from_children.append(data)
    def end_element(self, text):
        # Default: pass the collected child data to the parent as-is.
        return self._data_from_children
    def _get_id(self, item):
        # Delegates to Context.get_id: strings are interned in the text
        # cache, ints pass through, None stays None.
        return self._context.get_id(item)
    def _get_ids(self, items):
        return [self._context.get_id(i) for i in items]
class RootHandler(_Handler):
    """Topmost handler; exposes the single <robot> child as the result."""
    # TODO: Combine _RootHandler and _RobotHandler

    @property
    def data(self):
        return self._data_from_children[0]
class _RobotHandler(_Handler):
    """Handles the <robot> root element and assembles the final data dict."""

    def __init__(self, context, attrs):
        _Handler.__init__(self, context)
        self._generator = attrs.get('generator')

    def end_element(self, text):
        # Children arrive in document order: suite, statistics, errors.
        children = self._data_from_children
        return {'generator': self._generator,
                'suite': children[0],
                'stats': children[1],
                'errors': children[2],
                'baseMillis': self._context.basemillis,
                'strings': self._context.dump_texts()}
class _SuiteHandler(_Handler):
    """Handles a <suite> element and its nested suites/tests/keywords.

    Routes child data into separate lists per child type and pushes a new
    stats scope onto the context for the duration of the suite.
    """
    def __init__(self, context, attrs):
        _Handler.__init__(self, context)
        self._name = attrs.get('name')
        self._source = attrs.get('source') or ''
        self._suites = []
        self._tests = []
        self._keywords = []
        self._current_children = None
        self._context.start_suite(self._name)
        self._context.collect_stats()
    def get_handler_for(self, name, attrs):
        # Pick the target list for this child before delegating creation;
        # add_child_data then appends into the chosen list.
        self._current_children = {
            'suite': self._suites,
            'test': self._tests,
            'kw': self._keywords
        }.get(name, self._data_from_children)
        return _Handler.get_handler_for(self, name, attrs)
    def add_child_data(self, data):
        self._current_children.append(data)
    def end_element(self, text):
        # Layout: [source id, name id, <other child data>..., suites, tests,
        # keywords, stat ids]; the stats scope is popped afterwards.
        result = self._get_ids([self._source, self._name]) + \
                 self._data_from_children + [self._suites] + \
                 [self._tests] + [self._keywords] + \
                 [self._get_ids(self._context.dump_stats())]
        self._context.end_suite()
        return result
class _TestHandler(_Handler):
    """Handles a <test> element; collects its keywords and status."""
    def __init__(self, context, attrs):
        _Handler.__init__(self, context)
        self._name = attrs.get('name')
        self._timeout = attrs.get('timeout')
        self._keywords = []
        self._current_children = None
        self._context.start_test(self._name)
    def get_handler_for(self, name, attrs):
        if name == 'status':
            # TODO: Use 1/0 instead of Y/N. Possibly also 1/0/-1 instead of P/F/N.
            # NOTE(review): `_critical` is only assigned when a <status> child
            # is seen; `end_element` relies on it having been set.
            self._critical = 'Y' if attrs.get('critical') == 'yes' else 'N'
        self._current_children = {
            'kw': self._keywords
        }.get(name, self._data_from_children)
        return _Handler.get_handler_for(self, name, attrs)
    def add_child_data(self, data):
        self._current_children.append(data)
    def end_element(self, text):
        # TODO: refactor
        # The status child's data is last; its first item is the interned
        # status char id, compared against the id of 'P' (pass).
        self._context.add_test(self._critical == 'Y', self._data_from_children[-1][0] == self._get_id('P'))
        kws = self._context.end_test(self._keywords)
        result = self._get_ids([self._name, self._timeout, self._critical]) + self._data_from_children
        result.append(kws)
        return result
class _KeywordHandler(_Handler):
    """Handles a <kw> element; collects nested keywords and messages."""
    def __init__(self, context, attrs):
        _Handler.__init__(self, context)
        self._context.start_keyword()
        self._type = attrs.get('type')
        # For loops are stored under their own type name.
        if self._type == 'for': self._type = 'forloop'
        self._name = attrs.get('name')
        self._timeout = attrs.get('timeout')
        self._keywords = []
        self._messages = []
        self._current_children = None
    def get_handler_for(self, name, attrs):
        if name == 'status':
            # TODO: Use 1/0 instead of Y/N. Possibly also 1/0/-1 instead of P/F/N.
            self._critical = 'Y' if attrs.get('critical') == 'yes' else 'N'
        self._current_children = {
            'kw': self._keywords,
            'msg': self._messages
        }.get(name, self._data_from_children)
        return _Handler.get_handler_for(self, name, attrs)
    def add_child_data(self, data):
        self._current_children.append(data)
    def end_element(self, text):
        # A failed teardown (status id equals interned 'F') zeroes the passed
        # counters of the current stats scope via Context.teardown_failed.
        if self._type == 'teardown' and self._data_from_children[-1][0] == self._get_id('F'):
            self._context.teardown_failed()
        result = self._get_ids([self._type, self._name, self._timeout]) + \
                 self._data_from_children + [self._keywords] + [self._messages]
        self._context.end_keyword()
        return result
# TODO: StatisticsHandler and StatItemHandler should be separated somehow from suite handlers
class _StatisticsHandler(_Handler):
    """Handles <statistics>; all children use the generic base handler."""

    def get_handler_for(self, name, attrs):
        return _Handler(self._context, attrs)
class _StatItemHandler(_Handler):
    """Handles a <stat> element; its attrs become the data, labeled by text."""
    def __init__(self, context, attrs):
        _Handler.__init__(self, context)
        self._attrs = dict(attrs)
        # Counts arrive as strings from XML; store them as ints.
        self._attrs['pass'] = int(self._attrs['pass'])
        self._attrs['fail'] = int(self._attrs['fail'])
        if 'doc' in self._attrs:
            self._attrs['doc'] = utils.html_format(self._attrs['doc'])
        # TODO: Should we only dump attrs that have value?
        # Tag stats have many attrs that are normally empty
    def end_element(self, text):
        self._attrs.update(label=text)
        return self._attrs
class _StatusHandler(_Handler):
    """Handles <status>: extracts status char, start time and elapsed time."""

    def __init__(self, context, attrs):
        self._context = context
        # Only the first letter of the status (e.g. 'P'/'F') is stored.
        self._status = attrs.get('status')[0]
        self._starttime = self._context.timestamp(attrs.get('starttime'))
        self._elapsed = self._calculate_elapsed(attrs)

    def _calculate_elapsed(self, attrs):
        endtime = self._context.timestamp(attrs.get('endtime'))
        # Must compare against None because either start and end may be 0.
        # Bug fix: elapsed can only be computed when BOTH ends are known;
        # with `or` a single N/A timestamp crashed on None arithmetic
        # instead of falling back to the elapsedtime attribute below.
        if self._starttime is not None and endtime is not None:
            return endtime - self._starttime
        # Only RF 2.6+ outputs have elapsedtime when start or end is N/A.
        return int(attrs.get('elapsedtime', 0))

    def end_element(self, text):
        result = [self._status, self._starttime, self._elapsed]
        if text:
            result.append(text)
        return self._get_ids(result)
class _ArgumentHandler(_Handler):
    # Single <arg>: passed up as plain text (interned later by the parent).
    def end_element(self, text):
        return text
class _ArgumentsHandler(_Handler):
    # <arguments>: child texts joined into one interned comma-separated string.
    def end_element(self, text):
        return self._get_id(', '.join(self._data_from_children))
class _TextHandler(_Handler):
    # Plain text element (e.g. <tag>): interned via the text cache as-is.
    def end_element(self, text):
        return self._get_id(text)
class _HtmlTextHandler(_Handler):
    # Documentation element (<doc>): HTML-formatted before interning.
    def end_element(self, text):
        return self._get_id(utils.html_format(text))
class _MetadataHandler(_Handler):
    # <metadata>: flattens the [name id, value id] pairs from children
    # into one list.
    def __init__(self, context, attrs):
        _Handler.__init__(self, context)
        self._metadata = []
    def add_child_data(self, data):
        # `data` is the pair produced by _MetadataItemHandler below.
        self._metadata.extend(data)
    def end_element(self, text):
        return self._metadata
class _MetadataItemHandler(_Handler):
    # <item name="...">: yields interned [name, html-formatted value].
    def __init__(self, context, attrs):
        _Handler.__init__(self, context)
        self._name = attrs.get('name')
    def end_element(self, text):
        return self._get_ids([self._name, utils.html_format(text)])
class _MsgHandler(_Handler):
    """Handles a <msg> element: [timestamp, level char, text(, link)]."""
    def __init__(self, context, attrs):
        _Handler.__init__(self, context)
        # Only the first letter of the level (e.g. 'W' for WARN) is stored.
        self._msg = [self._context.timestamp(attrs.get('timestamp')),
                     attrs.get('level')[0]]
        self._is_html = attrs.get('html')
        self._is_linkable = attrs.get("linkable") == "yes"
    def end_element(self, text):
        # HTML messages are passed through; others are escaped.
        self._msg.append(text if self._is_html else utils.html_escape(text))
        self._handle_warning_linking()
        return self._get_ids(self._msg)
    def _handle_warning_linking(self):
        # TODO: should perhaps use the id version of this list for indexing?
        if self._is_linkable:
            self._msg.append(self._context.link_to(self._msg))
        elif self._msg[1] == 'W':
            # Warnings register a link target at the current keyword location.
            self._context.create_link_to_current_location(self._msg)
class Context(object):
    """Shared parsing state: text interning, timestamps, stats and links.

    Tracks the current position in the suite/test/keyword hierarchy and,
    when `split_tests` is enabled, collects per-test keyword data and texts
    into separate split results.
    """
    def __init__(self, split_tests=False):
        self._main_text_cache = TextCache()
        self._current_texts = self._main_text_cache
        self._split_text_caches = []
        self._basemillis = 0
        self._stats = Stats()
        self._current_place = []
        self._kw_index = []
        self._links = {}
        self._split_tests = split_tests
        self._split_results = []
    @property
    def basemillis(self):
        return self._basemillis
    @property
    def split_results(self):
        return self._split_results
    def collect_stats(self):
        # Push a new child stats scope (one per suite).
        self._stats = self._stats.new_child()
        return self
    def dump_stats(self):
        # Pop the current stats scope, returning its counters.
        try:
            return self._stats.dump()
        finally:
            self._stats = self._stats.parent
    def get_id(self, value):
        """Interns strings into the active text cache; ints pass through."""
        if value is None:
            return None
        if isinstance(value, basestring):
            return self._get_text_id(value)
        if isinstance(value, (int, long)):
            return value
        raise TypeError('Unsupported type %s' % type(value))
    def _get_text_id(self, text):
        return self._current_texts.add(text)
    def dump_texts(self):
        return self._current_texts.dump()
    def timestamp(self, time):
        """Returns millis relative to the first seen timestamp; None for N/A."""
        if time == 'N/A':
            return None
        millis = int(utils.timestamp_to_secs(time, millis=True) * 1000)
        # The first timestamp seen becomes the base for all later ones.
        if not self._basemillis:
            self._basemillis = millis
        return millis - self.basemillis
    def start_suite(self, name):
        self._current_place.append(('suite', name))
        self._kw_index.append(0)
    def end_suite(self):
        self._current_place.pop()
        self._kw_index.pop()
    def start_test(self, name):
        if self._split_tests:
            # Each test gets its own text cache when splitting is enabled.
            self._split_text_caches.append(TextCache())
        self._current_place.append(('test', name))
        self._kw_index.append(0)
    def end_test(self, kw_data=None):
        self._current_place.pop()
        self._kw_index.pop()
        if self._split_tests:
            # Store (keywords, texts) and return the 1-based split index.
            self._split_results.append((kw_data, self._split_text_caches[-1].dump()))
            return len(self._split_results)
        return kw_data
    def start_keyword(self):
        if self._split_tests and self._current_place[-1][0] == 'test':
            # Keywords directly under a test write into the test's own cache.
            self._current_texts = self._split_text_caches[-1]
        self._current_place.append(('keyword', self._kw_index[-1]))
        self._kw_index[-1] += 1
        self._kw_index.append(0)
    def end_keyword(self):
        self._current_place.pop()
        self._kw_index.pop()
        if self._split_tests and self._current_place[-1][0] == 'test':
            # Back at test level: switch back to the main text cache.
            self._current_texts = self._main_text_cache
    def create_link_to_current_location(self, key):
        self._links[tuple(key)] = self._create_link()
    def _create_link(self):
        # E.g. "keyword_SuiteName.TestName.0" from the current place stack.
        return "keyword_"+".".join(str(v) for _, v in self._current_place)
    def link_to(self, key):
        return self._links[tuple(key)]
    def add_test(self, critical, passed):
        self._stats.add_test(critical, passed)
    def teardown_failed(self):
        self._stats.fail_all()
class Stats(object):
    """Nested pass/fail counters for all and critical tests."""

    # Indexes into the internal counter list.
    TOTAL = 0
    TOTAL_PASSED = 1
    CRITICAL = 2
    CRITICAL_PASSED = 3

    def __init__(self, parent=None):
        self.parent = parent
        self._stats = [0, 0, 0, 0]
        self._children = []

    def new_child(self):
        """Creates, registers and returns a nested Stats scope."""
        child = Stats(self)
        self._children.append(child)
        return child

    def add_test(self, critical, passed):
        self._stats[Stats.TOTAL] += 1
        if passed:
            self._stats[Stats.TOTAL_PASSED] += 1
        if critical:
            self._stats[Stats.CRITICAL] += 1
            if passed:
                self._stats[Stats.CRITICAL_PASSED] += 1

    def dump(self):
        """Adds own counters into the parent (if any) and returns them."""
        if self.parent:
            for index, count in enumerate(self._stats):
                self.parent._stats[index] += count
        return self._stats

    def fail_all(self):
        """Zeroes the passed counters recursively (teardown failure case)."""
        self._stats[Stats.TOTAL_PASSED] = 0
        self._stats[Stats.CRITICAL_PASSED] = 0
        for child in self._children:
            child.fail_all()
class TextIndex(int):
    # Marker int subclass: lets consumers tell indices into the shared
    # string table apart from plain integers via isinstance checks.
    pass


# Shared index of the empty/raw-marker entry every TextCache starts with.
ZERO_TEXT_INDEX = TextIndex(0)
class TextCache(object):
    """Interning store that maps log strings to small integer indices.

    Entries are kept either raw (prefixed with '*') or zlib+base64
    compressed, whichever representation is smaller enough once the
    string passes the compression threshold.
    """
    # TODO: Tune compressing thresholds
    _compress_threshold = 20
    _use_compressed_threshold = 1.1

    def __init__(self):
        self.texts = {'*': ZERO_TEXT_INDEX}
        self.index = 1

    def add(self, text):
        """Intern `text` and return its index; empty text maps to index 0."""
        if not text:
            return 0
        encoded = self._encode(text)
        index = self.texts.get(encoded)
        if index is None:
            index = TextIndex(self.index)
            self.texts[encoded] = index
            self.index += 1
        return index

    def _encode(self, text):
        """Choose the raw or compressed form, preferring raw on a near-tie."""
        raw = self._raw(text)
        if raw in self.texts or len(raw) < self._compress_threshold:
            return raw
        compressed = self._compress(text)
        return (compressed
                if len(compressed) * self._use_compressed_threshold < len(raw)
                else raw)

    def _compress(self, text):
        return base64.b64encode(zlib.compress(text.encode('UTF-8'), 9))

    def _raw(self, text):
        # The '*' prefix marks uncompressed entries.
        return '*' + text

    def dump(self):
        # TODO: Could we yield or return an iterator?
        # TODO: Duplicate with IntegerCache.dump
        return [text for text, _ in sorted(self.texts.iteritems(),
                                           key=itemgetter(1))]
| [
[
1,
0,
0.0309,
0.0021,
0,
0.66,
0,
373,
0,
1,
0,
0,
373,
0,
0
],
[
1,
0,
0.0329,
0.0021,
0,
0.66,
0.0417,
177,
0,
1,
0,
0,
177,
0,
0
],
[
1,
0,
0.035,
0.0021,
0,
0... | [
"import zlib",
"import base64",
"from operator import itemgetter",
"from robot import utils",
"class _Handler(object):\n\n def __init__(self, context, attrs=None):\n self._context = context\n self._data_from_children = []\n self._handlers = {\n 'robot' : _RobotHandler... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from robot import utils
import json
from robot.result.elementhandlers import TextIndex
# TODO: Rename to match the responsibility - this isn't really a model but a class writing model to outputs
class DataModel(object):
    """Holds parsed output data and writes it out as JavaScript.

    `robot_data` is a dict with keys such as 'suite', 'strings',
    'baseMillis' and 'errors'; `split_results` carries per-test data when
    log splitting is enabled.
    """

    def __init__(self, robot_data, split_results=None):
        self._robot_data = robot_data
        self._split_results = split_results
        self._settings = None
        self._set_generated(time.localtime())

    def _set_generated(self, timetuple):
        # Generation time is stored relative to baseMillis, like the other
        # timestamps in the data.
        genMillis = long(time.mktime(timetuple) * 1000) -\
                        self._robot_data['baseMillis']
        self._set_attr('generatedMillis', genMillis)
        self._set_attr('generatedTimestamp',
                       utils.format_time(timetuple, gmtsep=' '))

    def _set_attr(self, name, value):
        self._robot_data[name] = value

    def set_settings(self, settings):
        # Settings are written out as `window.settings` by write_to().
        self._settings = settings

    def write_to(self, output, separator='', split_threshold=9500):
        """Write all data as `window.output[...]` assignments into `output`.

        `separator` is emitted between chunks; the 'suite' and 'strings'
        elements are split into pieces no larger than `split_threshold`.
        """
        writer = _SeparatingWriter(output, separator)
        writer.write('window.output = {};\n')
        writer.separator()
        for key, value in self._robot_data.items():
            self._write_output_element(key, split_threshold, value, writer)
            writer.separator()
        writer.dump_json('window.settings = ', self._settings)

    def _write_output_element(self, key, split_threshold, value, writer):
        # 'suite' and 'strings' get special split handling; everything else
        # is dumped as a single JSON assignment.
        if key == 'suite':
            splitWriter = SplittingSuiteWriter(writer, split_threshold)
            data, mapping = splitWriter.write(self._robot_data['suite'])
            writer.dump_json('window.output["suite"] = ', data, mapping=mapping)
        elif key == 'strings':
            self._dump_and_split_strings(split_threshold, writer)
        else:
            writer.dump_json('window.output["%s"] = ' % key, value)

    def _dump_and_split_strings(self, split_threshold, writer):
        # The string table is written as consecutive concat() slices of at
        # most `split_threshold` entries each.
        strings = self._robot_data['strings']
        writer.write('window.output["strings"] = [];\n')
        while strings:
            writer.separator()
            writer.dump_json('window.output["strings"] = window.output["strings"].concat(', strings[:split_threshold], ');\n')
            strings = strings[split_threshold:]

    def remove_keywords(self):
        """Drop keyword entries from the suite data and prune unused strings."""
        self._robot_data['suite'] = self._remove_keywords_from(self._robot_data['suite'])
        self._prune_unused_indices()

    # TODO: this and remove_keywords should be removed
    # instead there should be a reportify or write_for_report_to method
    def remove_errors(self):
        self._robot_data.pop('errors')

    def _remove_keywords_from(self, data):
        # Recursively filter keyword-typed sublists out of the nested lists.
        if not isinstance(data, list):
            return data
        return [self._remove_keywords_from(item) for item in data
                if not self._is_ignorable_keyword(item)]

    def _is_ignorable_keyword(self, item):
        # Top level teardown is kept to make tests fail if suite teardown failed
        # TODO: Could we store information about failed suite teardown otherwise?
        # TODO: Cleanup?
        return item and \
               isinstance(item, list) and \
               (isinstance(item[0], TextIndex)) and \
               self._robot_data['strings'][item[0]] in \
                    ['*kw', '*setup', '*forloop', '*foritem']

    def _prune_unused_indices(self):
        # Rebuild the string table with only the referenced entries, then fix
        # every index in the suite data to point at the compacted table.
        used = self._collect_used_indices(self._robot_data['suite'], set())
        remap = {}
        self._robot_data['strings'] = \
            list(self._prune(self._robot_data['strings'], used, remap))
        self._remap_indices(self._robot_data['suite'], remap)

    def _prune(self, data, used, index_remap, map_index=None, offset_increment=1):
        # Yield only the used entries, recording old-index -> new-index into
        # `index_remap` as unused entries are skipped.
        offset = 0
        for index, text in enumerate(data):
            index = map_index(index) if map_index else index
            if index in used:
                index_remap[index] = index - offset
                yield text
            else:
                offset += offset_increment

    def _remap_indices(self, data, remap):
        # In-place rewrite of TextIndex values per `remap`, recursing into
        # nested lists.
        for i, item in enumerate(data):
            if isinstance(item, TextIndex):
                data[i] = remap[item]
            elif isinstance(item, list):
                self._remap_indices(item, remap)

    def _collect_used_indices(self, data, result):
        # Gather every integer reachable through nested lists/dicts; these are
        # the string-table indices still in use.
        for item in data:
            if isinstance(item, (int, long)):
                result.add(item)
            elif isinstance(item, list):
                self._collect_used_indices(item, result)
            elif isinstance(item, dict):
                self._collect_used_indices(item.values(), result)
                self._collect_used_indices(item.keys(), result)
        return result
class _SeparatingWriter(object):
def __init__(self, output, separator):
self._output = output
self._separator = separator
def separator(self):
self._output.write(self._separator)
def dump_json(self, prefix, data_block, postfix = ';\n', mapping=None):
if prefix:
self._output.write(prefix)
json.json_dump(data_block, self._output, mappings=mapping)
self._output.write(postfix)
def write(self, string):
self._output.write(string)
class _SubResult(object):
def __init__(self, data_block, size, mapping):
self.data_block = data_block
self.size = size
self.mapping = mapping
def update(self, subresult):
self.data_block += [subresult.data_block]
self.size += subresult.size
if subresult.mapping:
self.mapping.update(subresult.mapping)
def link(self, name):
key = object()
return _SubResult(key, 1, {key:name})
class SplittingSuiteWriter(object):
    """Writes suite data, spilling big subtrees into window.sPart<N> vars.

    Any sublist whose accumulated size exceeds the split threshold is
    flushed into its own JavaScript variable and replaced by a link key.
    """

    def __init__(self, writer, split_threshold):
        self._index = 0
        self._writer = writer
        self._split_threshold = split_threshold

    def write(self, data_block):
        """Write `data_block`; return (remaining data, key-to-name mapping)."""
        result = self._write(data_block)
        return result.data_block, result.mapping

    def _write(self, data_block):
        if not isinstance(data_block, list):
            return _SubResult(data_block, 1, None)
        combined = _SubResult([], 1, {})
        for child in data_block:
            combined.update(self._write(child))
        if combined.size > self._split_threshold:
            combined = self._dump_suite_part(combined)
        return combined

    def _list_name(self):
        return 'window.sPart%s' % self._index

    def _dump_suite_part(self, result):
        """Flush `result` into its own JS variable and return a link to it."""
        self._writer.dump_json(self._list_name() + ' = ', result.data_block,
                               mapping=result.mapping)
        self._writer.separator()
        link = result.link(self._list_name())
        self._index += 1
        return link
| [
[
1,
0,
0.0765,
0.0051,
0,
0.66,
0,
654,
0,
1,
0,
0,
654,
0,
0
],
[
1,
0,
0.0816,
0.0051,
0,
0.66,
0.1429,
735,
0,
1,
0,
0,
735,
0,
0
],
[
1,
0,
0.0918,
0.0051,
0,
... | [
"import time",
"from robot import utils",
"import json",
"from robot.result.elementhandlers import TextIndex",
"class DataModel(object):\n\n def __init__(self, robot_data, split_results=None):\n self._robot_data = robot_data\n self._split_results = split_results\n self._settings = No... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from xml import sax
from robot.result.elementhandlers import RootHandler, Context
from robot.result.jsondatamodel import DataModel
def create_datamodel_from(input_filename, split_log=False):
    """Parse a Robot output XML file into a DataModel.

    `split_log` enables collecting per-test split results in the Context.
    """
    context = Context(split_log)
    handler = _RobotOutputHandler(context)
    # Renamed from `input` so the builtin of that name is not shadowed.
    with open(input_filename, 'r') as infile:
        sax.parse(infile, handler)
    return handler.datamodel
def parse_js(input_filename, output):
    """Shortcut: parse `input_filename` and write the JS model to `output`."""
    model = create_datamodel_from(input_filename)
    model.write_to(output)
class _RobotOutputHandler(sax.handler.ContentHandler):
def __init__(self, context):
self._context = context
self._root_handler = RootHandler(context)
self._handler_stack = [self._root_handler]
@property
def datamodel(self):
return DataModel(self._root_handler.data, self._context.split_results)
def startElement(self, name, attrs):
handler = self._handler_stack[-1].get_handler_for(name, attrs)
self._charbuffer = []
self._handler_stack.append(handler)
def endElement(self, name):
handler = self._handler_stack.pop()
self._handler_stack[-1].add_child_data(handler.end_element(self.text))
def characters(self, content):
self._charbuffer += [content]
@property
def text(self):
return ''.join(self._charbuffer)
| [
[
1,
0,
0.25,
0.0167,
0,
0.66,
0,
777,
0,
1,
0,
0,
777,
0,
0
],
[
1,
0,
0.2667,
0.0167,
0,
0.66,
0.1667,
324,
0,
1,
0,
0,
324,
0,
0
],
[
1,
0,
0.3,
0.0167,
0,
0.66,... | [
"from __future__ import with_statement",
"from xml import sax",
"from robot.result.elementhandlers import RootHandler, Context",
"from robot.result.jsondatamodel import DataModel",
"def create_datamodel_from(input_filename, split_log=False):\n context = Context(split_log)\n robot = _RobotOutputHandler... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from resultwriter import ResultWriter
| [
[
1,
0,
1,
0.0625,
0,
0.66,
0,
548,
0,
1,
0,
0,
548,
0,
0
]
] | [
"from resultwriter import ResultWriter"
] |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.output import XmlLogger
class OutputWriter(XmlLogger):
    """XmlLogger variant used by Rebot when re-writing output XML."""

    def __init__(self, path):
        XmlLogger.__init__(self, path, generator='Rebot')

    def message(self, msg):
        """Write one log message into the XML output."""
        self._write_message(msg)
| [
[
1,
0,
0.6,
0.04,
0,
0.66,
0,
596,
0,
1,
0,
0,
596,
0,
0
],
[
3,
0,
0.84,
0.28,
0,
0.66,
1,
416,
0,
2,
0,
0,
137,
0,
2
],
[
2,
1,
0.82,
0.08,
1,
0.37,
0,
5... | [
"from robot.output import XmlLogger",
"class OutputWriter(XmlLogger):\n\n def __init__(self, path):\n XmlLogger.__init__(self, path, generator='Rebot')\n\n def message(self, msg):\n self._write_message(msg)",
" def __init__(self, path):\n XmlLogger.__init__(self, path, generator='R... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Public logging API for test libraries.
This module provides a public API for writing messages to the log file
and the console. Test libraries can use this API like `logger.info('My
message')` instead of logging through the standard output like `print
'*INFO* My message'`. In addition to a programmatic interface being
cleaner to use, this API has a benefit that the log messages have
accurate timestamps.
Log levels
----------
It is possible to log messages using levels `TRACE`, `DEBUG`, `INFO`
and `WARN` either using the `write` method or, more commonly, with the
log level specific `trace`, `debug`, `info` and `warn` methods.
By default the trace and debug messages are not logged but that can be
changed with the `--loglevel` command line option. Warnings are
automatically written also to the `Test Execution Errors` section in
the log file and to the console.
Logging HTML
------------
All methods that are used for writing messages to the log file have an
optional `html` argument. If a message to be logged is supposed to be
shown as HTML, this argument should be set to `True`.
Example
-------
from robot.api import logger
def my_keyword(arg):
logger.debug('Got argument %s' % arg)
do_something()
logger.info('<i>This</i> is a boring example', html=True)
"""
import sys
from robot.output import LOGGER, Message
def write(msg, level, html=False):
    """Writes the message to the log file using the given level.

    Valid log levels are `TRACE`, `DEBUG`, `INFO` and `WARN`. The level
    specific helpers such as `info` and `debug` are generally preferable
    to calling this directly.
    """
    LOGGER.log_message(Message(msg, level, html))
def trace(msg, html=False):
    """Writes the message to the log file using the TRACE level."""
    write(msg, 'TRACE', html=html)
def debug(msg, html=False):
    """Writes the message to the log file using the DEBUG level."""
    write(msg, 'DEBUG', html=html)
def info(msg, html=False, also_console=False):
    """Writes the message to the log file using the INFO level.

    Setting `also_console` to True additionally echoes the message to
    the console.
    """
    write(msg, 'INFO', html=html)
    if also_console:
        console(msg)
def warn(msg, html=False):
    """Writes the message to the log file using the WARN level."""
    write(msg, 'WARN', html=html)
def console(msg, newline=True):
    """Writes the message to the console.

    A newline character is appended unless `newline` is set to False.
    """
    suffix = '\n' if newline else ''
    sys.__stdout__.write(msg + suffix)
| [
[
8,
0,
0.3418,
0.3878,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.551,
0.0102,
0,
0.66,
0.125,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.5714,
0.0102,
0,
0.66,
... | [
"\"\"\"Public logging API for test libraries.\n\nThis module provides a public API for writing messages to the log file\nand the console. Test libraries can use this API like `logger.info('My\nmessage')` instead of logging through the standard output like `print\n'*INFO* My message'`. In addition to a programmatic ... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| [] | [] |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import utils
# Return codes from Robot and Rebot.
# RC below 250 is the number of failed critical tests and exactly 250
# means that number or more such failures.
INFO_PRINTED = 251    # --help or --version
DATA_ERROR = 252      # Invalid data or cli args
STOPPED_BY_USER = 253 # KeyboardInterrupt or SystemExit
FRAMEWORK_ERROR = 255 # Unexpected error
class RobotError(Exception):
    """Base class for Robot Framework errors.

    Do not raise this class directly; use a more specific subclass instead.
    """

    def __init__(self, message=''):
        Exception.__init__(self, message)

    def __unicode__(self):
        # Needed to handle exceptions w/ Unicode correctly on Python 2.5
        if not self.args:
            return u''
        return unicode(self.args[0])
class FrameworkError(RobotError):
    """Raised when the core framework ends up in an unexpected state.

    Raise this explicitly when a framework component is used incorrectly.
    Comparable to an 'Internal Error' and should never happen in practice.
    """
class DataError(RobotError):
    """Raised when the provided test data is invalid.

    Keywords that run other keywords (e.g. `Run Keyword And Expect Error`)
    do not catch DataErrors, so libraries should use this exception with
    care.
    """
class TimeoutError(RobotError):
    """Raised when a test or keyword timeout occurs.

    Handled specially: execution of the current test is always stopped
    immediately, and keywords executing other keywords (e.g. `Run Keyword
    And Expect Error`) never catch it. Libraries should thus NOT raise
    this exception themselves.
    """
class Information(RobotError):
    """Raised by the argument parser when --help or --version is used."""
class ExecutionFailed(RobotError):
    """Used for communicating failures in test execution.

    The boolean flags describe how the failure affects continuing the run:
    `timeout`/`syntax`/`exit` force stopping, `cont` allows continuing,
    `exit_for_loop` breaks out of an enclosing for loop.
    """

    def __init__(self, message, timeout=False, syntax=False, exit=False,
                 cont=False, exit_for_loop=False):
        RobotError.__init__(self, utils.cut_long_message(message))
        self.timeout = timeout
        self.syntax = syntax
        self.exit = exit
        self.cont = cont  # goes through the `cont` property setter below
        self.exit_for_loop = exit_for_loop

    @property
    def dont_cont(self):
        # Timeouts, syntax errors and exit-on-failure always stop execution.
        return self.timeout or self.syntax or self.exit

    # Python 2.6 property decorators would have nice `setter` attribute:
    # http://docs.python.org/library/functions.html#property
    cont = property(lambda self: self._cont and not self.dont_cont,
                    lambda self, cont: setattr(self, '_cont', cont))

    def can_continue(self, teardown=False, templated=False, dry_run=False):
        """Tell whether execution can continue after this failure."""
        if dry_run:
            return True
        if self.dont_cont and not (teardown and self.syntax):
            return False
        if teardown or templated:
            return True
        return self.cont

    def get_errors(self):
        # Subclasses aggregating several errors override this.
        return [self]
class HandlerExecutionFailed(ExecutionFailed):
    """ExecutionFailed built from the currently handled exception.

    Inspects `utils.ErrorDetails()` and the ROBOT_* marker attributes of
    the original error to decide how the failure affects execution.
    """

    def __init__(self):
        details = utils.ErrorDetails()
        error = details.error
        ExecutionFailed.__init__(
            self, details.message,
            timeout=isinstance(error, TimeoutError),
            syntax=isinstance(error, DataError),
            exit=bool(getattr(error, 'ROBOT_EXIT_ON_FAILURE', False)),
            cont=bool(getattr(error, 'ROBOT_CONTINUE_ON_FAILURE', False)),
            exit_for_loop=bool(getattr(error, 'ROBOT_EXIT_FOR_LOOP', False)))
        self.full_message = details.message
        self.traceback = details.traceback
class ExecutionFailures(ExecutionFailed):
    """Aggregates several ExecutionFailed errors into one."""

    def __init__(self, errors):
        messages = [unicode(err) for err in errors]
        ExecutionFailed.__init__(self, self._format_message(messages),
                                 **self._get_attrs(errors))
        self._errors = errors

    def _format_message(self, messages):
        """Return one message as-is; several get numbered under a header."""
        if len(messages) == 1:
            return messages[0]
        parts = ['Several failures occurred:']
        parts.extend('%d) %s' % (index + 1, message)
                     for index, message in enumerate(messages))
        return '\n\n'.join(parts)

    def _get_attrs(self, errors):
        # Stop-type flags propagate if ANY error has them; continuation
        # flags only if ALL errors allow continuing.
        return {'timeout': any(err.timeout for err in errors),
                'syntax': any(err.syntax for err in errors),
                'exit': any(err.exit for err in errors),
                'cont': all(err.cont for err in errors),
                'exit_for_loop': all(err.exit_for_loop for err in errors)}

    def get_errors(self):
        return self._errors
class UserKeywordExecutionFailed(ExecutionFailures):
    """Combines a user keyword's run-time and teardown failures."""

    def __init__(self, run_errors=None, teardown_errors=None):
        # Placeholder keeps flag aggregation permissive for an empty slot.
        no_errors = ExecutionFailed('', cont=True, exit_for_loop=True)
        ExecutionFailures.__init__(self, [run_errors or no_errors,
                                          teardown_errors or no_errors])
        if run_errors and not teardown_errors:
            self._errors = run_errors.get_errors()
        else:
            self._errors = [self]

    def _format_message(self, messages):
        run_msg, teardown_msg = messages
        if not teardown_msg:
            return run_msg
        if not run_msg:
            return 'Keyword teardown failed:\n%s' % teardown_msg
        return '%s\n\nAlso keyword teardown failed:\n%s' % (run_msg, teardown_msg)
class RemoteError(RobotError):
    """Used by Remote library to report remote errors."""

    def __init__(self, message, traceback):
        RobotError.__init__(self, message)
        self.traceback = traceback
| [
[
1,
0,
0.0877,
0.0058,
0,
0.66,
0,
970,
0,
1,
0,
0,
970,
0,
0
],
[
14,
0,
0.1228,
0.0058,
0,
0.66,
0.0714,
755,
1,
0,
0,
0,
0,
1,
0
],
[
14,
0,
0.1287,
0.0058,
0,
... | [
"import utils",
"INFO_PRINTED = 251 # --help or --version",
"DATA_ERROR = 252 # Invalid data or cli args",
"STOPPED_BY_USER = 253 # KeyboardInterrupt or SystemExit",
"FRAMEWORK_ERROR = 255 # Unexpected error",
"class RobotError(Exception):\n \"\"\"Base class for Robot Framework errors.\... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from robot import utils
from robot.errors import DataError, FrameworkError
from robot.output import LOGGER
class _BaseSettings(object):
    """Common option storage shared by Robot and Rebot settings.

    `_cli_opts` maps a logical setting name to a (cli option name,
    default value) pair; subclasses extend it via `_extra_cli_opts`.
    Raw command line values are normalized by `_process_value` before
    being stored.
    """
    _cli_opts = {'Name'             : ('name', None),
                 'Doc'              : ('doc', None),
                 'Metadata'         : ('metadata', []),
                 'TestNames'        : ('test', []),
                 'SuiteNames'       : ('suite', []),
                 'SetTag'           : ('settag', []),
                 'Include'          : ('include', []),
                 'Exclude'          : ('exclude', []),
                 'Critical'         : ('critical', None),
                 'NonCritical'      : ('noncritical', None),
                 'OutputDir'        : ('outputdir', '.'),
                 'Log'              : ('log', 'log.html'),
                 'Report'           : ('report', 'report.html'),
                 'Summary'          : ('summary', 'NONE'),
                 'XUnitFile'        : ('xunitfile', 'NONE'),
                 'SplitLog'         : ('splitlog', False),
                 'SplitOutputs'     : ('splitoutputs', -1),
                 'TimestampOutputs' : ('timestampoutputs', False),
                 'LogTitle'         : ('logtitle', None),
                 'ReportTitle'      : ('reporttitle', None),
                 'SummaryTitle'     : ('summarytitle', None),
                 'ReportBackground' : ('reportbackground',
                                       ('#99FF66', '#99FF66', '#FF3333')),
                 'SuiteStatLevel'   : ('suitestatlevel', -1),
                 'TagStatInclude'   : ('tagstatinclude', []),
                 'TagStatExclude'   : ('tagstatexclude', []),
                 'TagStatCombine'   : ('tagstatcombine', []),
                 'TagDoc'           : ('tagdoc', []),
                 'TagStatLink'      : ('tagstatlink', []),
                 'NoStatusRC'       : ('nostatusrc', False),
                 'RunEmptySuite'    : ('runemptysuite', False),
                 'MonitorWidth'     : ('monitorwidth', 78),
                 'MonitorColors'    : ('monitorcolors', 'AUTO')}
    # Settings whose value is a file path resolved via _get_output_file.
    _output_opts = ['Output', 'Log', 'Report', 'DebugFile', 'XUnitFile']

    def __init__(self, options={}, log=True):
        """Store normalized `options`; `log` controls warnings/info output."""
        self._opts = {}
        self._cli_opts.update(self._extra_cli_opts)
        self._process_cli_opts(options, log)
        if log: LOGGER.info('Settings:\n%s' % unicode(self))

    def _process_cli_opts(self, opts, log):
        # Missing options and empty values fall back to the declared default.
        for name, (cli_name, default) in self._cli_opts.items():
            try:
                value = opts[cli_name]
                if value in [None, []]:
                    raise KeyError
            except KeyError:
                value = default
            self[name] = self._process_value(name, value, log)

    def __setitem__(self, name, value):
        if name not in self._cli_opts:
            raise KeyError("Non-existing settings '%s'" % name)
        self._opts[name] = value

    def _process_value(self, name, value, log):
        """Normalize one raw option value based on the setting's name."""
        if value == self._get_default_value(name):
            return value
        if name in ['Name', 'Doc', 'LogTitle', 'ReportTitle']:
            if name == 'Doc': value = self._escape(value)
            return value.replace('_', ' ')
        if name in ['Metadata', 'TagDoc']:
            if name == 'Metadata': value = [self._escape(v) for v in value]
            return [self._process_metadata_or_tagdoc(v) for v in value]
        if name in ['Include', 'Exclude']:
            return [v.replace('AND', '&').replace('_', ' ') for v in value]
        if name in self._output_opts and utils.eq(value, 'NONE'):
            return 'NONE'
        if name == 'OutputDir':
            return utils.abspath(value)
        if name in ['SuiteStatLevel', 'MonitorWidth']:
            return self._convert_to_positive_integer_or_default(name, value)
        if name in ['Listeners', 'VariableFiles']:
            return [self._split_args_from_name(item) for item in value]
        if name == 'ReportBackground':
            return self._process_report_background(value)
        if name == 'TagStatCombine':
            return [self._process_tag_stat_combine(v) for v in value]
        if name == 'TagStatLink':
            return [v for v in [self._process_tag_stat_link(v) for v in value] if v]
        if name == 'RemoveKeywords':
            return value.upper()
        if name in ['SplitOutputs', 'Summary', 'SummaryTitle']:
            return self._removed_in_26(name, log)
        return value

    # TODO: Remove --splitoutputs, --summary, and --summarytitle in 2.7
    def _removed_in_26(self, name, log):
        # Warn about deprecated options and fall back to the default value.
        start = {'SplitOutputs': 'Splitting outputs is',
                 'Summary': 'Summary reports are',
                 'SummaryTitle': 'Summary titles are'}[name]
        option, default = self._cli_opts[name]
        if log:
            LOGGER.warn('%s not supported in Robot Framework 2.6 or newer and '
                        '--%s option will be removed altogether in version 2.7.'
                        % (start, option))
        return default

    def __getitem__(self, name):
        if name not in self._cli_opts:
            raise KeyError("Non-existing setting '%s'" % name)
        if name in self._output_opts:
            return self._get_output_file(name)
        return self._opts[name]

    def _get_output_file(self, type_):
        """Returns path of the requested output file and creates needed dirs.

        `type_` can be 'Output', 'Log', 'Report', 'DebugFile' or 'XUnitFile'.
        """
        name = self._opts[type_]
        if self._outputfile_disabled(type_, name):
            return 'NONE'
        name = self._process_output_name(name, type_)
        path = utils.abspath(os.path.join(self['OutputDir'], name))
        self._create_output_dir(os.path.dirname(path), type_)
        return path

    def _process_output_name(self, name, type_):
        # Optionally timestamp the base name and ensure a suitable extension.
        base, ext = os.path.splitext(name)
        if self['TimestampOutputs']:
            base = '%s-%s' % (base, utils.get_start_timestamp('', '-', ''))
        ext = self._get_output_extension(ext, type_)
        return base + ext

    def _get_output_extension(self, ext, type_):
        if ext != '':
            return ext
        if type_ in ['Output', 'XUnitFile']:
            return '.xml'
        if type_ in ['Log', 'Report']:
            return '.html'
        if type_ == 'DebugFile':
            return '.txt'
        raise FrameworkError("Invalid output file type: %s" % type_)

    def _create_output_dir(self, path, type_):
        try:
            if not os.path.exists(path):
                os.makedirs(path)
        except:
            raise DataError("Can't create %s file's parent directory '%s': %s"
                            % (type_.lower(), path, utils.get_error_message()))

    def _process_metadata_or_tagdoc(self, value):
        # 'name:value' splits into a pair; a bare name gets an empty value.
        value = value.replace('_', ' ')
        if ':' in value:
            return value.split(':', 1)
        return value, ''

    def _process_report_background(self, colors):
        # Accepts 'pass:fail' or 'pass:noncritpass:fail'.
        if colors.count(':') not in [1, 2]:
            LOGGER.error("Invalid report background colors '%s'." % colors)
            return self._get_default_value('ReportBackground')
        colors = colors.split(':')
        if len(colors) == 2:
            return colors[0], colors[0], colors[1]
        return tuple(colors)

    def _process_tag_stat_combine(self, value):
        for replwhat, replwith in [('_', ' '), ('AND', '&'),
                                   ('&', ' & '), ('NOT', ' NOT ')]:
            value = value.replace(replwhat, replwith)
        if ':' in value:
            return value.rsplit(':', 1)
        return value, ''

    def _process_tag_stat_link(self, value):
        # Expected format 'tag:link:title'; the link itself may contain ':'.
        tokens = value.split(':')
        if len(tokens) >= 3:
            return tokens[0], ':'.join(tokens[1:-1]), tokens[-1]
        LOGGER.error("Invalid format for option '--tagstatlink'. "
                     "Expected 'tag:link:title' but got '%s'." % value)
        return None

    def _convert_to_positive_integer_or_default(self, name, value):
        value = self._convert_to_integer(name, value)
        return value if value > 0 else self._get_default_value(name)

    def _convert_to_integer(self, name, value):
        try:
            return int(value)
        except ValueError:
            LOGGER.error("Option '--%s' expected integer value but got '%s'. "
                         "Default value used instead." % (name.lower(), value))
            return self._get_default_value(name)

    def _get_default_value(self, name):
        return self._cli_opts[name][1]

    def _split_args_from_name(self, name):
        # 'name:arg1:arg2' -> (name, [arg1, arg2]); an existing path is
        # taken as-is even if it contains colons.
        if ':' not in name or os.path.exists(name):
            return name, []
        args = name.split(':')
        name = args.pop(0)
        # Handle absolute Windows paths with arguments
        if len(name) == 1 and args[0].startswith(('/', '\\')):
            name = name + ':' + args.pop(0)
        return name, args

    def __contains__(self, setting):
        return setting in self._cli_opts

    def __unicode__(self):
        return '\n'.join('%s: %s' % (name, self._opts[name])
                         for name in sorted(self._opts))
class RobotSettings(_BaseSettings):
    """Settings used when executing tests."""

    _extra_cli_opts = {'Output'        : ('output', 'output.xml'),
                       'LogLevel'      : ('loglevel', 'INFO'),
                       'RunMode'       : ('runmode', []),
                       'WarnOnSkipped' : ('warnonskippedfiles', False),
                       'Variables'     : ('variable', []),
                       'VariableFiles' : ('variablefile', []),
                       'Listeners'     : ('listener', []),
                       'DebugFile'     : ('debugfile', 'NONE')}

    def is_rebot_needed(self):
        """Post-processing is needed unless log, report and xunit are all off."""
        return not ('NONE' == self['Log'] == self['Report'] == self['XUnitFile'])

    def get_rebot_datasource_and_settings(self):
        """Derive RebotSettings for post-processing this run's output file."""
        datasource = self['Output']
        settings = RebotSettings(log=False)
        settings._opts.update(self._opts)
        # Execution-only options do not exist on the Rebot side.
        for name in ['Variables', 'VariableFiles', 'Listeners']:
            del settings._opts[name]
        for name in ['Include', 'Exclude', 'TestNames', 'SuiteNames', 'Metadata']:
            settings._opts[name] = []
        for name in ['Output', 'RemoveKeywords']:
            settings._opts[name] = 'NONE'
        for name in ['Name', 'Doc']:
            settings._opts[name] = None
        settings._opts['LogLevel'] = 'TRACE'
        return datasource, settings

    def _outputfile_disabled(self, type_, name):
        if name == 'NONE':
            return True
        # Everything except the debug file is disabled when output.xml is.
        return self._opts['Output'] == 'NONE' and type_ != 'DebugFile'

    def _escape(self, value):
        return utils.escape(value)
class RebotSettings(_BaseSettings):
    """Settings used when post-processing outputs with Rebot."""

    _extra_cli_opts = {'Output'         : ('output', 'NONE'),
                       'LogLevel'       : ('loglevel', 'TRACE'),
                       'RemoveKeywords' : ('removekeywords', 'NONE'),
                       'StartTime'      : ('starttime', 'N/A'),
                       'EndTime'        : ('endtime', 'N/A')}

    def _outputfile_disabled(self, type_, name):
        # Each output is controlled individually by its own 'NONE' value.
        return name == 'NONE'

    def _escape(self, value):
        # Rebot does not escape values.
        return value
| [
[
1,
0,
0.0536,
0.0036,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0607,
0.0036,
0,
0.66,
0.1667,
735,
0,
1,
0,
0,
735,
0,
0
],
[
1,
0,
0.0643,
0.0036,
0,
... | [
"import os",
"from robot import utils",
"from robot.errors import DataError, FrameworkError",
"from robot.output import LOGGER",
"class _BaseSettings(object):\n _cli_opts = {'Name' : ('name', None),\n 'Doc' : ('doc', None),\n 'Metadata' : (... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from settings import RobotSettings, RebotSettings
| [
[
1,
0,
1,
0.0667,
0,
0.66,
0,
168,
0,
2,
0,
0,
168,
0,
0
]
] | [
"from settings import RobotSettings, RebotSettings"
] |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot import utils
from robot.errors import DataError
# Mapping from log level names to numeric severity. Higher numbers are more
# severe; a message is logged when its level is at least the threshold
# (see IsLogged below). 'NONE' disables logging entirely.
LEVELS = {
  "NONE" : 100,
  "ERROR" : 60,
  "FAIL" : 50,
  "WARN" : 40,
  "INFO" : 30,
  "DEBUG" : 20,
  "TRACE" : 10,
}
class AbstractLogger:
    """Base class offering level-named convenience logging methods.

    Subclasses must implement `message`; every other method funnels into it.
    """

    def __init__(self, level='TRACE'):
        self._is_logged = IsLogged(level)

    def set_level(self, level):
        """Change the threshold level and return the previous one."""
        return self._is_logged.set_level(level)

    def trace(self, msg):
        self.write(msg, 'TRACE')

    def debug(self, msg):
        self.write(msg, 'DEBUG')

    def info(self, msg):
        self.write(msg, 'INFO')

    def warn(self, msg):
        self.write(msg, 'WARN')

    def fail(self, msg):
        self.write(msg, 'FAIL')

    def error(self, msg):
        self.write(msg, 'ERROR')

    def write(self, message, level, html=False):
        # Wrap the raw text into a Message and hand it to the subclass.
        self.message(Message(message, level, html))

    def message(self, msg):
        # Subclass responsibility.
        raise NotImplementedError(self.__class__)
class Message(object):
    """A single log message: text, level, HTML flag and timestamp."""

    def __init__(self, message, level='INFO', html=False, timestamp=None, linkable=False):
        self.message = self._normalize(message)
        self.level, self.html = self._resolve_level(level, html)
        # Use the given timestamp when provided; otherwise stamp with now.
        self.timestamp = timestamp or self._current_timestamp()
        self.linkable = linkable

    def _normalize(self, msg):
        # Non-string input (e.g. an exception object) is made unicode first.
        if not isinstance(msg, basestring):
            msg = utils.unic(msg)
        return msg.replace('\r\n', '\n')

    def _resolve_level(self, level, html):
        level = level.upper()
        # Pseudo level 'HTML' means INFO with HTML formatting enabled.
        if level == 'HTML':
            return 'INFO', True
        if level not in LEVELS:
            raise DataError("Invalid log level '%s'" % level)
        return level, html

    def _current_timestamp(self):
        return utils.get_timestamp(daysep='', daytimesep=' ',
                                   timesep=':', millissep='.')

    def get_timestamp(self, sep=' '):
        """Return the timestamp with the date/time separator replaced."""
        return self.timestamp.replace(' ', sep)

    @property
    def time(self):
        """Time-of-day part of the timestamp ('N/A' etc. pass through)."""
        return self.timestamp.split()[1] if ' ' in self.timestamp \
            else self.timestamp
class IsLogged:
    """Callable telling whether a message of a given level is logged."""

    def __init__(self, level):
        self._str_level = level
        self._int_level = self._level_to_int(level)

    def __call__(self, level):
        return self._level_to_int(level) >= self._int_level

    def set_level(self, level):
        """Set a new threshold and return the old one (upper-cased)."""
        previous = self._str_level.upper()
        self.__init__(level)
        return previous

    def _level_to_int(self, level):
        name = level.upper()
        if name not in LEVELS:
            raise DataError("Invalid log level '%s'" % level)
        return LEVELS[name]
class AbstractLoggerProxy:
    """Binds a fixed set of methods from a wrapped logger onto self.

    For each name in `_methods` the proxy looks the attribute up on the
    wrapped logger, falling back to its camelCase variant (for Java style
    listeners) and finally to a no-op.
    """
    _methods = NotImplemented

    def __init__(self, logger):
        self.logger = logger
        for name in self._methods:
            setattr(self, name, self._resolve_method(logger, name))

    def _resolve_method(self, logger, name):
        try:
            return getattr(logger, name)
        except AttributeError:
            return getattr(logger, self._toCamelCase(name), lambda *args: None)

    def _toCamelCase(self, name):
        parts = name.split('_')
        return parts[0] + ''.join(part.capitalize() for part in parts[1:])
| [
[
1,
0,
0.1168,
0.0073,
0,
0.66,
0,
735,
0,
1,
0,
0,
735,
0,
0
],
[
1,
0,
0.1241,
0.0073,
0,
0.66,
0.1667,
299,
0,
1,
0,
0,
299,
0,
0
],
[
14,
0,
0.1752,
0.0657,
0,
... | [
"from robot import utils",
"from robot.errors import DataError",
"LEVELS = {\n \"NONE\" : 100,\n \"ERROR\" : 60,\n \"FAIL\" : 50,\n \"WARN\" : 40,\n \"INFO\" : 30,\n \"DEBUG\" : 20,\n \"TRACE\" : 10,",
"class AbstractLogger:\n\n def __init__(self, level='TRACE'):\n self._is_logged = IsLog... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot import utils
from robot.errors import DataError
from robot.version import get_full_version
from loggerhelper import IsLogged
class XmlLogger:
    """Writes test execution results into the output XML file.

    Log messages are filtered by `log_level`; warnings and errors are
    additionally buffered and written into an <errors> element on close().
    """

    def __init__(self, path, log_level='TRACE', generator='Robot'):
        self._log_message_is_logged = IsLogged(log_level)
        self._error_message_is_logged = IsLogged('WARN')
        self._writer = self._get_writer(path, generator)
        self._errors = []

    def _get_writer(self, path, generator):
        try:
            writer = utils.XmlWriter(path)
        except:
            raise DataError("Opening output file '%s' for writing failed: %s"
                            % (path, utils.get_error_message()))
        # Root element carries the tool version and the generation time.
        writer.start('robot', {'generator': get_full_version(generator),
                               'generated': utils.get_timestamp()})
        return writer

    def close(self):
        # Flush buffered errors, close the root element and the file.
        self.start_errors()
        for msg in self._errors:
            self._write_message(msg)
        self.end_errors()
        self._writer.end('robot')
        self._writer.close()

    def set_log_level(self, level):
        return self._log_message_is_logged.set_level(level)

    def message(self, msg):
        # Execution level messages: only WARN and above are kept (buffered).
        if self._error_message_is_logged(msg.level):
            self._errors.append(msg)

    def log_message(self, msg):
        # Keyword level messages: written immediately if above threshold.
        if self._log_message_is_logged(msg.level):
            self._write_message(msg)

    def _write_message(self, msg):
        attrs = {'timestamp': msg.timestamp, 'level': msg.level}
        if msg.html:
            attrs['html'] = 'yes'
        if msg.linkable:
            attrs['linkable'] = 'yes'
        self._writer.element('msg', msg.message, attrs)

    def start_keyword(self, kw):
        self._writer.start('kw', {'name': kw.name, 'type': kw.type,
                                  'timeout': kw.timeout})
        self._writer.element('doc', kw.doc)
        self._write_list('arguments', 'arg', kw.args)

    def end_keyword(self, kw):
        self._write_status(kw)
        self._writer.end('kw')

    def start_test(self, test):
        self._writer.start('test', {'name': test.name,
                                    'timeout': test.timeout})
        self._writer.element('doc', test.doc)

    def end_test(self, test):
        self._write_list('tags', 'tag', test.tags)
        self._write_status(test, test.message, {'critical': test.critical})
        self._writer.end('test')

    def start_suite(self, suite):
        attrs = {'name': suite.name}
        if suite.source:
            attrs['source'] = suite.source
        self._writer.start('suite', attrs)
        self._writer.element('doc', suite.doc)
        self._writer.start('metadata')
        for name, value in suite.get_metadata():
            self._writer.element('item', value, {'name': name})
        self._writer.end('metadata')

    def end_suite(self, suite):
        self._write_status(suite, suite.message)
        self._writer.end('suite')

    def start_statistics(self, stats):
        self._writer.start('statistics')

    def end_statistics(self, stats):
        self._writer.end('statistics')

    def start_total_stats(self, total_stats):
        self._writer.start('total')

    def end_total_stats(self, total_stats):
        self._writer.end('total')

    def start_tag_stats(self, tag_stats):
        self._writer.start('tag')

    def end_tag_stats(self, tag_stats):
        self._writer.end('tag')

    def start_suite_stats(self, tag_stats):
        self._writer.start('suite')

    def end_suite_stats(self, tag_stats):
        self._writer.end('suite')

    def total_stat(self, stat):
        self._stat(stat)

    def suite_stat(self, stat):
        self._stat(stat, stat.long_name, attrs={'name': stat.name})

    def tag_stat(self, stat):
        self._stat(stat, attrs={'info': self._get_tag_stat_info(stat),
                                'links': self._get_tag_links(stat),
                                'doc': stat.doc,
                                'combined': stat.combined})

    def _get_tag_links(self, stat):
        # Serialized as 'title:url:::title:url...'.
        return ':::'.join(':'.join([title, url]) for url, title in stat.links)

    def _stat(self, stat, name=None, attrs=None):
        attrs = attrs or {}
        attrs['pass'] = str(stat.passed)
        attrs['fail'] = str(stat.failed)
        self._writer.element('stat', name or stat.name, attrs)

    def _get_tag_stat_info(self, stat):
        if stat.critical:
            return 'critical'
        if stat.non_critical:
            return 'non-critical'
        if stat.combined:
            return 'combined'
        return ''

    def start_errors(self):
        self._writer.start('errors')

    def end_errors(self):
        self._writer.end('errors')

    def _write_list(self, container_tag, item_tag, items):
        self._writer.start(container_tag)
        for item in items:
            self._writer.element(item_tag, item)
        self._writer.end(container_tag)

    def _write_status(self, item, message=None, extra_attrs=None):
        attrs = {'status': item.status, 'starttime': item.starttime,
                 'endtime': item.endtime}
        # Elapsed time is stored explicitly only when real times are missing.
        if item.starttime == 'N/A' or item.endtime == 'N/A':
            attrs['elapsedtime'] = item.elapsedtime
        if extra_attrs:
            attrs.update(extra_attrs)
        self._writer.element('status', message, attrs)
| [
[
1,
0,
0.0857,
0.0057,
0,
0.66,
0,
735,
0,
1,
0,
0,
735,
0,
0
],
[
1,
0,
0.0914,
0.0057,
0,
0.66,
0.25,
299,
0,
1,
0,
0,
299,
0,
0
],
[
1,
0,
0.0971,
0.0057,
0,
0.... | [
"from robot import utils",
"from robot.errors import DataError",
"from robot.version import get_full_version",
"from loggerhelper import IsLogged",
"class XmlLogger:\n\n def __init__(self, path, log_level='TRACE', generator='Robot'):\n self._log_message_is_logged = IsLogged(log_level)\n sel... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.common.statistics import Statistics
from loggerhelper import AbstractLogger
from logger import LOGGER
from xmllogger import XmlLogger
from listeners import Listeners
from debugfile import DebugFile
from stdoutlogsplitter import StdoutLogSplitter
class Output(AbstractLogger):
    """Facade for execution output: XML log plus all registered loggers."""

    def __init__(self, settings):
        AbstractLogger.__init__(self)
        self._xmllogger = XmlLogger(settings['Output'], settings['LogLevel'])
        self._register_loggers(settings['Listeners'], settings['DebugFile'])
        self._settings = settings
        self._set_global_output()

    def _register_loggers(self, listeners, debugfile):
        # The XML logger must also see suite/test/kw context changes,
        # not only messages.
        LOGGER.register_context_changing_logger(self._xmllogger)
        for logger in Listeners(listeners), DebugFile(debugfile):
            if logger: LOGGER.register_logger(logger)
        LOGGER.disable_message_cache()

    def _set_global_output(self):
        # This is a hack. Hopefully we get rid of it at some point.
        from robot import output
        output.OUTPUT = self

    def close(self, suite):
        # Compute statistics from the finished suite and serialize them
        # into the XML before closing and unregistering the logger.
        stats = Statistics(suite, self._settings['SuiteStatLevel'],
                           self._settings['TagStatInclude'],
                           self._settings['TagStatExclude'],
                           self._settings['TagStatCombine'],
                           self._settings['TagDoc'],
                           self._settings['TagStatLink'])
        stats.serialize(self._xmllogger)
        self._xmllogger.close()
        LOGGER.unregister_logger(self._xmllogger)
        LOGGER.output_file('Output', self._settings['Output'])

    def start_suite(self, suite):
        LOGGER.start_suite(suite)

    def end_suite(self, suite):
        LOGGER.end_suite(suite)

    def start_test(self, test):
        LOGGER.start_test(test)

    def end_test(self, test):
        LOGGER.end_test(test)

    def start_keyword(self, kw):
        LOGGER.start_keyword(kw)

    def end_keyword(self, kw):
        LOGGER.end_keyword(kw)

    def log_output(self, output):
        # Captured stdout may contain several messages; split and relay.
        for msg in StdoutLogSplitter(output):
            self.message(msg)

    def message(self, msg):
        LOGGER.log_message(msg)

    def set_log_level(self, level):
        return self._xmllogger.set_log_level(level)
| [
[
1,
0,
0.1807,
0.012,
0,
0.66,
0,
471,
0,
1,
0,
0,
471,
0,
0
],
[
1,
0,
0.2048,
0.012,
0,
0.66,
0.1429,
426,
0,
1,
0,
0,
426,
0,
0
],
[
1,
0,
0.2169,
0.012,
0,
0.6... | [
"from robot.common.statistics import Statistics",
"from loggerhelper import AbstractLogger",
"from logger import LOGGER",
"from xmllogger import XmlLogger",
"from listeners import Listeners",
"from debugfile import DebugFile",
"from stdoutlogsplitter import StdoutLogSplitter",
"class Output(AbstractLo... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from robot import utils
from highlighting import Highlighter, NoHighlighting
from loggerhelper import IsLogged
class CommandLineMonitor:
    """Writes test progress to the console while tests run.

    Status and level words are colored via StatusHighlighter; warnings and
    errors go to stderr, everything else to stdout.
    """

    def __init__(self, width=78, colors='AUTO'):
        self._width = width
        self._highlighter = StatusHighlighter(colors)
        self._is_logged = IsLogged('WARN')
        self._started = False

    def start_suite(self, suite):
        if not self._started:
            # Top separator is written only once, before the first suite.
            self._write_separator('=')
            self._started = True
        self._write_info(suite.longname, suite.doc, start_suite=True)
        self._write_separator('=')

    def end_suite(self, suite):
        self._write_info(suite.longname, suite.doc)
        self._write_status(suite.status)
        self._write_message(suite.get_full_message())
        self._write_separator('=')

    def start_test(self, test):
        self._write_info(test.name, test.doc)

    def end_test(self, test):
        self._write_status(test.status)
        self._write_message(test.message)
        self._write_separator('-')

    def message(self, msg):
        # Only warnings and errors are echoed to the console (on stderr).
        if self._is_logged(msg.level):
            self._write_with_highlighting('[ ', msg.level, ' ] ' + msg.message,
                                          stream=sys.__stderr__)

    def output_file(self, name, path):
        self._write('%-8s %s' % (name+':', path))

    def _write_info(self, name, doc, start_suite=False):
        # Reserve room for the ' | PASS |' status column except on start.
        maxwidth = self._width
        if not start_suite:
            maxwidth -= len(' | PASS |')
        info = self._get_info(name, doc, maxwidth)
        self._write(info, newline=start_suite)

    def _get_info(self, name, doc, maxwidth):
        # Over-long names are cut from the left so the unique end stays
        # visible; console length accounts for wide characters.
        if utils.get_console_length(name) > maxwidth:
            return utils.pad_console_length(name, maxwidth, cut_left=True)
        info = name if not doc else '%s :: %s' % (name, doc.splitlines()[0])
        return utils.pad_console_length(info, maxwidth)

    def _write_status(self, status):
        self._write_with_highlighting(' | ', status, ' |')

    def _write_message(self, message):
        if message:
            self._write(message.strip())

    def _write_separator(self, sep_char):
        self._write(sep_char * self._width)

    def _write(self, message, newline=True, stream=sys.__stdout__):
        if newline:
            message += '\n'
        stream.write(utils.encode_output(message).replace('\t', ' '*8))
        stream.flush()

    def _write_with_highlighting(self, before, highlighted, after,
                                 newline=True, stream=sys.__stdout__):
        # Only the middle part is highlighted.
        self._write(before, newline=False, stream=stream)
        self._highlighter.start(highlighted, stream)
        self._write(highlighted, newline=False, stream=stream)
        self._highlighter.end()
        self._write(after, newline=newline, stream=stream)
class StatusHighlighter:
    """Colors status/level words on the console, one highlighter per stream."""

    def __init__(self, colors):
        self._current = None
        self._highlighters = {
            sys.__stdout__: self._get_highlighter(sys.__stdout__, colors),
            sys.__stderr__: self._get_highlighter(sys.__stderr__, colors)
        }

    def start(self, message, stream=sys.__stdout__):
        # Dispatch on the word being written; raises KeyError for others.
        self._current = self._highlighters[stream]
        {'PASS': self._current.green,
         'FAIL': self._current.red,
         'ERROR': self._current.red,
         'WARN': self._current.yellow}[message]()

    def end(self):
        self._current.reset()

    def _get_highlighter(self, stream, colors):
        auto = hasattr(stream, 'isatty') and stream.isatty()
        enable = {'AUTO': auto,
                  'ON': True,
                  'FORCE': True,  # compatibility with 2.5.5 and earlier
                  'OFF': False}.get(colors.upper(), auto)
        return Highlighter(stream) if enable else NoHighlighting(stream)
| [
[
1,
0,
0.124,
0.0083,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.1405,
0.0083,
0,
0.66,
0.2,
735,
0,
1,
0,
0,
735,
0,
0
],
[
1,
0,
0.1488,
0.0083,
0,
0.66... | [
"import sys",
"from robot import utils",
"from highlighting import Highlighter, NoHighlighting",
"from loggerhelper import IsLogged",
"class CommandLineMonitor:\n\n def __init__(self, width=78, colors='AUTO'):\n self._width = width\n self._highlighter = StatusHighlighter(colors)\n se... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from robot import utils
from robot.errors import DataError
from robot.common import BaseTestSuite, BaseTestCase, BaseKeyword
from robot.output import LOGGER
from robot.output.loggerhelper import IsLogged, Message
def process_outputs(paths, settings):
    """Process one or more output files; return (TestSuite, ExecutionErrors).

    A single path is processed directly; several paths are merged under a
    virtual CombinedTestSuite with their errors concatenated.
    """
    if not paths:
        raise DataError('No output files given.')
    log_level = settings['LogLevel']
    if len(paths) == 1:
        return process_output(paths[0], log_level=log_level, settings=settings)
    combined = CombinedTestSuite(settings)
    errors = CombinedExecutionErrors()
    for path in paths:
        suite, suberrors = process_output(path, log_level=log_level)
        combined.add_suite(suite)
        errors.add(suberrors)
    return combined, errors
def process_output(path, log_level=None, settings=None):
    """Process one output file and return TestSuite and ExecutionErrors"""
    if not os.path.isfile(path):
        raise DataError("Output file '%s' does not exist." % path)
    LOGGER.info("Processing output file '%s'." % path)
    try:
        root = utils.etreewrapper.get_root(path)
    except:
        message = utils.get_error_message()
        raise DataError("Opening XML file '%s' failed: %s" % (path, message))
    suite_node = _get_suite_node(root, path)
    return (TestSuite(suite_node, log_level=log_level, settings=settings),
            ExecutionErrors(_get_errors_node(root)))
def _get_suite_node(root, path):
if root.tag != 'robot':
raise DataError("File '%s' is not Robot Framework output file." % path)
node = root.find('suite')
node.set('generator', root.get('generator', 'notset').split()[0].lower())
node.set('path', path)
return node
def _get_errors_node(root):
    # May return None; ExecutionErrors handles a missing <errors> element.
    return root.find('errors')
class _MissingStatus:
"""If XML was fixed for example by fixml.py, status tag may be missing"""
text = 'Could not find status.'
get = lambda self, name, default: name == 'status' and 'FAIL' or 'N/A'
class _BaseReader:
    """Common XML reading for suites, tests and keywords.

    Reads <doc> and <status> (status, message, start/end times) and computes
    the elapsed time. NOTE(review): relies on self.name being provided by a
    cooperating base class before __init__ runs -- confirm with subclasses.
    """

    def __init__(self, node):
        self.doc = self._get_doc(node)
        stnode = node.find('status')
        if stnode is None:
            # XML repaired e.g. by fixml.py may lack <status> entirely.
            stnode = _MissingStatus()
        self.status = stnode.get('status','').upper()
        if self.status not in ['PASS','FAIL', 'NOT_RUN']:
            raise DataError("Item '%s' has invalid status '%s'"
                            % (self.name, self.status))
        self.message = stnode.text or ''
        self.starttime = stnode.get('starttime', 'N/A')
        self.endtime = stnode.get('endtime', 'N/A')
        self.elapsedtime = utils.get_elapsed_time(self.starttime, self.endtime)

    def _get_doc(self, node):
        docnode = node.find('doc')
        if docnode is not None:
            return docnode.text or ''
        return ''
class _TestAndSuiteReader(_BaseReader):
    """Shared reading for tests and suites: keywords plus setup/teardown."""

    def __init__(self, node, log_level=None):
        _BaseReader.__init__(self, node)
        kws = [Keyword(elem, log_level) for elem in node.findall('kw')]
        # A leading setup / trailing teardown keyword is lifted out of the
        # plain keyword list into its own attribute.
        if kws and kws[0].type == 'setup':
            self.setup = kws.pop(0)
        if kws and kws[-1].type == 'teardown':
            self.teardown = kws.pop(-1)
        self.keywords = kws
class _SuiteReader(_TestAndSuiteReader):
    """Reads <suite> elements; suites keep only fixtures, not keywords."""

    def __init__(self, node, log_level=None):
        _TestAndSuiteReader.__init__(self, node, log_level)
        # Suites have no plain keywords, only setup/teardown parsed above.
        del self.keywords
        # NOTE(review): self.metadata presumably created by a cooperating
        # base class -- it is not initialized here.
        for item in node.findall('metadata/item'):
            self.metadata[item.get('name')] = item.text

    def _get_texts(self, node, path):
        return [elem.text for elem in node.findall(path)]
class _TestReader(_TestAndSuiteReader):
def __init__(self, node, log_level=None):
_TestAndSuiteReader.__init__(self, node, log_level)
self.tags = [tag.text for tag in node.findall('tags/tag')]
self.timeout = node.get('timeout', '')
class _KeywordReader(_BaseReader):
    """Reads a <kw> element: args, type, timeout and nested content.

    `children` keeps nested keywords and messages in document order so
    serialization reproduces the interleaving; `keywords` and `messages`
    hold the same objects partitioned by kind.
    """

    def __init__(self, node, log_level=None):
        _BaseReader.__init__(self, node)
        del(self.message)  # keywords carry no status message
        self.args = [(arg.text or '') for arg in node.findall('arguments/arg')]
        self.type = node.get('type', 'kw')
        self.timeout = node.get('timeout', '')
        self.keywords = []
        self.messages = []
        self.children = []
        # Messages below the threshold are dropped already while reading.
        log_filter = IsLogged(log_level or 'TRACE')
        for child in node:
            if child.tag == 'kw':
                kw = Keyword(child, log_level)
                self.keywords.append(kw)
                self.children.append(kw)
            elif child.tag == 'msg' and log_filter(child.get('level', 'INFO')):
                msg = MessageFromXml(child)
                self.messages.append(msg)
                self.children.append(msg)
class TestSuite(BaseTestSuite, _SuiteReader):
    """A test suite parsed from an output XML <suite> element."""

    def __init__(self, node, parent=None, log_level=None, settings=None):
        BaseTestSuite.__init__(self, node.get('name'),
                               node.get('source', None), parent)
        _SuiteReader.__init__(self, node, log_level=log_level)
        self._set_times_from_settings(settings)
        for snode in node.findall('suite'):
            # Propagate generator/path down so sub suites know them too.
            snode.set('generator', node.get('generator'))
            snode.set('path', node.get('path'))
            TestSuite(snode, parent=self, log_level=log_level)
        for tnode in node.findall('test'):
            TestCase(tnode, parent=self, log_level=log_level)
        self.set_status()
        # Only outputs generated by Robot itself mark teardown failures.
        if node.get('generator') == 'robot' and \
                    self.teardown and self.teardown.status == 'FAIL':
            self.suite_teardown_failed()

    def _set_times_from_settings(self, settings):
        # --starttime/--endtime options override times read from the XML.
        starttime, endtime = self._times_from_settings(settings)
        if not self.starttime or starttime != 'N/A':
            self.starttime = starttime
        if not self.endtime or endtime != 'N/A':
            self.endtime = endtime
        self.elapsedtime = utils.get_elapsed_time(self.starttime, self.endtime)

    def _times_from_settings(self, settings):
        if not settings:
            return 'N/A', 'N/A'
        return (self._get_time(settings['StartTime']),
                self._get_time(settings['EndTime']))

    def _get_time(self, timestamp):
        # Normalize a user-given timestamp; invalid input becomes 'N/A'.
        if not timestamp or utils.eq(timestamp, 'N/A'):
            return 'N/A'
        try:
            secs = utils.timestamp_to_secs(timestamp, seps=list(' :.-_'),
                                           millis=True)
        except ValueError:
            return 'N/A'
        return utils.secs_to_timestamp(secs, millis=True)

    def set_status(self):
        BaseTestSuite.set_status(self)
        if self.starttime == 'N/A' or self.endtime == 'N/A':
            # Without real times, elapsed time is the sum of the children's.
            subitems = self.suites + self.tests + [self.setup, self.teardown]
            self.elapsedtime = sum(item.elapsedtime for item in subitems
                                   if item is not None )

    def _set_critical_tags(self, critical):
        BaseTestSuite._set_critical_tags(self, critical)
        self.set_status()

    def _filter_by_tags(self, incls, excls):
        # Filtering invalidates recorded times; status is recomputed.
        ret = BaseTestSuite._filter_by_tags(self, incls, excls)
        self.starttime = self.endtime = 'N/A'
        self.set_status()
        return ret

    def _filter_by_names(self, suites, tests):
        ret = BaseTestSuite._filter_by_names(self, suites, tests)
        self.starttime = self.endtime = 'N/A'
        self.set_status()
        return ret

    def remove_keywords(self, how):
        """Remove keyword data per --removekeywords mode ('ALL'/'PASSED')."""
        should_remove = ShouldRemoveCallable(how)
        if not should_remove:
            return
        self._remove_fixture_keywords(should_remove)
        for suite in self.suites:
            suite.remove_keywords(how)
        for test in self.tests:
            test.remove_keywords(should_remove)

    def _remove_fixture_keywords(self, should_remove):
        critical_failures = self.critical_stats.failed != 0
        for kw in self.setup, self.teardown:
            if should_remove(kw, critical_failures):
                kw.remove_data()
class CombinedTestSuite(TestSuite):
    """Virtual top-level suite combining suites from several output files."""

    def __init__(self, settings):
        BaseTestSuite.__init__(self, name='')
        self.starttime = self.endtime = None
        self._set_times_from_settings(settings)

    def add_suite(self, suite):
        self.suites.append(suite)
        suite.parent = self
        self._add_suite_to_stats(suite)
        self.status = self.critical_stats.failed == 0 and 'PASS' or 'FAIL'
        if self.starttime == 'N/A' or self.endtime == 'N/A':
            # No explicit times -> elapsed time accumulates from sub suites.
            self.elapsedtime += suite.elapsedtime
class TestCase(BaseTestCase, _TestReader):
    """A test case parsed from output XML."""

    def __init__(self, node, parent, log_level=None):
        BaseTestCase.__init__(self, node.get('name'), parent)
        _TestReader.__init__(self, node, log_level=log_level)
        self.set_criticality(parent.critical)

    def remove_keywords(self, should_remove):
        """Strip keyword data when `should_remove` approves it."""
        failed = self.status != 'PASS'
        if not should_remove(self, failed):
            return
        for kw in self.keywords + [self.setup, self.teardown]:
            if kw is not None:
                kw.remove_data()

    def contains_warnings(self):
        for kw in self.keywords:
            if kw.contains_warnings():
                return True
        return False
class Keyword(BaseKeyword, _KeywordReader):
    """A keyword parsed from output XML."""

    def __init__(self, node, log_level=None):
        # Containers must exist before the readers start filling them.
        self._init_data()
        BaseKeyword.__init__(self, node.get('name'))
        _KeywordReader.__init__(self, node, log_level)

    def _init_data(self):
        self.messages = []
        self.keywords = []
        self.children = []

    def remove_data(self):
        """Drop all nested keywords and messages (--removekeywords)."""
        self._init_data()

    def contains_warnings(self):
        if any(msg.level == 'WARN' for msg in self.messages):
            return True
        return any(kw.contains_warnings() for kw in self.keywords)

    def __str__(self):
        return self.name

    def __repr__(self):
        return "'%s'" % self.name

    def serialize(self, serializer):
        serializer.start_keyword(self)
        for child in self.children:
            child.serialize(serializer)
        serializer.end_keyword(self)
class MessageFromXml(Message):
    """A log message constructed from a <msg> element."""

    def __init__(self, node):
        Message.__init__(self, node.text,
                         level=node.get('level', 'INFO'),
                         html=node.get('html', 'no') == 'yes',
                         timestamp=node.get('timestamp', 'N/A'),
                         linkable=node.get('linkable', 'no') == 'yes')

    def serialize(self, serializer):
        serializer.message(self)

    def __str__(self):
        return '%s %s %s' % (self.timestamp, self.level, self.message)

    def __repr__(self):
        # Multi-line messages are shown as their first line plus ellipsis.
        lines = self.message.split('\n')
        msg = lines[0] + '...' if len(lines) > 1 else lines[0]
        return "'%s %s'" % (self.level, msg.replace("'", '"'))
class ExecutionErrors:
    """Errors and warnings of a run, read from the <errors> element."""

    def __init__(self, node):
        # The element may be missing (e.g. repaired XML) -> no messages.
        self.messages = [] if node is None \
            else [MessageFromXml(m) for m in node.findall('msg')]

    def serialize(self, serializer):
        serializer.start_errors()
        for message in self.messages:
            message.serialize(serializer)
        serializer.end_errors()
class CombinedExecutionErrors(ExecutionErrors):
    """Execution errors merged from several output files."""

    def __init__(self):
        self.messages = []

    def add(self, other):
        self.messages.extend(other.messages)
def ShouldRemoveCallable(how):
    """Return a callable deciding if an item's keyword data is removed.

    'ALL'    -> remove from every non-None item.
    'PASSED' -> remove only when there were no critical failures and the
                item contains no warnings.
    other    -> None (nothing is removed).
    """
    def remove_all(item, critical_failures):
        return item is not None

    def remove_passed_without_warnings(item, critical_failures):
        if item is None or critical_failures:
            return False
        return not item.contains_warnings()

    mode = how.upper()
    if mode == 'ALL':
        return remove_all
    if mode == 'PASSED':
        return remove_passed_without_warnings
    return None
| [
[
1,
0,
0.0424,
0.0028,
0,
0.66,
0,
79,
0,
1,
0,
0,
79,
0,
0
],
[
1,
0,
0.048,
0.0028,
0,
0.66,
0.0435,
735,
0,
1,
0,
0,
735,
0,
0
],
[
1,
0,
0.0508,
0.0028,
0,
0.6... | [
"import os.path",
"from robot import utils",
"from robot.errors import DataError",
"from robot.common import BaseTestSuite, BaseTestCase, BaseKeyword",
"from robot.output import LOGGER",
"from robot.output.loggerhelper import IsLogged, Message",
"def process_outputs(paths, settings):\n if not paths:\... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from loggerhelper import AbstractLogger
class FileLogger(AbstractLogger):
    """Logger writing timestamped, level-tagged entries to a file."""

    def __init__(self, path, level):
        AbstractLogger.__init__(self, level)
        self._writer = self._get_writer(path)

    def _get_writer(self, path):
        # Overridden by unit tests to avoid touching the file system.
        return open(path, 'wb')

    def message(self, msg):
        if not self._is_logged(msg.level):
            return
        entry = '%s | %s | %s\n' % (msg.timestamp, msg.level.ljust(5),
                                    msg.message)
        self._writer.write(entry.replace('\n', os.linesep).encode('UTF-8'))

    def start_suite(self, suite):
        self.info("Started test suite '%s'" % suite.name)

    def end_suite(self, suite):
        self.info("Ended test suite '%s'" % suite.name)

    def start_test(self, test):
        self.info("Started test case '%s'" % test.name)

    def end_test(self, test):
        self.info("Ended test case '%s'" % test.name)

    def start_keyword(self, kw):
        self.debug("Started keyword '%s'" % kw.name)

    def end_keyword(self, kw):
        self.debug("Ended keyword '%s'" % kw.name)

    def output_file(self, name, path):
        self.info('%s: %s' % (name, path))

    def close(self):
        self._writer.close()
| [
[
1,
0,
0.2586,
0.0172,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.2931,
0.0172,
0,
0.66,
0.5,
426,
0,
1,
0,
0,
426,
0,
0
],
[
3,
0,
0.6724,
0.6724,
0,
0.6... | [
"import os",
"from loggerhelper import AbstractLogger",
"class FileLogger(AbstractLogger):\n\n def __init__(self, path, level):\n AbstractLogger.__init__(self, level)\n self._writer = self._get_writer(path)\n\n def _get_writer(self, path):\n # Hook for unittests",
" def __init__(... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot import utils
from logger import LOGGER
from loggerhelper import IsLogged
def DebugFile(path):
    """Factory returning a _DebugFileWriter for `path`, or None.

    None is returned when the debug file is disabled (path 'NONE') or when
    opening the file fails; the failure is logged and execution continues
    without a debug file.
    """
    if path == 'NONE':
        LOGGER.info('No debug file')
        return None
    try:
        LOGGER.info('Debug file: %s' % path)
        return _DebugFileWriter(path)
    # Fix: a bare 'except:' also swallowed SystemExit/KeyboardInterrupt,
    # making the run un-interruptible here; catch only real errors.
    except Exception:
        LOGGER.error("Opening debug file '%s' failed and writing to debug file "
                     "is disabled. Error: %s" % (path, utils.get_error_message()))
        return None
class _DebugFileWriter:
    """Writes a plain-text trace of the run into the debug file."""

    # Separator characters used around suite/test/keyword sections.
    _separators = {'SUITE': '=', 'TEST': '-', 'KW': '~'}

    def __init__(self, path):
        self._indent = 0
        self._kw_level = 0
        self._separator_written_last = False
        self._file = open(path, 'wb')
        self._is_logged = IsLogged('DEBUG')

    def start_suite(self, suite):
        self._separator('SUITE')
        self._start('SUITE', suite.longname)
        self._separator('SUITE')

    def end_suite(self, suite):
        self._separator('SUITE')
        self._end('SUITE', suite.longname, suite.elapsedtime)
        self._separator('SUITE')
        if self._indent == 0:
            # Top-level suite ended -> the whole run is over; close file.
            LOGGER.output_file('Debug', self._file.name)
            self.close()

    def start_test(self, test):
        self._separator('TEST')
        self._start('TEST', test.name)
        self._separator('TEST')

    def end_test(self, test):
        self._separator('TEST')
        self._end('TEST', test.name, test.elapsedtime)
        self._separator('TEST')

    def start_keyword(self, kw):
        # Separator only before top-level keywords, not nested ones.
        if self._kw_level == 0:
            self._separator('KW')
        self._start(self._get_kw_type(kw), kw.name, kw.args)
        self._kw_level += 1

    def end_keyword(self, kw):
        self._end(self._get_kw_type(kw), kw.name, kw.elapsedtime)
        self._kw_level -= 1

    def log_message(self, msg):
        if self._is_logged(msg.level):
            self._write(msg.message)

    def close(self):
        if not self._file.closed:
            self._file.close()

    def _get_kw_type(self, kw):
        if kw.type in ['setup','teardown']:
            return kw.type.upper()
        return 'KW'

    def _start(self, type_, name, args=''):
        args = ' ' + utils.seq2str2(args)
        self._write('+%s START %s: %s%s' % ('-'*self._indent, type_, name, args))
        self._indent += 1

    def _end(self, type_, name, elapsed):
        self._indent -= 1
        self._write('+%s END %s: %s (%s)' % ('-'*self._indent, type_, name, elapsed))

    def _separator(self, type_):
        self._write(self._separators[type_] * 78, True)

    def _write(self, text, separator=False):
        # Consecutive separator lines are collapsed into one.
        if self._separator_written_last and separator:
            return
        self._file.write(utils.unic(text).encode('UTF-8').rstrip() + '\n')
        self._file.flush()
        self._separator_written_last = separator
| [
[
1,
0,
0.1468,
0.0092,
0,
0.66,
0,
735,
0,
1,
0,
0,
735,
0,
0
],
[
1,
0,
0.1651,
0.0092,
0,
0.66,
0.25,
532,
0,
1,
0,
0,
532,
0,
0
],
[
1,
0,
0.1743,
0.0092,
0,
0.... | [
"from robot import utils",
"from logger import LOGGER",
"from loggerhelper import IsLogged",
"def DebugFile(path):\n if path == 'NONE':\n LOGGER.info('No debug file')\n return None\n try:\n LOGGER.info('Debug file: %s' % path)\n return _DebugFileWriter(path)\n except:",
... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import sys
from robot import utils
from robot.errors import DataError
from loggerhelper import AbstractLoggerProxy
from logger import LOGGER
if utils.is_jython:
from java.lang import Object
from java.util import HashMap
class Listeners:
    """Forwards execution events to all imported listener plug-ins.

    Supports both listener API versions: version 1 listener methods get
    plain positional arguments while version 2 methods get the item name
    plus a dictionary of attributes.
    """
    # Attribute names included in the version 2 attrs dict by start_* events.
    _start_attrs = ['doc', 'starttime', 'longname']
    # end_* events additionally carry result information.
    _end_attrs = _start_attrs + ['endtime', 'elapsedtime', 'status', 'message']
    def __init__(self, listeners):
        # `listeners` is a sequence of (name, args) pairs to import.
        self._listeners = self._import_listeners(listeners)
        self._running_test = False
        # Remembers the fixture type while a setup/teardown keyword is
        # running (see _get_keyword_type).
        self._setup_or_teardown_type = None
    def __nonzero__(self):
        # Truthy only if at least one listener was imported successfully.
        return bool(self._listeners)
    def _import_listeners(self, listener_data):
        # Import failures are reported but never abort execution.
        listeners = []
        for name, args in listener_data:
            try:
                listeners.append(_ListenerProxy(name, args))
            except:
                message, details = utils.get_error_details()
                if args:
                    name += ':' + ':'.join(args)
                LOGGER.error("Taking listener '%s' into use failed: %s"
                             % (name, message))
                LOGGER.info("Details:\n%s" % details)
        return listeners
    def start_suite(self, suite):
        """Notify all listeners that `suite` started."""
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.start_suite, suite.name, suite.doc)
            else:
                attrs = self._get_start_attrs(suite, 'metadata')
                attrs.update({'tests' : [t.name for t in suite.tests ],
                              'suites': [s.name for s in suite.suites],
                              'totaltests': suite.get_test_count()})
                li.call_method(li.start_suite, suite.name, attrs)
    def end_suite(self, suite):
        """Notify all listeners that `suite` ended."""
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.end_suite, suite.status,
                               suite.get_full_message())
            else:
                attrs = self._get_end_attrs(suite)
                attrs.update({'statistics': suite.get_stat_message()})
                li.call_method(li.end_suite, suite.name, attrs)
    def start_test(self, test):
        """Notify all listeners that `test` started."""
        self._running_test = True
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.start_test, test.name, test.doc, test.tags)
            else:
                attrs = self._get_start_attrs(test, 'tags')
                li.call_method(li.start_test, test.name, attrs)
    def end_test(self, test):
        """Notify all listeners that `test` ended."""
        self._running_test = False
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.end_test, test.status, test.message)
            else:
                attrs = self._get_end_attrs(test, 'tags')
                li.call_method(li.end_test, test.name, attrs)
    def start_keyword(self, kw):
        """Notify all listeners that keyword `kw` started."""
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.start_keyword, kw.name, kw.args)
            else:
                attrs = self._get_start_attrs(kw, 'args', '-longname')
                attrs['type'] = self._get_keyword_type(kw, start=True)
                li.call_method(li.start_keyword, kw.name, attrs)
    def end_keyword(self, kw):
        """Notify all listeners that keyword `kw` ended."""
        for li in self._listeners:
            if li.version == 1:
                li.call_method(li.end_keyword, kw.status)
            else:
                attrs = self._get_end_attrs(kw, 'args', '-longname', '-message')
                attrs['type'] = self._get_keyword_type(kw, start=False)
                li.call_method(li.end_keyword, kw.name, attrs)
    def _get_keyword_type(self, kw, start=True):
        # When running setup or teardown, only the top level keyword has type
        # set to setup/teardown but we want to pass that type also to all
        # start/end_keyword listener methods called below that keyword.
        if kw.type == 'kw':
            return self._setup_or_teardown_type or 'Keyword'
        kw_type = self._get_setup_or_teardown_type(kw)
        self._setup_or_teardown_type = kw_type if start else None
        return kw_type
    def _get_setup_or_teardown_type(self, kw):
        # E.g. 'Test Setup' or 'Suite Teardown'.
        return '%s %s' % (('Test' if self._running_test else 'Suite'),
                          kw.type.title())
    def log_message(self, msg):
        # Log messages are part of listener API version 2 only.
        for li in self._listeners:
            if li.version == 2:
                li.call_method(li.log_message, self._create_msg_dict(msg))
    def message(self, msg):
        # The framework's own messages; listener API version 2 only.
        for li in self._listeners:
            if li.version == 2:
                li.call_method(li.message, self._create_msg_dict(msg))
    def _create_msg_dict(self, msg):
        return {'timestamp': msg.timestamp, 'message': msg.message,
                'level': msg.level, 'html': 'yes' if msg.html else 'no'}
    def output_file(self, name, path):
        """Notify listeners about a finished output file (log, report, ...)."""
        for li in self._listeners:
            li.call_method(getattr(li, '%s_file' % name.lower()), path)
    def close(self):
        for li in self._listeners:
            li.call_method(li.close)
    def _get_start_attrs(self, item, *names):
        return self._get_attrs(item, self._start_attrs, names)
    def _get_end_attrs(self, item, *names):
        return self._get_attrs(item, self._end_attrs, names)
    def _get_attrs(self, item, defaults, extras):
        # `extras` may both add names and, with a '-' prefix, remove them.
        names = self._get_attr_names(defaults, extras)
        return dict((n, self._get_attr_value(item, n)) for n in names)
    def _get_attr_names(self, defaults, extras):
        names = list(defaults)
        for name in extras:
            if name.startswith('-'):
                names.remove(name[1:])
            else:
                names.append(name)
        return names
    def _get_attr_value(self, item, name):
        value = getattr(item, name)
        return self._take_copy_of_mutable_value(value)
    def _take_copy_of_mutable_value(self, value):
        # Listeners get copies of mutable values so changes they make
        # don't leak back into the framework's own objects.
        if isinstance(value, (dict, utils.NormalizedDict)):
            return dict(value)
        if isinstance(value, list):
            return list(value)
        return value
class _ListenerProxy(AbstractLoggerProxy):
    """Wraps one imported listener and guards every call into it."""
    # Listener methods this proxy knows how to forward.
    _methods = ['start_suite', 'end_suite', 'start_test', 'end_test',
                'start_keyword', 'end_keyword', 'log_message', 'message',
                'output_file', 'report_file', 'log_file', 'debug_file',
                'xunit_file', 'close']
    def __init__(self, name, args):
        listener = self._import_listener(name, args)
        AbstractLoggerProxy.__init__(self, listener)
        self.name = name
        self.version = self._get_version(listener)
        # Java listeners need dict arguments converted to HashMaps
        # (see call_method).
        self.is_java = utils.is_jython and isinstance(listener, Object)
        # Methods that have raised once; they are never called again.
        self._failed = []
    def _import_listener(self, name, args):
        listener, source = utils.import_(name, 'listener')
        if not inspect.ismodule(listener):
            # Class-based listener: instantiate it with the given arguments.
            listener = listener(*args)
        elif args:
            raise DataError("Listeners implemented as modules do not take arguments")
        LOGGER.info("Imported listener '%s' with arguments %s (source %s)"
                    % (name, utils.seq2str2(args), source))
        return listener
    def _get_version(self, listener):
        # A missing or non-integer ROBOT_LISTENER_API_VERSION defaults to 1.
        try:
            return int(getattr(listener, 'ROBOT_LISTENER_API_VERSION', 1))
        except ValueError:
            return 1
    def call_method(self, method, *args):
        """Call a listener method, disabling it permanently if it raises."""
        if method in self._failed:
            return
        if self.is_java:
            args = [self._to_map(a) if isinstance(a, dict) else a for a in args]
        try:
            method(*args)
        except:
            self._failed.append(method)
            self._report_error(method)
    def _report_error(self, method):
        message, details = utils.get_error_details()
        LOGGER.error("Method '%s' of listener '%s' failed and is disabled: %s"
                     % (method.__name__, self.name, message))
        LOGGER.info("Details:\n%s" % details)
    def _to_map(self, dictionary):
        # Convert a Python dict to java.util.HashMap for Java listeners.
        map = HashMap()
        for key, value in dictionary.iteritems():
            map.put(key, value)
        return map
| [
[
1,
0,
0.0655,
0.0044,
0,
0.66,
0,
878,
0,
1,
0,
0,
878,
0,
0
],
[
1,
0,
0.0699,
0.0044,
0,
0.66,
0.125,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0786,
0.0044,
0,
0... | [
"import inspect",
"import sys",
"from robot import utils",
"from robot.errors import DataError",
"from loggerhelper import AbstractLoggerProxy",
"from logger import LOGGER",
"if utils.is_jython:\n from java.lang import Object\n from java.util import HashMap",
" from java.lang import Object",
... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to configure Python's standard `logging` module.
After this module is imported, messages logged with `logging` module
are, by default, propagated to Robot's log file.
"""
import logging
from robot.api import logger
class RobotHandler(logging.Handler):
    """Handler that routes stdlib `logging` records to Robot's logger."""

    def emit(self, record):
        """Forward one log record to the Robot log method for its level."""
        self._get_logger_method(record.levelno)(record.getMessage())

    def _get_logger_method(self, level):
        # WARNING and above -> warn; DEBUG and below -> debug; rest -> info.
        if level >= logging.WARNING:
            return logger.warn
        return logger.debug if level <= logging.DEBUG else logger.info
class NullStream(object):
    """File-like sink that silently discards everything written to it."""

    def write(self, message):
        # Deliberately a no-op: output is thrown away.
        pass

    def flush(self):
        pass

    def close(self):
        pass
# Module import side effect: configure the root logger so that records go to
# a NullStream-backed default handler (nothing visible on the console) and to
# RobotHandler, which forwards them to Robot's own logging.
logging.basicConfig(level=logging.NOTSET, stream=NullStream())
logging.getLogger().addHandler(RobotHandler())
| [
[
8,
0,
0.3208,
0.0943,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3962,
0.0189,
0,
0.66,
0.1667,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.434,
0.0189,
0,
0.66,... | [
"\"\"\"Module to configure Python's standard `logging` module.\n\nAfter this module is imported, messages logged with `logging` module\nare, by default, propagated to Robot's log file.\n\"\"\"",
"import logging",
"from robot.api import logger",
"class RobotHandler(logging.Handler):\n\n def emit(self, recor... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from robot import utils
from loggerhelper import AbstractLogger, AbstractLoggerProxy, Message
from filelogger import FileLogger
from monitor import CommandLineMonitor
class Logger(AbstractLogger):
    """A global logger proxy to which new loggers may be registered.

    Whenever something is written to LOGGER in code, all registered loggers are
    notified. Messages are also cached and cached messages written to new
    loggers when they are registered.

    Tools using Robot Framework's internal modules should register their own
    loggers at least to get notifications about errors and warnings. A shortcut
    to get errors/warnings into console is using 'register_console_logger'.
    """
    def __init__(self):
        self._loggers = LoggerCollection()
        # Cache of messages relayed to loggers registered later; set to
        # None by disable_message_cache() to stop caching.
        self._message_cache = []
        # A console logger is registered automatically until explicitly
        # disabled or replaced via register_console_logger().
        self._register_console_logger()
        self._console_logger_disabled = False
    def disable_message_cache(self):
        self._message_cache = None
    def disable_automatic_console_logger(self):
        """Remove the default console logger; returns it the first time only."""
        if not self._console_logger_disabled:
            self._console_logger_disabled = True
            return self._loggers.remove_first_regular_logger()
    def register_logger(self, *loggers):
        for log in loggers:
            logger = self._loggers.register_regular_logger(log)
            self._relay_cached_messages_to(logger)
    def register_context_changing_logger(self, logger):
        # Context changing loggers are notified before regular ones when
        # something starts and after them when something ends (see
        # LoggerCollection).
        log = self._loggers.register_context_changing_logger(logger)
        self._relay_cached_messages_to(log)
    def _relay_cached_messages_to(self, logger):
        if self._message_cache:
            for msg in self._message_cache:
                logger.message(msg)
    def unregister_logger(self, *loggers):
        for log in loggers:
            self._loggers.unregister_logger(log)
    def register_console_logger(self, width=78, colors='AUTO'):
        # Replaces the automatically registered console logger.
        self.disable_automatic_console_logger()
        self._register_console_logger(width, colors)
    def _register_console_logger(self, width=78, colors='AUTO'):
        monitor = CommandLineMonitor(width, colors)
        self._loggers.register_regular_logger(monitor)
    def register_file_logger(self, path=None, level='INFO'):
        """Register a syslog file logger; falls back to env var configuration."""
        if not path:
            path = os.environ.get('ROBOT_SYSLOG_FILE', 'NONE')
            level = os.environ.get('ROBOT_SYSLOG_LEVEL', level)
        if path.upper() == 'NONE':
            return
        try:
            logger = FileLogger(path, level)
        except:
            self.error("Opening syslog file '%s' failed: %s"
                       % (path, utils.get_error_message()))
        else:
            self.register_logger(logger)
    def message(self, msg):
        """Messages about what the framework is doing, warnings, errors, ..."""
        for logger in self._loggers.all_loggers():
            logger.message(msg)
        if self._message_cache is not None:
            self._message_cache.append(msg)
    def log_message(self, msg):
        """Log messages written (mainly) by libraries"""
        for logger in self._loggers.all_loggers():
            logger.log_message(msg)
        if msg.level == 'WARN':
            # Warnings are additionally relayed as framework messages.
            msg.linkable = True
            self.message(msg)
    def warn(self, msg, log=False):
        method = self.log_message if log else self.message
        method(Message(msg, 'WARN'))
    def output_file(self, name, path):
        """Finished output, report, log, debug, or xunit file"""
        for logger in self._loggers.all_loggers():
            logger.output_file(name, path)
    def close(self):
        # Close all loggers and reset to a clean, empty state.
        for logger in self._loggers.all_loggers():
            logger.close()
        self._loggers = LoggerCollection()
        self._message_cache = []
    def start_suite(self, suite):
        for logger in self._loggers.starting_loggers():
            logger.start_suite(suite)
    def end_suite(self, suite):
        for logger in self._loggers.ending_loggers():
            logger.end_suite(suite)
    def start_test(self, test):
        for logger in self._loggers.starting_loggers():
            logger.start_test(test)
    def end_test(self, test):
        for logger in self._loggers.ending_loggers():
            logger.end_test(test)
    def start_keyword(self, keyword):
        for logger in self._loggers.starting_loggers():
            logger.start_keyword(keyword)
    def end_keyword(self, keyword):
        for logger in self._loggers.ending_loggers():
            logger.end_keyword(keyword)
    def __iter__(self):
        return iter(self._loggers)
class LoggerCollection(object):
    """Keeps registered loggers in two groups and controls their ordering.

    Context changing loggers come first in all_loggers()/starting_loggers()
    and last in ending_loggers(); regular loggers get the opposite order.
    """

    def __init__(self):
        self._regular_loggers = []
        self._context_changing_loggers = []

    def register_regular_logger(self, logger):
        """Wrap `logger` in a proxy, store it, and return the proxy."""
        proxy = _LoggerProxy(logger)
        self._regular_loggers.append(proxy)
        return proxy

    def register_context_changing_logger(self, logger):
        """Like register_regular_logger but for context changing loggers."""
        proxy = _LoggerProxy(logger)
        self._context_changing_loggers.append(proxy)
        return proxy

    def remove_first_regular_logger(self):
        return self._regular_loggers.pop(0)

    def unregister_logger(self, logger):
        # Drop every proxy wrapping the given logger from both groups.
        self._regular_loggers = [p for p in self._regular_loggers
                                 if p.logger is not logger]
        self._context_changing_loggers = [p for p in self._context_changing_loggers
                                          if p.logger is not logger]

    def starting_loggers(self):
        return self.all_loggers()

    def ending_loggers(self):
        return self._regular_loggers + self._context_changing_loggers

    def all_loggers(self):
        return self._context_changing_loggers + self._regular_loggers

    def __iter__(self):
        return iter(self.all_loggers())
class _LoggerProxy(AbstractLoggerProxy):
    # Names of the logger methods this proxy forwards to the wrapped logger;
    # resolution semantics are defined by AbstractLoggerProxy.
    _methods = ['message', 'log_message', 'output_file', 'close',
                'start_suite', 'end_suite', 'start_test', 'end_test',
                'start_keyword', 'end_keyword']
LOGGER = Logger()
| [
[
1,
0,
0.0833,
0.0052,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0938,
0.0052,
0,
0.66,
0.125,
735,
0,
1,
0,
0,
735,
0,
0
],
[
1,
0,
0.1042,
0.0052,
0,
0... | [
"import os",
"from robot import utils",
"from loggerhelper import AbstractLogger, AbstractLoggerProxy, Message",
"from filelogger import FileLogger",
"from monitor import CommandLineMonitor",
"class Logger(AbstractLogger):\n \"\"\"A global logger proxy to which new loggers may be registered.\n\n Whe... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Windows highlighting code adapted from color_console.py. It is copyright
# Andre Burgaud, licensed under the MIT License, and available here:
# http://www.burgaud.com/bring-colors-to-the-windows-console-with-python/
import os
import sys
try:
from ctypes import windll, Structure, c_short, c_ushort, byref
except ImportError: # Not on Windows or using Jython
windll = None
def Highlighter(stream):
    """Return a console highlighter suitable for the current platform."""
    if os.sep != '/':
        # Windows: needs the ctypes console API; without it, no colors.
        return DosHighlighter(stream) if windll else NoHighlighting(stream)
    return UnixHighlighter(stream)
class UnixHighlighter(object):
    """Highlights console output with ANSI escape sequences (unixes)."""

    _GREEN = '\033[32m'
    _RED = '\033[31m'
    _YELLOW = '\033[33m'
    _RESET = '\033[0m'

    def __init__(self, stream):
        self._stream = stream

    def green(self):
        self._set_color(self._GREEN)

    def red(self):
        self._set_color(self._RED)

    def yellow(self):
        self._set_color(self._YELLOW)

    def reset(self):
        self._set_color(self._RESET)

    def _set_color(self, color):
        # Overridden by NoHighlighting to suppress colors entirely.
        self._stream.write(color)
class NoHighlighting(UnixHighlighter):
    """Highlighter variant that produces no colors at all."""

    def _set_color(self, color):
        pass
class DosHighlighter(object):
    """Highlights console output on Windows via the kernel32 console API."""

    _FOREGROUND_GREEN = 0x2
    _FOREGROUND_RED = 0x4
    _FOREGROUND_YELLOW = 0x6
    _FOREGROUND_GREY = 0x7
    _FOREGROUND_INTENSITY = 0x8
    _BACKGROUND_MASK = 0xF0
    _STDOUT_HANDLE = -11
    _STDERR_HANDLE = -12

    def __init__(self, stream):
        self._handle = self._get_std_handle(stream)
        self._orig_colors = self._get_colors()
        # Preserve the original background so only the foreground changes.
        self._background = self._orig_colors & self._BACKGROUND_MASK

    def green(self):
        self._set_foreground_colors(self._FOREGROUND_GREEN)

    def red(self):
        self._set_foreground_colors(self._FOREGROUND_RED)

    def yellow(self):
        self._set_foreground_colors(self._FOREGROUND_YELLOW)

    def reset(self):
        self._set_colors(self._orig_colors)

    def _get_std_handle(self, stream):
        if stream is sys.__stdout__:
            handle_id = self._STDOUT_HANDLE
        else:
            handle_id = self._STDERR_HANDLE
        return windll.kernel32.GetStdHandle(handle_id)

    def _get_colors(self):
        info = _CONSOLE_SCREEN_BUFFER_INFO()
        success = windll.kernel32.GetConsoleScreenBufferInfo(self._handle,
                                                             byref(info))
        if not success:
            # Call failed: fall back to default console colors (gray on black).
            return self._FOREGROUND_GREY
        return info.wAttributes

    def _set_foreground_colors(self, colors):
        self._set_colors(colors | self._FOREGROUND_INTENSITY | self._background)

    def _set_colors(self, colors):
        windll.kernel32.SetConsoleTextAttribute(self._handle, colors)
if windll:
    # ctypes mirrors of the Win32 console API structures. Field names,
    # order, and types must match the Windows definitions exactly.
    class _COORD(Structure):
        _fields_ = [("X", c_short),
                    ("Y", c_short)]
    class _SMALL_RECT(Structure):
        _fields_ = [("Left", c_short),
                    ("Top", c_short),
                    ("Right", c_short),
                    ("Bottom", c_short)]
    class _CONSOLE_SCREEN_BUFFER_INFO(Structure):
        # Passed by reference to GetConsoleScreenBufferInfo in
        # DosHighlighter._get_colors; only wAttributes is read.
        _fields_ = [("dwSize", _COORD),
                    ("dwCursorPosition", _COORD),
                    ("wAttributes", c_ushort),
                    ("srWindow", _SMALL_RECT),
                    ("dwMaximumWindowSize", _COORD)]
| [
[
1,
0,
0.1496,
0.0079,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.1575,
0.0079,
0,
0.66,
0.1429,
509,
0,
1,
0,
0,
509,
0,
0
],
[
7,
0,
0.1772,
0.0315,
0,
... | [
"import os",
"import sys",
"try:\n from ctypes import windll, Structure, c_short, c_ushort, byref\nexcept ImportError: # Not on Windows or using Jython\n windll = None",
" from ctypes import windll, Structure, c_short, c_ushort, byref",
" windll = None",
"def Highlighter(stream):\n if os.s... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from robot.output.loggerhelper import Message, LEVELS
from robot import utils
class StdoutLogSplitter(object):
    """Splits messages logged through stdout (or stderr) into Message objects"""

    # Matches level markers such as '*INFO*' or '*HTML:1308435758.064*' at
    # the beginning of a line. Raw strings make the escapes (\*, \d, \.)
    # explicit instead of relying on Python passing unknown escapes through
    # unchanged in plain string literals.
    _split_from_levels = re.compile(r'^(?:\*'
                                    r'(%s|HTML)'          # Level
                                    r'(:\d+(?:\.\d+)?)?'  # Optional timestamp
                                    r'\*)' % '|'.join(LEVELS), re.MULTILINE)

    def __init__(self, output):
        self._messages = list(self._get_messages(output.strip()))

    def _get_messages(self, output):
        """Yield a Message for each (level, timestamp, text) triple."""
        for level, timestamp, msg in self._split_output(output):
            if timestamp:
                # Strip the leading ':' before the numeric part.
                timestamp = self._format_timestamp(timestamp[1:])
            yield Message(msg.strip(), level, timestamp=timestamp)

    def _split_output(self, output):
        # re.split with two capturing groups produces tokens in runs of
        # three: level, optional timestamp, message text.
        tokens = self._split_from_levels.split(output)
        tokens = self._add_initial_level_and_time_if_needed(tokens)
        for i in xrange(0, len(tokens), 3):
            yield tokens[i:i+3]

    def _add_initial_level_and_time_if_needed(self, tokens):
        # re.split yields an empty first token when the output started with
        # a level marker; otherwise the leading text defaults to INFO level.
        if self._output_started_with_level(tokens):
            return tokens[1:]
        return ['INFO', None] + tokens

    def _output_started_with_level(self, tokens):
        return tokens[0] == ''

    def _format_timestamp(self, millis):
        # Marker timestamps are in milliseconds; format_time wants seconds.
        return utils.format_time(float(millis)/1000, millissep='.')

    def __iter__(self):
        return iter(self._messages)
| [
[
1,
0,
0.2679,
0.0179,
0,
0.66,
0,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.3036,
0.0179,
0,
0.66,
0.3333,
19,
0,
2,
0,
0,
19,
0,
0
],
[
1,
0,
0.3214,
0.0179,
0,
0.... | [
"import re",
"from robot.output.loggerhelper import Message, LEVELS",
"from robot import utils",
"class StdoutLogSplitter(object):\n \"\"\"Splits messages logged through stdout (or stderr) into Message objects\"\"\"\n\n _split_from_levels = re.compile('^(?:\\*'\n '(%s|... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from output import Output
from logger import LOGGER
from monitor import CommandLineMonitor
from xmllogger import XmlLogger
from loggerhelper import LEVELS, Message
from readers import process_output, process_outputs
# Hook to the active output; set by Output itself.
# Use only if no other way is available (e.g. from the BuiltIn library).
OUTPUT = None
def TestSuite(outpath):
    """Factory method for getting test suite from an xml output file.

    If you want statistics get suite first and say Statistics(suite).
    """
    suite, errors = process_output(outpath)
    def write_to_file(path=None):
        """Write processed suite (incl. statistics and errors) back to xml.

        If path is not given the suite is written into the same file as it
        originally was read from.
        """
        # Imported lazily to avoid a circular dependency at module load.
        from robot.result import RobotTestOutput
        target = outpath if path is None else path
        suite.set_status()
        RobotTestOutput(suite, errors).serialize_output(target, suite)
    # Attach the helper to the suite so callers can round-trip it back to xml.
    suite.write_to_file = write_to_file
    return suite
| [
[
1,
0,
0.3137,
0.0196,
0,
0.66,
0,
886,
0,
1,
0,
0,
886,
0,
0
],
[
1,
0,
0.3333,
0.0196,
0,
0.66,
0.1429,
532,
0,
1,
0,
0,
532,
0,
0
],
[
1,
0,
0.3529,
0.0196,
0,
... | [
"from output import Output",
"from logger import LOGGER",
"from monitor import CommandLineMonitor",
"from xmllogger import XmlLogger",
"from loggerhelper import LEVELS, Message",
"from readers import process_output, process_outputs",
"OUTPUT = None",
"def TestSuite(outpath):\n \"\"\"Factory method ... |
#!/usr/bin/env python
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rebot -- Robot Framework Report and Log Generator
Version: <VERSION>
Usage: rebot [options] robot_outputs
or: interpreter /path/robot/rebot.py [options] robot_outputs
   or: python -m robot.rebot [options] robot_outputs
Inputs to Rebot are XML output files generated by Robot Framework test runs or
earlier Rebot executions. Rebot can be used to generate logs and reports in
HTML format. It can also produce new XML output files which can be further
processed with Rebot or other tools.
When more than one input file is given, a new combined test suite containing
information from given files is created. This allows combining multiple outputs
together to create higher level reports.
For more information about Robot Framework run 'pybot --help' or go to
http://robotframework.org.
Options:
-N --name name Set the name of the top level test suite. Underscores
in the name are converted to spaces. Default name is
created from the name of the executed data source.
-D --doc documentation Set the documentation of the top level test suite.
Underscores in the documentation are converted to
spaces and it may also contain simple HTML formatting
(e.g. *bold* and http://url/).
-M --metadata name:value * Set metadata of the top level test suite.
Underscores in the name and value are converted to
spaces. Value can contain same HTML formatting as
--doc. Example: '--metadata version:1.2'
-G --settag tag * Sets given tag(s) to all executed test cases.
-t --test name * Select test cases to run by name or long name. Name
is case and space insensitive and it can also be a
simple pattern where '*' matches anything and '?'
matches any char. If using '*' and '?' in the console
is problematic see --escape and --argumentfile.
-s --suite name * Select test suites by name. When this option is used
with --test, --include or --exclude, only test cases
in matching suites and also matching other filtering
criteria are selected. Given name can be a simple
pattern similarly as with --test.
-i --include tag * Select test cases to run by tag. Similarly as name in
--test, tag is case and space insensitive. There are
three ways to include test based on tags:
1) One tag as a simple pattern. Tests having a tag
matching the pattern are included. Example: 'it-*'
2) Two or more tags (or patterns) separated by '&' or
'AND'. Only tests having all these tags are included.
Examples: 'tag1&tag2', 'smokeANDowner-*ANDit-10'
3) Two or more tags (or patterns) separated by 'NOT'.
Tests having the first tag but not any of the latter
ones are included. Example: 'it-10NOTsmoke'
-e --exclude tag * Select test cases not to run by tag. These tests are
not run even if they are included with --include.
Tags are excluded using the rules explained in
--include.
-c --critical tag * Tests having given tag are considered critical. If no
critical tags are set, all tags are critical. Tags
can be given as a pattern like e.g. with --test.
Resets possible critical tags set earlier.
-n --noncritical tag * Tests with given tag are not critical even if they
have a tag set with --critical. Tag can be a pattern.
Resets possible non critical tags set earlier.
-d --outputdir dir Where to create output files. The default is the
directory where Rebot is run from and the given path
is considered relative to that unless it is absolute.
-o --output file XML output file. Not created unless this option is
specified. Given path, similarly as paths given to
--log and --report, is relative to --outputdir unless
given as an absolute path. Default: output.xml
-l --log file HTML log file. Can be disabled by giving a special
name 'NONE'. Examples: '--log mylog.html', '-l none'
-r --report file HTML report file. Can be disabled with 'NONE'
similarly as --log. Default is 'report.html'.
-S --summary file Not supported in Robot Framework 2.6 or newer.
-x --xunitfile file xUnit compatible result file. Not created unless this
option is specified.
-T --timestampoutputs When this option is used, timestamp in a format
'YYYYMMDD-hhmmss' is added to all generated output
files between their basename and extension. For
example '-T -o output.xml -r report.html -l none'
creates files like 'output-20070503-154410.xml' and
'report-20070503-154410.html'.
--splitlog TODO: doc
--splitoutputs level Not supported in Robot Framework 2.6 or newer.
--logtitle title Title for the generated test log. The default title
is '<Name Of The Suite> Test Log'. Underscores in
the title are converted into spaces in all titles.
--reporttitle title Title for the generated test report. The default
title is '<Name Of The Suite> Test Report'.
--summarytitle title Not supported in Robot Framework 2.6 or newer.
--reportbackground colors Background colors to use in the report file.
Either 'all_passed:critical_passed:failed' or
'passed:failed'. Both color names and codes work.
Examples: --reportbackground green:yellow:red
--reportbackground #00E:#E00
-L --loglevel level Threshold for selecting messages. Available levels:
TRACE (default), DEBUG, INFO, WARN, NONE (no msgs)
--suitestatlevel level How many levels to show in 'Statistics by Suite'
in log and report. By default all suite levels are
shown. Example: --suitestatlevel 3
--tagstatinclude tag * Include only matching tags in 'Statistics by Tag'
and 'Test Details' in log and report. By default all
tags set in test cases are shown. Given 'tag' can
also be a simple pattern (see e.g. --test).
--tagstatexclude tag * Exclude matching tags from 'Statistics by Tag' and
'Test Details'. This option can be used with
--tagstatinclude similarly as --exclude is used with
--include.
--tagstatcombine tags:name * Create combined statistics based on tags.
These statistics are added into 'Statistics by Tag'
and matching tests into 'Test Details'. If optional
'name' is not given, name of the combined tag is got
from the specified tags. Tags are combined using the
rules explained in --include.
Examples: --tagstatcombine tag1ANDtag2:My_name
--tagstatcombine requirement-*
--tagdoc pattern:doc * Add documentation to tags matching given pattern.
Documentation is shown in 'Test Details' and also as
a tooltip in 'Statistics by Tag'. Pattern can contain
characters '*' (matches anything) and '?' (matches
any char). Documentation can contain formatting
similarly as with --doc option.
Examples: --tagdoc mytag:My_documentation
--tagdoc regression:*See*_http://info.html
--tagdoc owner-*:Original_author
--tagstatlink pattern:link:title * Add external links into 'Statistics by
Tag'. Pattern can contain characters '*' (matches
anything) and '?' (matches any char). Characters
matching to wildcard expressions can be used in link
and title with syntax %N, where N is index of the
match (starting from 1). In title underscores are
automatically converted to spaces.
Examples: --tagstatlink mytag:http://my.domain:Link
--tagstatlink bug-*:http://tracker/id=%1:Bug_Tracker
--removekeywords all|passed Remove keyword data from generated outputs.
Keyword data is not needed when creating reports and
removing it can make the size of an output file
considerably smaller.
'all' - remove data from all keywords
'passed' - remove data only from keywords in passed
test cases and suites
--starttime timestamp Set starting time of test execution when creating
reports. Timestamp must be given in format
'2007-10-01 15:12:42.268' where all separators are
optional (e.g. '20071001151242268' is ok too) and
parts from milliseconds to hours can be omitted if
they are zero (e.g. '2007-10-01'). This can be used
to override starttime of the suite when reports are
created from a single suite or to set starttime for
combined suite, which is otherwise set to 'N/A'.
--endtime timestamp Same as --starttime but for ending time. If both
options are used, elapsed time of the suite is
calculated based on them. For combined suites,
it is otherwise calculated by adding elapsed times
of combined test suites together.
--nostatusrc Sets the return code to zero regardless of failures
in test cases. Error codes are returned normally.
-C --monitorcolors on|off|force Using ANSI colors in console. Normally colors
work in unixes but not in Windows. Default is 'on'.
'on' - use colors in unixes but not in Windows
'off' - never use colors
'force' - always use colors (also in Windows)
-E --escape what:with * Escape characters which are problematic in console.
'what' is the name of the character to escape and
'with' is the string to escape it with. Note that
all given arguments, incl. data sources, are escaped
so escape characters ought to be selected carefully.
<---------------------ESCAPES----------------------->
Examples:
--escape space:_ --metadata X:Value_with_spaces
-E space:SP -E quot:Q -v var:QhelloSPworldQ
-A --argumentfile path * Text file to read more arguments from. File can have
both options and data sources one per line. Contents
don't need to be escaped but spaces in the beginning
and end of lines are removed. Empty lines and lines
starting with a hash character (#) are ignored.
Example file:
| --include regression
| --name Regression Tests
| # This is a comment line
| my_tests.html
| path/to/test/directory/
-h -? --help Print usage instructions.
--version Print version information.
Options that are marked with an asterisk (*) can be specified multiple times.
For example '--test first --test third' selects test cases with name 'first'
and 'third'. If other options are given multiple times, the last value is used.
Long option format is case-insensitive. For example --SuiteStatLevel is
equivalent to, but easier to read than, --suitestatlevel. Long options can
also be shortened as long as they are unique. For example '--logti Title' works
while '--lo log.html' does not because the former matches only --logtitle but
latter matches both --log and --logtitle.
Environment Variables:
ROBOT_SYSLOG_FILE Path to the syslog file. If not specified, or set to
special value 'NONE', writing to syslog file is
disabled. Path must be absolute.
ROBOT_SYSLOG_LEVEL Log level to use when writing to the syslog file.
Available levels are the same as for --loglevel
option to Robot and the default is INFO.
Examples:
# Simple Rebot run that creates log and report with default names.
$ rebot output.xml
# Using options. Note that this is one long command split into multiple lines.
$ rebot --log none --report smoke.html --reporttitle Smoke_Tests
--reportbackground green:yellow:red --include smoke
--TagStatCombine tag1ANDtag2 path/to/myoutput.xml
# Running 'robot/rebot.py' directly and creating combined outputs.
$ python /path/robot/rebot.py -N Project_X -l x.html -r x.html outputs/*.xml
"""
import sys
try:
import pythonpathsetter
except ImportError:
# Get here when run as 'python -m robot.rebot' and then importing robot
# works without this and pythonpathsetter is imported again later.
pass
import robot
if __name__ == '__main__':
rc = robot.rebot_from_cli(sys.argv[1:], __doc__)
sys.exit(rc)
| [
[
8,
0,
0.502,
0.8735,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.9447,
0.004,
0,
0.66,
0.25,
509,
0,
1,
0,
0,
509,
0,
0
],
[
7,
0,
0.9625,
0.0237,
0,
0.66,
... | [
"\"\"\"Rebot -- Robot Framework Report and Log Generator\n\nVersion: <VERSION>\n\nUsage: rebot [options] robot_outputs\n or: interpreter /path/robot/rebot.py [options] robot_outputs\n or python -m robot.rebot [options] robot_outputs",
"import sys",
"try:\n import pythonpathsetter\nexcept ImportError:... |
#!/usr/bin/env python
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Robot Framework -- A keyword-driven test automation framework
Version: <VERSION>
Usage: pybot [options] data_sources
or: jybot [options] data_sources
or: interpreter /path/robot/runner.py [options] data_sources
or: python -m robot.runner [options] data_sources
Robot Framework is a Python-based keyword-driven test automation framework for
acceptance level testing and acceptance test-driven development (ATDD). It has
an easy-to-use tabular syntax for creating test cases and its testing
capabilities can be extended by test libraries implemented either with Python
or Java. Users can also create new keywords from existing ones using the same
simple syntax that is used for creating test cases.
Robot Framework has two start-up scripts, 'pybot' and 'jybot', which run it on
Python and Jython interpreters, respectively. Alternatively it is possible to
directly call 'robot/runner.py' script using a selected interpreter.
Data sources given to Robot Framework are either test case files or directories
containing them and/or other directories. Single test case file creates a test
suite containing all the test cases in it and a directory containing test case
files creates a higher level test suite with test case files or other
directories as sub test suites. If multiple data sources are given, a virtual
test suite containing suites generated from given data sources is created.
By default Robot Framework creates an XML output file and a log and a report in
HTML format, but this can be configured using various options listed below.
Outputs in HTML format are for human consumption and XML output for integration
with other systems. XML outputs can also be combined and otherwise further
processed with Rebot tool. Run 'rebot --help' for more information.
Robot Framework is open source software released under Apache License 2.0.
Its copyrights are owned and development supported by Nokia Siemens Networks.
For more information about the framework see http://robotframework.org.
Options:
-N --name name Set the name of the top level test suite. Underscores
in the name are converted to spaces. Default name is
created from the name of the executed data source.
-D --doc documentation Set the documentation of the top level test suite.
Underscores in the documentation are converted to
spaces and it may also contain simple HTML formatting
(e.g. *bold* and http://url/).
-M --metadata name:value * Set metadata of the top level test suite.
Underscores in the name and value are converted to
spaces. Value can contain same HTML formatting as
--doc. Example: '--metadata version:1.2'
-G --settag tag * Sets given tag(s) to all executed test cases.
-t --test name * Select test cases to run by name or long name. Name
is case and space insensitive and it can also be a
simple pattern where '*' matches anything and '?'
matches any char. If using '*' and '?' in the console
is problematic see --escape and --argumentfile.
-s --suite name * Select test suites to run by name. When this option
is used with --test, --include or --exclude, only
test cases in matching suites and also matching other
filtering criteria are selected. Name can be a simple
pattern similarly as with --test and it can contain
parent name separated with a dot. For example
'-s X.Y' selects suite 'Y' only if its parent is 'X'.
-i --include tag * Select test cases to run by tag. Similarly as name in
--test, tag is case and space insensitive. There are
three ways to include test based on tags:
1) One tag as a simple pattern. Tests having a tag
matching the pattern are included. Example: 'it-*'
2) Two or more tags (or patterns) separated by '&' or
'AND'. Only tests having all these tags are included.
Examples: 'tag1&tag2', 'smokeANDowner-*ANDit-10'
3) Two or more tags (or patterns) separated by 'NOT'.
Tests having the first tag but not any of the latter
ones are included. Example: 'it-10NOTsmoke'
-e --exclude tag * Select test cases not to run by tag. These tests are
not run even if they are included with --include.
Tags are excluded using the rules explained in
--include.
-c --critical tag * Tests having given tag are considered critical. If no
critical tags are set, all tags are critical. Tags
can be given as a pattern like e.g. with --test.
-n --noncritical tag * Tests with given tag are not critical even if they
have a tag set with --critical. Tag can be a pattern.
-v --variable name:value * Set variables in the test data. Only scalar
variables are supported and name is given without
'${}'. See --escape for how to use special characters
and --variablefile for a more powerful variable
setting mechanism that allows also list variables.
Examples:
--variable str:Hello => ${str} = 'Hello'
-v str:Hi_World -E space:_ => ${str} = 'Hi World'
-v x: -v y:42 => ${x} = '', ${y} = '42'
-V --variablefile path * File to read variables from (e.g. 'path/vars.py').
Example file:
| import random
| __all__ = ['scalar','LIST__var','integer']
| scalar = 'Hello world!'
| LIST__var = ['Hello','list','world']
| integer = random.randint(1,10)
=>
${scalar} = 'Hello world!'
@{var} = ['Hello','list','world']
${integer} = <random integer from 1 to 10>
-d --outputdir dir Where to create output files. The default is the
directory where tests are run from and the given path
is considered relative to that unless it is absolute.
-o --output file XML output file. Given path, similarly as paths given
to --log, --report, --debugfile and --xunitfile, is
relative to --outputdir unless given as an absolute
path. Other output files are created based on XML
output files after the test execution and XML outputs
can also be further processed with Rebot tool. Can be
disabled by giving a special value 'NONE'. In this
case, also log and report are automatically disabled.
Default: output.xml
-l --log file HTML log file. Can be disabled by giving a special
value 'NONE'. Default: log.html
Examples: '--log mylog.html', '-l NONE'
-r --report file HTML report file. Can be disabled with 'NONE'
similarly as --log. Default: report.html
-S --summary file Not supported in Robot Framework 2.6 or newer.
-x --xunitfile file xUnit compatible result file. Not created unless this
option is specified.
-b --debugfile file Debug file written during execution. Not created
unless this option is specified.
-T --timestampoutputs When this option is used, timestamp in a format
'YYYYMMDD-hhmmss' is added to all generated output
files between their basename and extension. For
example '-T -o output.xml -r report.html -l none'
creates files like 'output-20070503-154410.xml' and
'report-20070503-154410.html'.
--splitlog TODO: doc
--splitoutputs level Not supported in Robot Framework 2.6 or newer.
--logtitle title Title for the generated test log. The default title
is '<Name Of The Suite> Test Log'. Underscores in
the title are converted into spaces in all titles.
--reporttitle title Title for the generated test report. The default
title is '<Name Of The Suite> Test Report'.
--summarytitle title Not supported in Robot Framework 2.6 or newer.
--reportbackground colors Background colors to use in the report file.
Either 'all_passed:critical_passed:failed' or
'passed:failed'. Both color names and codes work.
Examples: --reportbackground green:yellow:red
--reportbackground #00E:#E00
-L --loglevel level Threshold level for logging. Available levels:
TRACE, DEBUG, INFO (default), WARN, NONE (no logging)
--suitestatlevel level How many levels to show in 'Statistics by Suite'
in log and report. By default all suite levels are
shown. Example: --suitestatlevel 3
--tagstatinclude tag * Include only matching tags in 'Statistics by Tag'
and 'Test Details' in log and report. By default all
tags set in test cases are shown. Given 'tag' can
also be a simple pattern (see e.g. --test).
--tagstatexclude tag * Exclude matching tags from 'Statistics by Tag' and
'Test Details'. This option can be used with
--tagstatinclude similarly as --exclude is used with
--include.
--tagstatcombine tags:name * Create combined statistics based on tags.
These statistics are added into 'Statistics by Tag'
and matching tests into 'Test Details'. If optional
'name' is not given, name of the combined tag is got
from the specified tags. Tags are combined using the
rules explained in --include.
Examples: --tagstatcombine tag1ANDtag2:My_name
--tagstatcombine requirement-*
--tagdoc pattern:doc * Add documentation to tags matching given pattern.
Documentation is shown in 'Test Details' and also as
a tooltip in 'Statistics by Tag'. Pattern can contain
characters '*' (matches anything) and '?' (matches
any char). Documentation can contain formatting
similarly as with --doc option.
Examples: --tagdoc mytag:My_documentation
--tagdoc regression:*See*_http://info.html
--tagdoc owner-*:Original_author
--tagstatlink pattern:link:title * Add external links into 'Statistics by
Tag'. Pattern can contain characters '*' (matches
anything) and '?' (matches any char). Characters
matching to wildcard expressions can be used in link
and title with syntax %N, where N is index of the
match (starting from 1). In title underscores are
automatically converted to spaces.
Examples: --tagstatlink mytag:http://my.domain:Link
--tagstatlink bug-*:http://tracker/id=%1:Bug_Tracker
--listener class * A class for monitoring test execution. Gets
notifications e.g. when a test case starts and ends.
Arguments to listener class can be given after class
name, using colon as separator. For example:
--listener MyListenerClass:arg1:arg2
--warnonskippedfiles If this option is used, skipped files will cause a
warning that is visible to console output and log
files. By default skipped files only cause an info
level syslog message.
--nostatusrc Sets the return code to zero regardless of failures
in test cases. Error codes are returned normally.
--runemptysuite Executes tests also if the top level test suite is
empty. Useful e.g. with --include/--exclude when it
is not an error that no test matches the condition.
--runmode mode * Possible values are 'Random:Test', 'Random:Suite',
'Random:All', 'ExitOnFailure', 'SkipTeardownOnExit',
and 'DryRun' (case-insensitive). First three change
the execution order of tests, suites, or both.
'ExitOnFailure' stops test execution if a critical
test fails. 'SkipTeardownOnExit' causes teardowns to
be skipped if test execution is stopped prematurely.
In the 'DryRun' test data is verified and tests run
so that library keywords are not executed.
-W --monitorwidth chars Width of the monitor output. Default is 78.
-C --monitorcolors auto|on|off Use colors on console output or not.
auto: use colors when output not redirected (default)
on: always use colors
off: never use colors
Note that colors do not work with Jython on Windows.
-P --pythonpath path * Additional locations (directories, ZIPs, JARs) where
to search test libraries from when they are imported.
Multiple paths can be given by separating them with a
colon (':') or using this option several times. Given
path can also be a glob pattern matching multiple
paths but then it normally must be escaped or quoted.
Examples:
--pythonpath libs/
--pythonpath /opt/testlibs:mylibs.zip:yourlibs
-E star:STAR -P lib/STAR.jar -P mylib.jar
-E --escape what:with * Escape characters which are problematic in console.
'what' is the name of the character to escape and
'with' is the string to escape it with. Note that
all given arguments, incl. data sources, are escaped
so escape characters ought to be selected carefully.
<--------------------ESCAPES------------------------>
Examples:
--escape space:_ --metadata X:Value_with_spaces
-E space:SP -E quot:Q -v var:QhelloSPworldQ
-A --argumentfile path * Text file to read more arguments from. Use special
path 'STDIN' to read contents from the standard input
stream. File can have both options and data sources
one per line. Contents do not need to be escaped but
spaces in the beginning and end of lines are removed.
Empty lines and lines starting with a hash character
(#) are ignored.
Example file:
| --include regression
| --name Regression Tests
| # This is a comment line
| my_tests.html
| path/to/test/directory/
Examples:
--argumentfile argfile.txt --argumentfile STDIN
-h -? --help Print usage instructions.
--version Print version information.
Options that are marked with an asterisk (*) can be specified multiple times.
For example '--test first --test third' selects test cases with name 'first'
and 'third'. If other options are given multiple times, the last value is used.
Long option format is case-insensitive. For example --SuiteStatLevel is
equivalent to, but easier to read than, --suitestatlevel. Long options can
also be shortened as long as they are unique. For example '--logle DEBUG' works
while '--lo log.html' does not because the former matches only --loglevel but
latter matches --log, --logtitle and --loglevel.
Environment Variables:
ROBOT_SYSLOG_FILE Path to the syslog file. If not specified, or set to
special value 'NONE', writing to syslog file is
disabled. Path must be absolute.
ROBOT_SYSLOG_LEVEL Log level to use when writing to the syslog file.
Available levels are the same as for --loglevel
option and the default is INFO.
Examples:
# Simple test run with 'pybot' without options.
$ pybot tests.html
# Using options and running with 'jybot'.
$ jybot --include smoke --name Smoke_Tests /path/to/tests.html
# Running 'robot/runner.py' directly and using test data in TSV format.
$ python /path/to/robot/runner.py tests.tsv
# Using custom start-up script, giving multiple options and executing a dir.
$ runtests.sh --test test1 --test test2 testdir/
# Executing multiple data sources and using case-insensitive long options.
$ pybot --SuiteStatLevel 2 /my/tests/*.html /your/tests.html
# Setting syslog file before running tests.
$ export ROBOT_SYSLOG_FILE=/tmp/syslog.txt
$ pybot tests.html
"""
import sys
try:
import pythonpathsetter
except ImportError:
# Get here when run as 'python -m robot.runner' and then importing robot
# works without this and pythonpathsetter is imported again later.
pass
import robot
if __name__ == '__main__':
rc = robot.run_from_cli(sys.argv[1:], __doc__)
sys.exit(rc)
| [
[
8,
0,
0.5016,
0.9003,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.9564,
0.0031,
0,
0.66,
0.25,
509,
0,
1,
0,
0,
509,
0,
0
],
[
7,
0,
0.9704,
0.0187,
0,
0.66,
... | [
"\"\"\"Robot Framework -- A keyword-driven test automation framework\n\nVersion: <VERSION>\n\nUsage: pybot [options] data_sources\n or: jybot [options] data_sources\n or: interpreter /path/robot/runner.py [options] data_sources\n or: python -m robot.runner [options] data_sources",
"import sys",
"try:\... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
class UserErrorHandler:
"""Created if creating handlers fail -- running raises DataError.
The idea is not to raise DataError at processing time and prevent all
tests in affected test case file from executing. Instead UserErrorHandler
is created and if it is ever run DataError is raised then.
"""
type = 'error'
def __init__(self, name, error):
self.name = self.longname = name
self.doc = self.shortdoc = ''
self._error = error
self.timeout = ''
def init_keyword(self, varz):
pass
def run(self, *args):
raise DataError(self._error)
| [
[
1,
0,
0.4054,
0.027,
0,
0.66,
0,
299,
0,
1,
0,
0,
299,
0,
0
],
[
3,
0,
0.7432,
0.5405,
0,
0.66,
1,
290,
0,
3,
0,
0,
0,
0,
1
],
[
8,
1,
0.5811,
0.1622,
1,
0.41,
... | [
"from robot.errors import DataError",
"class UserErrorHandler:\n \"\"\"Created if creating handlers fail -- running raises DataError.\n\n The idea is not to raise DataError at processing time and prevent all\n tests in affected test case file from executing. Instead UserErrorHandler\n is created and i... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class BaseKeyword:
def __init__(self, name='', args=None, doc='', timeout='', type='kw'):
self.name = name
self.args = args or []
self.doc = doc
self.timeout = timeout
self.type = type
self.status = 'NOT_RUN'
def serialize(self, serializer):
serializer.start_keyword(self)
serializer.end_keyword(self)
| [
[
3,
0,
0.7857,
0.4643,
0,
0.66,
0,
776,
0,
2,
0,
0,
0,
0,
2
],
[
2,
1,
0.75,
0.25,
1,
0.88,
0,
555,
0,
6,
0,
0,
0,
0,
0
],
[
14,
2,
0.6786,
0.0357,
2,
0.09,
0,... | [
"class BaseKeyword:\n\n def __init__(self, name='', args=None, doc='', timeout='', type='kw'):\n self.name = name\n self.args = args or []\n self.doc = doc\n self.timeout = timeout\n self.type = type",
" def __init__(self, name='', args=None, doc='', timeout='', type='kw')... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from statistics import Stat
from robot import utils
from robot.errors import DataError
class _TestAndSuiteHelper:
def __init__(self, name, parent=None):
self.name = name
self.doc = ''
self.parent = parent
self.setup = None
self.teardown = None
self.status = 'NOT_RUN'
self.message = ''
# TODO: Is this property and other html/serialize stuff here used anymore?
@property
def htmldoc(self):
return utils.html_format(self.doc)
# TODO: Replace with simple @property in 2.7.
# Cannot do that now because Mabot assigns longname.
_longname = None
longname = property(lambda self: self._longname or self.get_long_name(),
lambda self, name: setattr(self, '_longname', name))
# TODO: Is separator still used?
def get_long_name(self, separator='.'):
"""Returns long name. If separator is None, list of names is returned."""
names = self.parent and self.parent.get_long_name(separator=None) or []
names.append(self.name)
if separator:
return separator.join(names)
return names
def _set_teardown_fail_msg(self, message):
if self.message == '':
self.message = message
else:
self.message += '\n\nAlso ' + message[0].lower() + message[1:]
def __str__(self):
return self.name
def __repr__(self):
return repr(self.name)
class BaseTestSuite(_TestAndSuiteHelper):
"""Base class for TestSuite used in runtime and by rebot."""
def __init__(self, name, source=None, parent=None):
_TestAndSuiteHelper.__init__(self, name, parent)
self.source = source is not None and utils.abspath(source) or None
self.metadata = utils.NormalizedDict()
self.suites = []
self.tests = []
self.critical = _Critical()
self.critical_stats = Stat()
self.all_stats = Stat()
if parent:
parent.suites.append(self)
def set_name(self, name):
if name:
self.name = name
elif not self.parent and self.name == '': # MultiSourceSuite
self.name = ' & '.join([suite.name for suite in self.suites])
def set_critical_tags(self, critical, non_critical):
if critical is not None or non_critical is not None:
self.critical.set(critical, non_critical)
self._set_critical_tags(self.critical)
def _set_critical_tags(self, critical):
self.critical = critical
for suite in self.suites:
suite._set_critical_tags(critical)
for test in self.tests:
test.set_criticality(critical)
def set_doc(self, doc):
if doc:
self.doc = doc
def set_metadata(self, metalist):
for name, value in metalist:
self.metadata[name] = value
def get_metadata(self, html=False):
names = sorted(self.metadata.keys())
values = [self.metadata[n] for n in names]
if html:
values = [utils.html_format(v) for v in values]
return zip(names, values)
def get_test_count(self):
count = len(self.tests)
for suite in self.suites:
count += suite.get_test_count()
return count
def get_full_message(self, html=False):
"""Returns suite's message including statistics message"""
stat_msg = self.get_stat_message(html)
if not self.message:
return stat_msg
if not html:
return '%s\n\n%s' % (self.message, stat_msg)
return '%s<br /><br />%s' % (utils.html_escape(self.message), stat_msg)
def get_stat_message(self, html=False):
ctotal, cend, cpass, cfail = self._get_counts(self.critical_stats)
atotal, aend, apass, afail = self._get_counts(self.all_stats)
msg = ('%%d critical test%%s, %%d passed, %(cfail)s%%d failed%(end)s\n'
'%%d test%%s total, %%d passed, %(afail)s%%d failed%(end)s')
if html:
msg = msg.replace(' ', ' ').replace('\n', '<br />')
msg = msg % {'cfail': '<span%s>' % (cfail and ' class="fail"' or ''),
'afail': '<span%s>' % (afail and ' class="fail"' or ''),
'end': '</span>'}
else:
msg = msg % {'cfail': '', 'afail': '', 'end': ''}
return msg % (ctotal, cend, cpass, cfail, atotal, aend, apass, afail)
def _get_counts(self, stat):
total = stat.passed + stat.failed
ending = utils.plural_or_not(total)
return total, ending, stat.passed, stat.failed
def set_status(self):
"""Sets status and statistics based on subsuite and test statuses.
Can/should be used when statuses have been changed somehow.
"""
self.status = self._set_stats()
def _set_stats(self):
self.critical_stats = Stat()
self.all_stats = Stat()
for suite in self.suites:
suite.set_status()
self._add_suite_to_stats(suite)
for test in self.tests:
self._add_test_to_stats(test)
return self._get_status()
def _get_status(self):
return 'PASS' if not self.critical_stats.failed else 'FAIL'
def _add_test_to_stats(self, test):
self.all_stats.add_test(test)
if test.critical == 'yes':
self.critical_stats.add_test(test)
def _add_suite_to_stats(self, suite):
self.critical_stats.add_stat(suite.critical_stats)
self.all_stats.add_stat(suite.all_stats)
def suite_teardown_failed(self, message=None):
if message:
self._set_teardown_fail_msg(message)
self.critical_stats.fail_all()
self.all_stats.fail_all()
self.status = self._get_status()
sub_message = 'Teardown of the parent suite failed.'
for suite in self.suites:
suite.suite_teardown_failed(sub_message)
for test in self.tests:
test.suite_teardown_failed(sub_message)
def set_tags(self, tags):
if tags:
for test in self.tests:
test.tags = utils.normalize_tags(test.tags + tags)
for suite in self.suites:
suite.set_tags(tags)
def filter(self, suites=None, tests=None, includes=None, excludes=None,
zero_tests_ok=False):
self.filter_by_names(suites, tests, zero_tests_ok)
self.filter_by_tags(includes, excludes, zero_tests_ok)
def filter_by_names(self, suites=None, tests=None, zero_tests_ok=False):
if not (suites or tests):
return
suites = [([], name.split('.')) for name in suites or []]
tests = tests or []
if not self._filter_by_names(suites, tests) and not zero_tests_ok:
self._raise_no_tests_filtered_by_names(suites, tests)
def _filter_by_names(self, suites, tests):
suites = self._filter_suite_names(suites)
self.suites = [suite for suite in self.suites
if suite._filter_by_names(suites, tests)]
if not suites:
self.tests = [test for test in self.tests if tests == [] or
any(utils.matches_any(name, tests, ignore=['_'])
for name in [test.name, test.get_long_name()])]
else:
self.tests = []
return bool(self.suites or self.tests)
def _filter_suite_names(self, suites):
try:
return [self._filter_suite_name(p, s) for p, s in suites]
except StopIteration:
return []
def _filter_suite_name(self, parent, suite):
if utils.matches(self.name, suite[0], ignore=['_']):
if len(suite) == 1:
raise StopIteration('Match found')
return (parent + [suite[0]], suite[1:])
return ([], parent + suite)
def _raise_no_tests_filtered_by_names(self, suites, tests):
tests = utils.seq2str(tests, lastsep=' or ')
suites = utils.seq2str(['.'.join(p + s) for p, s in suites],
lastsep=' or ')
if not suites:
msg = 'test cases named %s.' % tests
elif not tests:
msg = 'test suites named %s.' % suites
else:
msg = 'test cases %s in suites %s.' % (tests, suites)
raise DataError("Suite '%s' contains no %s" % (self.name, msg))
def filter_by_tags(self, includes=None, excludes=None, zero_tests_ok=False):
if not (includes or excludes):
return
includes = includes or []
excludes = excludes or []
if not self._filter_by_tags(includes, excludes) and not zero_tests_ok:
self._raise_no_tests_filtered_by_tags(includes, excludes)
def _filter_by_tags(self, incls, excls):
self.suites = [suite for suite in self.suites
if suite._filter_by_tags(incls, excls)]
self.tests = [test for test in self.tests
if test.is_included(incls, excls)]
return bool(self.suites or self.tests)
def _raise_no_tests_filtered_by_tags(self, incls, excls):
incl = utils.seq2str(incls)
excl = utils.seq2str(excls)
msg = "Suite '%s' with " % self.name
if incl:
msg += 'includes %s ' % incl
if excl:
msg += 'and '
if excl:
msg += 'excludes %s ' % excl
raise DataError(msg + 'contains no test cases.')
def set_runmode(self, runmode):
runmode = runmode.upper()
if runmode == 'EXITONFAILURE':
self._run_mode_exit_on_failure = True
elif runmode == 'SKIPTEARDOWNONEXIT':
self._run_mode_skip_teardowns_on_exit = True
elif runmode == 'DRYRUN':
self._run_mode_dry_run = True
elif runmode == 'RANDOM:TEST':
random.shuffle(self.tests)
elif runmode == 'RANDOM:SUITE':
random.shuffle(self.suites)
elif runmode == 'RANDOM:ALL':
random.shuffle(self.suites)
random.shuffle(self.tests)
else:
return
for suite in self.suites:
suite.set_runmode(runmode)
def set_options(self, settings):
self.set_tags(settings['SetTag'])
self.filter(settings['SuiteNames'], settings['TestNames'],
settings['Include'], settings['Exclude'],
settings['RunEmptySuite'])
self.set_name(settings['Name'])
self.set_doc(settings['Doc'])
self.set_metadata(settings['Metadata'])
self.set_critical_tags(settings['Critical'], settings['NonCritical'])
self._return_status_rc = not settings['NoStatusRC']
if 'RunMode' in settings:
map(self.set_runmode, settings['RunMode'])
if 'RemoveKeywords' in settings:
self.remove_keywords(settings['RemoveKeywords'])
def serialize(self, serializer):
serializer.start_suite(self)
if self.setup is not None:
self.setup.serialize(serializer)
if self.teardown is not None:
self.teardown.serialize(serializer)
for suite in self.suites:
suite.serialize(serializer)
for test in self.tests:
test.serialize(serializer)
serializer.end_suite(self)
@property
def return_code(self):
rc = min(self.critical_stats.failed, 250)
return rc if self._return_status_rc else 0
class BaseTestCase(_TestAndSuiteHelper):
def __init__(self, name, parent):
_TestAndSuiteHelper.__init__(self, name, parent)
self.critical = 'yes'
if parent:
parent.tests.append(self)
def suite_teardown_failed(self, message):
self.status = 'FAIL'
self._set_teardown_fail_msg(message)
def set_criticality(self, critical):
self.critical = 'yes' if critical.are_critical(self.tags) else 'no'
def is_included(self, incl_tags, excl_tags):
"""Returns True if this test case is included but not excluded.
If no 'incl_tags' are given all tests are considered to be included.
"""
included = not incl_tags or self._matches_any_tag_rule(incl_tags)
excluded = self._matches_any_tag_rule(excl_tags)
return included and not excluded
def _matches_any_tag_rule(self, tag_rules):
"""Returns True if any of tag_rules matches self.tags
Matching equals supporting AND, & and NOT boolean operators and simple
pattern matching. NOT is 'or' operation meaning if any of the NOTs is
matching, False is returned.
"""
return any(self._matches_tag_rule(rule) for rule in tag_rules)
def _matches_tag_rule(self, tag_rule):
if 'NOT' not in tag_rule:
return self._matches_tag(tag_rule)
nots = tag_rule.split('NOT')
should_match = nots.pop(0)
return self._matches_tag(should_match) \
and not any(self._matches_tag(n) for n in nots)
def _matches_tag(self, tag):
"""Returns True if given tag matches any tag from self.tags.
Note that given tag may be ANDed combination of multiple tags (e.g.
tag1&tag2) and then all of them must match some tag from self.tags.
"""
for item in tag.split('&'):
if not any(utils.matches(t, item, ignore=['_']) for t in self.tags):
return False
return True
def __cmp__(self, other):
if self.status != other.status:
return -1 if self.status == 'FAIL' else 1
if self.critical != other.critical:
return -1 if self.critical == 'yes' else 1
try:
return cmp(self.longname, other.longname)
except AttributeError:
return cmp(self.name, other.name)
def serialize(self, serializer):
serializer.start_test(self)
if self.setup is not None:
self.setup.serialize(serializer)
for kw in self.keywords:
kw.serialize(serializer)
if self.teardown is not None:
self.teardown.serialize(serializer)
serializer.end_test(self)
class _Critical:
def __init__(self, tags=None, nons=None):
self.set(tags, nons)
def set(self, tags, nons):
self.tags = utils.normalize_tags(tags or [])
self.nons = utils.normalize_tags(nons or [])
def is_critical(self, tag):
return utils.matches_any(tag, self.tags)
def is_non_critical(self, tag):
return utils.matches_any(tag, self.nons)
def are_critical(self, tags):
for tag in tags:
if self.is_non_critical(tag):
return False
for tag in tags:
if self.is_critical(tag):
return True
return not self.tags
| [
[
1,
0,
0.0356,
0.0024,
0,
0.66,
0,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0404,
0.0024,
0,
0.66,
0.1429,
35,
0,
1,
0,
0,
35,
0,
0
],
[
1,
0,
0.0428,
0.0024,
0,
0.... | [
"import random",
"from statistics import Stat",
"from robot import utils",
"from robot.errors import DataError",
"class _TestAndSuiteHelper:\n\n def __init__(self, name, parent=None):\n self.name = name\n self.doc = ''\n self.parent = parent\n self.setup = None\n self.t... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from robot import utils
class Statistics:
    """Top-level container combining total, tag and suite statistics."""

    def __init__(self, suite, suite_stat_level=-1, tag_stat_include=None,
                 tag_stat_exclude=None, tag_stat_combine=None, tag_doc=None,
                 tag_stat_link=None):
        self.tags = TagStatistics(tag_stat_include, tag_stat_exclude,
                                  tag_stat_combine, tag_doc, tag_stat_link)
        self.suite = SuiteStatistics(suite, self.tags, suite_stat_level)
        self.total = TotalStatistics(self.suite)
        self.tags.sort()

    def serialize(self, serializer):
        """Serialize total, tag and suite statistics, in that order."""
        serializer.start_statistics(self)
        for stats in (self.total, self.tags, self.suite):
            stats.serialize(serializer)
        serializer.end_statistics(self)
class Stat:
    """Holds pass/fail counts for one statistics row."""

    def __init__(self, name=''):
        self.name = name
        self.passed = 0
        self.failed = 0

    def add_stat(self, other):
        """Merge the counts of another Stat into this one."""
        self.passed = self.passed + other.passed
        self.failed = self.failed + other.failed

    def add_test(self, test):
        """Count one test; anything but status 'PASS' counts as failed."""
        if test.status != 'PASS':
            self.failed += 1
        else:
            self.passed += 1

    def fail_all(self):
        """Move every passed test into the failed count."""
        self.failed += self.passed
        self.passed = 0

    def __cmp__(self, other):
        # Python 2 ordering protocol: sort rows alphabetically by name.
        return cmp(self.name, other.name)

    def __nonzero__(self):
        # Python 2 truth protocol: truthy only when nothing has failed.
        return self.failed == 0
class SuiteStat(Stat):
    """Statistics row representing a single test suite."""
    type = 'suite'

    def __init__(self, suite):
        Stat.__init__(self, suite.name)
        # Full dotted name provided by the suite itself.
        self.long_name = suite.get_long_name()

    def serialize(self, serializer):
        serializer.suite_stat(self)
class TagStat(Stat):
    """Statistics row for one tag, also tracking the tests having it."""
    type = 'tag'

    def __init__(self, name, doc='', links=None, critical=False,
                 non_critical=False, combined=''):
        """`links` defaults to a fresh list per instance.

        Bug fix: the original used the mutable default `links=[]`, which
        is shared between all instances created without the argument.
        """
        Stat.__init__(self, name)
        self.doc = doc
        if links is None:
            links = []
        self.links = links
        self.critical = critical
        self.non_critical = non_critical
        self.combined = combined
        self.tests = []

    def add_test(self, test):
        Stat.add_test(self, test)
        self.tests.append(test)

    def __cmp__(self, other):
        # Python 2 sort order: critical tags first, then non-critical,
        # then combined statistics, finally alphabetical by name.
        if self.critical != other.critical:
            return cmp(other.critical, self.critical)
        if self.non_critical != other.non_critical:
            return cmp(other.non_critical, self.non_critical)
        if bool(self.combined) != bool(other.combined):
            return cmp(bool(other.combined), bool(self.combined))
        return cmp(self.name, other.name)

    def serialize(self, serializer):
        serializer.tag_stat(self)
class TotalStat(Stat):
    """Aggregate total row initialized from an existing suite stat."""
    type = 'total'

    def __init__(self, name, suite_stat):
        Stat.__init__(self, name)
        # Counters start at zero, so merging simply copies the counts.
        self.add_stat(suite_stat)

    def serialize(self, serializer):
        serializer.total_stat(self)
class SuiteStatistics:
    """Pass/fail statistics of one suite, recursively including children."""

    def __init__(self, suite, tag_stats, suite_stat_level=-1):
        self.all = SuiteStat(suite)
        self.critical = SuiteStat(suite)
        self.suites = []
        self._process_suites(suite, tag_stats)
        self._process_tests(suite, tag_stats)
        self._suite_stat_level = suite_stat_level

    def _process_suites(self, suite, tag_stats):
        # Recurse into child suites and fold their counts into ours.
        for child in suite.suites:
            stats = SuiteStatistics(child, tag_stats)
            self.suites.append(stats)
            self.all.add_stat(stats.all)
            self.critical.add_stat(stats.critical)

    def _process_tests(self, suite, tag_stats):
        for test in suite.tests:
            self.all.add_test(test)
            if test.critical == 'yes':
                self.critical.add_test(test)
            tag_stats.add_test(test, suite.critical)

    def serialize(self, serializer):
        serializer.start_suite_stats(self)
        self._serialize(serializer, self._suite_stat_level)
        serializer.end_suite_stats(self)

    def _serialize(self, serializer, max_level, level=1):
        # A negative max_level means unlimited depth.
        self.all.serialize(serializer)
        if max_level < 0 or max_level > level:
            for child in self.suites:
                child._serialize(serializer, max_level, level+1)
class TagStatistics:
    """Per-tag statistics with include/exclude filtering and combined tags."""

    def __init__(self, include=None, exclude=None, combine=None, docs=None,
                 links=None):
        self.stats = utils.NormalizedDict()
        self._include = include or []
        self._exclude = exclude or []
        self._combine = combine or []
        info = TagStatInfo(docs or [], links or [])
        self._get_doc = info.get_doc
        self._get_links = info.get_links

    def add_test(self, test, critical):
        """Update both plain and combined tag statistics with `test`."""
        self._add_tags_statistics(test, critical)
        self._add_combined_statistics(test)

    def _add_tags_statistics(self, test, critical):
        for tag in test.tags:
            if self._is_included(tag):
                if tag not in self.stats:
                    self.stats[tag] = TagStat(tag, self._get_doc(tag),
                                              self._get_links(tag),
                                              critical.is_critical(tag),
                                              critical.is_non_critical(tag))
                self.stats[tag].add_test(test)

    def _is_included(self, tag):
        # Included when it matches the include patterns (or none are
        # configured) and does not match any exclude pattern.
        included = not self._include or utils.matches_any(tag, self._include)
        return included and not utils.matches_any(tag, self._exclude)

    def _add_combined_statistics(self, test):
        for pattern, name in self._combine:
            name = name or pattern
            if name not in self.stats:
                self.stats[name] = TagStat(name, self._get_doc(name),
                                           self._get_links(name),
                                           combined=pattern)
            if test.is_included([pattern], []):
                self.stats[name].add_test(test)

    def serialize(self, serializer):
        serializer.start_tag_stats(self)
        for stat in sorted(self.stats.values()):
            stat.serialize(serializer)
        serializer.end_tag_stats(self)

    def sort(self):
        """Sort the test lists stored inside every tag statistic."""
        for stat in self.stats.values():
            stat.tests.sort()
class TotalStatistics:
    """Total statistics rows for critical tests and for all tests."""

    def __init__(self, suite):
        self.critical = TotalStat('Critical Tests', suite.critical)
        self.all = TotalStat('All Tests', suite.all)

    def serialize(self, serializer):
        serializer.start_total_stats(self)
        for stat in (self.critical, self.all):
            stat.serialize(serializer)
        serializer.end_total_stats(self)
class TagStatInfo:
    """Resolves documentation and links configured for tag patterns."""

    def __init__(self, docs, links):
        self._docs = [TagStatDoc(*doc_args) for doc_args in docs]
        self._links = [TagStatLink(*link_args) for link_args in links]

    def get_doc(self, tag):
        """Join the texts of all docs whose pattern matches `tag`."""
        matching = [doc.text for doc in self._docs if doc.matches(tag)]
        return ' & '.join(matching)

    def get_links(self, tag):
        return [link.get_link(tag) for link in self._links if link.matches(tag)]
class TagStatDoc:
    """Documentation text attached to tags matching a wildcard pattern."""

    def __init__(self, pattern, doc):
        self._pattern = pattern
        self.text = doc

    def matches(self, tag):
        return utils.matches(tag, self._pattern)
class TagStatLink:
    """Generates an external (link, title) pair for matching tags.

    '%N' placeholders in the link and title are replaced with the text
    matched by the Nth wildcard group ('*' or a run of '?') of the
    pattern. Underscores in the title are shown as spaces.
    """
    # Raw string so the backslashes reach the regexp engine intact
    # (the original non-raw '(\*|\?)' relies on Python passing unknown
    # escapes through, which raises warnings/errors in modern Python).
    _match_pattern_tokenizer = re.compile(r'(\*|\?)')

    def __init__(self, pattern, link, title):
        self._regexp = self._get_match_regexp(pattern)
        self._link = link
        self._title = title.replace('_', ' ')

    def matches(self, tag):
        return self._regexp.match(tag) is not None

    def get_link(self, tag):
        """Return (link, title) with placeholders expanded, or None when
        `tag` does not match the pattern."""
        match = self._regexp.match(tag)
        if not match:
            return None
        link, title = self._replace_groups(self._link, self._title, match)
        return link, title

    def _replace_groups(self, link, title, match):
        for index, group in enumerate(match.groups()):
            placeholder = '%' + str(index+1)
            link = link.replace(placeholder, group)
            title = title.replace(placeholder, group)
        return link, title

    def _get_match_regexp(self, pattern):
        """Compile `pattern` into a case-insensitive full-match regexp:
        '*' becomes '(.*)' and each run of '?' a group of dots."""
        regexp = []
        open_parenthesis = False
        for token in self._match_pattern_tokenizer.split(pattern):
            if token == '':
                continue
            if token == '?':
                if not open_parenthesis:
                    regexp.append('(')
                    open_parenthesis = True
                regexp.append('.')
                continue
            if open_parenthesis:
                regexp.append(')')
                open_parenthesis = False
            if token == '*':
                regexp.append('(.*)')
                continue
            regexp.append(re.escape(token))
        if open_parenthesis:
            regexp.append(')')
        return re.compile('^%s$' % ''.join(regexp), re.IGNORECASE)
| [
[
1,
0,
0.0515,
0.0034,
0,
0.66,
0,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.0584,
0.0034,
0,
0.66,
0.0833,
735,
0,
1,
0,
0,
735,
0,
0
],
[
3,
0,
0.0962,
0.0584,
0,
... | [
"import re",
"from robot import utils",
"class Statistics:\n\n def __init__(self, suite, suite_stat_level=-1, tag_stat_include=None,\n tag_stat_exclude=None, tag_stat_combine=None, tag_doc=None,\n tag_stat_link=None):\n self.tags = TagStatistics(tag_stat_include, tag_st... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
class BaseLibrary:
    """Common base for keyword libraries keeping handlers in `self.handlers`
    (a name -> handler mapping set up by subclasses)."""

    def get_handler(self, name):
        """Return the handler registered with `name`.

        Raises DataError when no handler with that name exists.
        """
        try:
            return self.handlers[name]
        except KeyError:
            raise DataError("No keyword handler with name '%s' found" % name)

    def has_handler(self, name):
        # `in` replaces dict.has_key, which is deprecated and removed in
        # Python 3; behavior is identical.
        return name in self.handlers

    def __len__(self):
        return len(self.handlers)
| [
[
1,
0,
0.5161,
0.0323,
0,
0.66,
0,
299,
0,
1,
0,
0,
299,
0,
0
],
[
3,
0,
0.8065,
0.4194,
0,
0.66,
1,
411,
0,
3,
0,
0,
0,
0,
3
],
[
2,
1,
0.7419,
0.1613,
1,
0.82,
... | [
"from robot.errors import DataError",
"class BaseLibrary:\n\n def get_handler(self, name):\n try:\n return self.handlers[name]\n except KeyError:\n raise DataError(\"No keyword handler with name '%s' found\" % name)",
" def get_handler(self, name):\n try:\n ... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from model import BaseTestSuite, BaseTestCase
from keyword import BaseKeyword
from handlers import UserErrorHandler
from libraries import BaseLibrary
from statistics import Statistics
| [
[
1,
0,
0.8,
0.05,
0,
0.66,
0,
722,
0,
2,
0,
0,
722,
0,
0
],
[
1,
0,
0.85,
0.05,
0,
0.66,
0.25,
454,
0,
1,
0,
0,
454,
0,
0
],
[
1,
0,
0.9,
0.05,
0,
0.66,
0.5,
... | [
"from model import BaseTestSuite, BaseTestCase",
"from keyword import BaseKeyword",
"from handlers import UserErrorHandler",
"from libraries import BaseLibrary",
"from statistics import Statistics"
] |
#!/usr/bin/env python
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""fixml.py -- A tool to fix broken Robot Framework output files
Usage: fixml.py inpath outpath
This tool can fix Robot Framework output files that are not properly finished
or are missing elements from the middle. It should be possible to generate
reports and logs from the fixed output afterwards with the `rebot` tool.
The tool uses BeautifulSoup module which must be installed separately.
See http://www.crummy.com/software/BeautifulSoup for more information.
Additionally, the tool is only compatible with Robot Framework 2.1.3 or newer.
"""
import sys
import os
try:
from BeautifulSoup import BeautifulStoneSoup
except ImportError:
raise ImportError('fixml.py requires BeautifulSoup to be installed: '
'http://www.crummy.com/software/BeautifulSoup/')
class Fixxxer(BeautifulStoneSoup):
    """BeautifulSoup parser that repairs broken Robot Framework output XML
    by closing dangling open tags at structurally valid places."""
    # Tells BeautifulSoup which parent tags each tag may appear inside, so
    # a missing end tag can be closed at the correct nesting level.
    NESTABLE_TAGS = {
        'suite': ['robot','suite', 'statistics'],
        'doc': ['suite', 'test', 'kw'],
        'metadata': ['suite'],
        'item': ['metadata'],
        'status': ['suite', 'test', 'kw'],
        'test': ['suite'],
        'tags': ['test'],
        'tag': ['tags'],
        'kw': ['suite', 'test', 'kw'],
        'msg': ['kw', 'errors'],
        'arguments': ['kw'],
        'arg': ['arguments'],
        'statistics': ['robot'],
        'errors': ['robot'],
    }
    # Tag name to close when the next tag opens; set after </status>.
    # NOTE(review): this assumes <status> is the last child of its parent
    # element in valid output -- confirm against the output XML schema.
    __close_on_open = None
    def unknown_starttag(self, name, attrs, selfClosing=0):
        # Stamp this tool as the generator of the fixed output.
        if name == 'robot':
            attrs = [ (key, key == 'generator' and 'fixml.py' or value)
                      for key, value in attrs ]
        # A teardown keyword belongs directly under a test or suite, so
        # pop any deeper dangling tags before opening it.
        if name == 'kw' and ('type', 'teardown') in attrs:
            while self.tagStack[-1].name not in ['test', 'suite']:
                self._popToTag(self.tagStack[-1].name)
        # Close the element whose </status> was seen, now that a new tag
        # proves it has ended.
        if self.__close_on_open:
            self._popToTag(self.__close_on_open)
            self.__close_on_open = None
        BeautifulStoneSoup.unknown_starttag(self, name, attrs, selfClosing)
    def unknown_endtag(self, name):
        BeautifulStoneSoup.unknown_endtag(self, name)
        if name == 'status':
            # Remember the parent so it can be closed on the next start tag.
            self.__close_on_open = self.tagStack[-1].name
        else:
            self.__close_on_open = None
def main(inpath, outpath):
    """Parse the broken XML in `inpath`, write the fixed markup to
    `outpath` and return `outpath`.

    Fix: the original never closed the input file and left a truncated
    output file behind if parsing failed; `with` closes both reliably
    and the output file is only created after parsing succeeds.
    """
    with open(inpath) as infile:
        fixed = str(Fixxxer(infile))
    with open(outpath, 'w') as outfile:
        outfile.write(fixed)
    return outpath
if __name__ == '__main__':
    # main() raises TypeError when called with the wrong number of
    # command line arguments; show the module usage text in that case.
    try:
        outpath = main(*sys.argv[1:])
    except TypeError:
        print __doc__
    else:
        print os.path.abspath(outpath)
| [
[
8,
0,
0.2554,
0.1304,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.337,
0.0109,
0,
0.66,
0.1667,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.3478,
0.0109,
0,
0.66,... | [
"\"\"\"fixml.py -- A tool to fix broken Robot Framework output files\n\nUsage: fixml.py inpath outpath\n\nThis tool can fix Robot Framework output files that are not properly finished\nor are missing elements from the middle. It should be possible to generate\nreports and logs from the fixed output afterwards with... |
#!/usr/bin/env python
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Robot Framework Start/End/Elapsed Time Reporter
Usage: times2csv.py input-xml [output-csv] [include-items]
This script reads start, end, and elapsed times from all suites, tests and/or
keywords from the given output file, and writes them into an file in
comma-separated-values (CSV) format. CSV files can then be further processed
with spreadsheet programs. If the CSV output file is not given, its name is
got from the input file by replacing the '.xml' extension with '.csv'.
'include-items' can be used for defining which items to process. Possible
values are 'suite', 'test' and 'keyword', and they can be combined to specify
multiple items e.g. like 'suite-test' or 'test-keyword'.
Examples:
$ times2csv.py output.xml
$ times2csv.py path/results.xml path2/times.csv
$ times2csv.py output.xml times.csv test
$ times2csv.py output.xml times.csv suite-test
"""
import sys
import os
import csv
from robot.output import TestSuite
from robot import utils
def process_file(inpath, outpath, items):
    """Read the Robot output XML at `inpath` and write start/end/elapsed
    times of the requested `items` as CSV to `outpath`.

    Fix: `with` guarantees the output file is closed even if processing
    fails part-way (the original only closed it on success).
    """
    suite = TestSuite(inpath)
    with open(outpath, 'wb') as outfile:
        writer = csv.writer(outfile)
        writer.writerow(['TYPE','NAME','STATUS','START','END','ELAPSED','ELAPSED SECS'])
        process_suite(suite, writer, items.lower())
def process_suite(suite, writer, items, level=0):
    """Recursively write rows for a suite, its fixtures, sub-suites and tests."""
    if 'suite' in items:
        process_item(suite, writer, level, 'Suite')
    if 'keyword' in items:
        for fixture in (suite.setup, suite.teardown):
            process_keyword(fixture, writer, level+1)
    for child in suite.suites:
        process_suite(child, writer, items, level+1)
    for test in suite.tests:
        process_test(test, writer, items, level+1)
def process_test(test, writer, items, level):
    """Write rows for one test and, when requested, its keywords."""
    if 'test' in items:
        # Use the long name when suites themselves are not listed.
        process_item(test, writer, level, 'Test', 'suite' not in items)
    if 'keyword' in items:
        for fixture_or_kw in [test.setup] + test.keywords + [test.teardown]:
            process_keyword(fixture_or_kw, writer, level+1)
def process_keyword(kw, writer, level):
    """Write rows for one keyword and its child keywords; None is a no-op
    (missing setup/teardown)."""
    if kw is None:
        return
    # Setup/teardown/for types keep their own capitalized label.
    if kw.type in ('kw', 'set', 'repeat'):
        label = 'Keyword'
    else:
        label = kw.type.capitalize()
    process_item(kw, writer, level, label)
    for child in kw.keywords:
        process_keyword(child, writer, level+1)
def process_item(item, writer, level, item_type, long_name=False):
    """Write one CSV row for a suite/test/keyword with tree-style indent."""
    if level > 0:
        indent = '| ' * (level-1) + '|- '
    else:
        indent = ''
    if long_name:
        name = item.longname
    else:
        name = item.name
    writer.writerow([indent+item_type, name, item.status, item.starttime,
                     item.endtime, utils.elapsed_time_to_string(item.elapsedtime),
                     item.elapsedtime/1000.0])
if __name__ == '__main__':
    # Accept 1-3 arguments; show the module help on --help or misuse.
    if not (2 <= len(sys.argv) <= 4) or '--help' in sys.argv:
        print __doc__
        sys.exit(1)
    inxml = sys.argv[1]
    # Output CSV defaults to the input path with a .csv extension.
    try:
        outcsv = sys.argv[2]
    except IndexError:
        outcsv = os.path.splitext(inxml)[0] + '.csv'
    # All item types are included unless narrowed by the third argument.
    try:
        items = sys.argv[3]
    except IndexError:
        items = 'suite-test-keyword'
    process_file(inxml, outcsv, items)
    print os.path.abspath(outcsv)
| [
[
8,
0,
0.2455,
0.1786,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3482,
0.0089,
0,
0.66,
0.0909,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.3571,
0.0089,
0,
0.66... | [
"\"\"\"Robot Framework Start/End/Elapsed Time Reporter\n\nUsage: times2csv.py input-xml [output-csv] [include-items]\n\nThis script reads start, end, and elapsed times from all suites, tests and/or\nkeywords from the given output file, and writes them into an file in\ncomma-separated-values (CSV) format. CSV files... |
#!/usr/bin/env python
# tool2html.py -- Creates HTML version of given tool documentation
#
# First part of this file is Pygments configuration and actual
# documentation generation follows it.
#
# Pygments configuration
#
# This code is from 'external/rst-directive.py' file included in Pygments 0.9
# distribution. For more details see http://pygments.org/docs/rstdirective/
#
"""
The Pygments MoinMoin Parser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Docutils_ 0.4 directive that renders source code
(to HTML only, currently) via Pygments.
To use it, adjust the options below and copy the code into a module
that you import on initialization. The code then automatically
registers a ``sourcecode`` directive that you can use instead of
normal code blocks like this::
.. sourcecode:: python
My code goes here.
If you want to have different code styles, e.g. one with line numbers
and one without, add formatters with their names in the VARIANTS dict
below. You can invoke them instead of the DEFAULT one by using a
directive option::
.. sourcecode:: python
:linenos:
My code goes here.
Look at the `directive documentation`_ to get all the gory details.
.. _Docutils: http://docutils.sf.net/
.. _directive documentation:
http://docutils.sourceforge.net/docs/howto/rst-directives.html
:copyright: 2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
from pygments.formatters import HtmlFormatter
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
# 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
import os
from docutils import nodes
from docutils.parsers.rst import directives
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
def pygments_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """Docutils 'sourcecode' directive rendering code to HTML via Pygments.

    The first directive argument is the lexer name. If the body, ignoring
    empty lines, is a single line naming an existing file, that file's
    contents are highlighted instead of the body itself.
    """
    try:
        lexer = get_lexer_by_name(arguments[0])
    except ValueError:
        # no lexer found - use the text one instead of an exception
        lexer = TextLexer()
    # take an arbitrary option if more than one is given
    formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
    filtered = [ line for line in content if line ]
    if len(filtered) == 1 and os.path.isfile(filtered[0]):
        # Bug fix: open the file whose existence was checked (filtered[0]);
        # content[0] may be a blank line that the filter dropped.
        content = open(filtered[0]).read().splitlines()
    parsed = highlight(u'\n'.join(content), lexer, formatter)
    return [nodes.raw('', parsed, format='html')]
# Directive metadata for docutils: exactly one required argument (the
# lexer name), no optional arguments, whitespace allowed in the final
# argument; the directive takes body content; each VARIANT name is
# available as a flag option.
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
directives.register_directive('sourcecode', pygments_directive)
#
# Creating the documentation
#
# This code is based on rst2html.py distributed with docutils
#
# Apply the user's preferred locale so docutils output (dates etc.)
# follows it; silently keep the default locale if this fails.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass
import sys
from docutils.core import publish_cmdline
def create_tooldoc(tool_name):
    """Convert <tool>/doc/<tool>.txt (reST) to HTML next to it, using the
    user guide's stylesheet, and print the resulting absolute path."""
    description = 'HTML generator for Robot Framework Tool Documentation.'
    stylesheet_path = os.path.join(BASEDIR, '..', 'doc', 'userguide', 'src',
                                   'userguide.css')
    base_path = os.path.join(BASEDIR, tool_name, 'doc', tool_name)
    arguments = [ '--time', '--stylesheet-path=%s' % stylesheet_path,
                  base_path+'.txt', base_path+'.html' ]
    publish_cmdline(writer_name='html', description=description, argv=arguments)
    print os.path.abspath(arguments[-1])
BASEDIR = os.path.dirname(os.path.abspath(__file__))
# Tools are the dot-free sibling directories that contain a 'doc' subdir.
VALID_TOOLS = [name for name in os.listdir(BASEDIR)
               if '.' not in name
               and os.path.isdir(os.path.join(BASEDIR, name, 'doc'))]
if __name__ == '__main__':
    try:
        tool = sys.argv[1].lower()
        if tool == 'all':
            for name in sorted(VALID_TOOLS):
                create_tooldoc(name)
        elif tool in VALID_TOOLS:
            create_tooldoc(tool)
        else:
            # Unknown tool name: reuse the missing-argument handling below.
            raise IndexError
    except IndexError:
        print 'Usage: tool2html.py [ tool | all ]\n\nTools:'
        for tool in sorted(VALID_TOOLS):
            print ' %s' % tool
| [
[
8,
0,
0.2254,
0.2465,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.3873,
0.007,
0,
0.66,
0.0455,
281,
1,
0,
0,
0,
0,
4,
0
],
[
1,
0,
0.4014,
0.007,
0,
0.66,
... | [
"\"\"\"\n The Pygments MoinMoin Parser\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n This fragment is a Docutils_ 0.4 directive that renders source code\n (to HTML only, currently) via Pygments.\n\n To use it, adjust the options below and copy the code into a module",
"INLINESTYLES = False",
"from pygments.... |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import inspect
import traceback
from StringIO import StringIO
from SimpleXMLRPCServer import SimpleXMLRPCServer
try:
import signal
except ImportError:
signal = None
class RobotRemoteServer(SimpleXMLRPCServer):
    """XML-RPC server exposing a Python keyword library to Robot Framework's
    Remote library.

    Serving starts in __init__ and blocks until `stop_remote_server` is
    called (or a handled signal arrives).
    """
    # Allow rebinding the port immediately after a restart.
    allow_reuse_address = True
    def __init__(self, library, host='localhost', port=8270, allow_stop=True):
        SimpleXMLRPCServer.__init__(self, (host, int(port)), logRequests=False)
        self._library = library
        self._allow_stop = allow_stop
        self._register_functions()
        self._register_signal_handlers()
        print 'Robot Framework remote server starting at %s:%s' % (host, port)
        self.serve_forever()
    def _register_functions(self):
        # The remote library API entry points exposed over XML-RPC.
        self.register_function(self.get_keyword_names)
        self.register_function(self.run_keyword)
        self.register_function(self.get_keyword_arguments)
        self.register_function(self.get_keyword_documentation)
        self.register_function(self.stop_remote_server)
    def _register_signal_handlers(self):
        def stop_with_signal(signum, frame):
            # A signal always stops the server, even when allow_stop=False.
            self._allow_stop = True
            self.stop_remote_server()
        # 'signal' may be None (import failed) and not every signal exists
        # on every platform, hence the hasattr checks.
        if hasattr(signal, 'SIGHUP'):
            signal.signal(signal.SIGHUP, stop_with_signal)
        if hasattr(signal, 'SIGINT'):
            signal.signal(signal.SIGINT, stop_with_signal)
    def serve_forever(self):
        # Handle one request at a time so the shutdown flag is re-checked
        # between requests.
        self._shutdown = False
        while not self._shutdown:
            self.handle_request()
    def stop_remote_server(self):
        """Stop serving if allowed; always returns True (XML-RPC result)."""
        prefix = 'Robot Framework remote server at %s:%s ' % self.server_address
        if self._allow_stop:
            print prefix + 'stopping'
            self._shutdown = True
        else:
            print '*WARN* ' + prefix + 'does not allow stopping'
        return True
    def get_keyword_names(self):
        """Return the names of keywords the wrapped library exposes.

        Uses the library's own get_keyword_names/getKeywordNames method
        when present; otherwise all public routines are exposed.
        """
        get_kw_names = getattr(self._library, 'get_keyword_names', None) or \
                getattr(self._library, 'getKeywordNames', None)
        if inspect.isroutine(get_kw_names):
            names = get_kw_names()
        else:
            names = [attr for attr in dir(self._library) if attr[0] != '_'
                     and inspect.isroutine(getattr(self._library, attr))]
        return names + ['stop_remote_server']
    def run_keyword(self, name, args):
        """Run the named keyword with `args` and return a result dict with
        'status', 'return', 'output', 'error' and 'traceback' entries."""
        result = {'status': 'PASS', 'return': '', 'output': '',
                  'error': '', 'traceback': ''}
        self._intercept_stdout()
        try:
            return_value = self._get_keyword(name)(*args)
        except:
            result['status'] = 'FAIL'
            result['error'], result['traceback'] = self._get_error_details()
        else:
            result['return'] = self._handle_return_value(return_value)
        result['output'] = self._restore_stdout()
        return result
    def get_keyword_arguments(self, name):
        """Return the keyword's argument names, with defaults rendered as
        'name=value' and varargs as '*name'."""
        kw = self._get_keyword(name)
        args, varargs, _, defaults = inspect.getargspec(kw)
        if inspect.ismethod(kw):
            args = args[1:]  # drop 'self'
        if defaults:
            args, names = args[:-len(defaults)], args[-len(defaults):]
            args += ['%s=%s' % (n, d) for n, d in zip(names, defaults)]
        if varargs:
            args.append('*%s' % varargs)
        return args
    def get_keyword_documentation(self, name):
        return inspect.getdoc(self._get_keyword(name)) or ''
    def _get_keyword(self, name):
        # stop_remote_server is served by this class, not the library.
        if name == 'stop_remote_server':
            return self.stop_remote_server
        return getattr(self._library, name)
    def _get_error_details(self):
        """Return (message, traceback) for the exception being handled."""
        exc_type, exc_value, exc_tb = sys.exc_info()
        if exc_type in (SystemExit, KeyboardInterrupt):
            # Never report these as keyword failures; let them propagate.
            self._restore_stdout()
            raise
        return (self._get_error_message(exc_type, exc_value),
                self._get_error_traceback(exc_tb))
    def _get_error_message(self, exc_type, exc_value):
        name = exc_type.__name__
        message = str(exc_value)
        if not message:
            return name
        # These generic exception names add nothing to the message.
        if name in ('AssertionError', 'RuntimeError', 'Exception'):
            return message
        return '%s: %s' % (name, message)
    def _get_error_traceback(self, exc_tb):
        # Latest entry originates from this class so it can be removed
        entries = traceback.extract_tb(exc_tb)[1:]
        trace = ''.join(traceback.format_list(entries))
        return 'Traceback (most recent call last):\n' + trace
    def _handle_return_value(self, ret):
        """Convert `ret` to XML-RPC compatible values, recursing into
        lists, tuples and dicts; other objects become strings."""
        if isinstance(ret, (basestring, int, long, float)):
            return ret
        if isinstance(ret, (tuple, list)):
            return [ self._handle_return_value(item) for item in ret ]
        if isinstance(ret, dict):
            return dict([(self._str(key), self._handle_return_value(value))
                         for key, value in ret.items()])
        return self._str(ret)
    def _str(self, item):
        # None is not XML-RPC transferable; send an empty string instead.
        if item is None:
            return ''
        return str(item)
    def _intercept_stdout(self):
        # TODO: What about stderr?
        sys.stdout = StringIO()
    def _restore_stdout(self):
        output = sys.stdout.getvalue()
        sys.stdout.close()
        sys.stdout = sys.__stdout__
        return output
| [
[
1,
0,
0.0949,
0.0063,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.1013,
0.0063,
0,
0.66,
0.1667,
878,
0,
1,
0,
0,
878,
0,
0
],
[
1,
0,
0.1076,
0.0063,
0,
... | [
"import sys",
"import inspect",
"import traceback",
"from StringIO import StringIO",
"from SimpleXMLRPCServer import SimpleXMLRPCServer",
"try:\n import signal\nexcept ImportError:\n signal = None",
" import signal",
" signal = None",
"class RobotRemoteServer(SimpleXMLRPCServer):\n al... |
#!/usr/bin/env python
import os
import sys
class ExampleRemoteLibrary:
    """Example keyword library to be served via the remote server."""

    def count_items_in_directory(self, path):
        """Return the number of non-hidden items in `path`.

        Bug fix: the original called len() on a generator expression,
        which raises TypeError; a list comprehension supports len().
        """
        return len([i for i in os.listdir(path) if not i.startswith('.')])

    def strings_should_be_equal(self, str1, str2):
        """Fail with AssertionError unless the two strings are equal."""
        # print as a function call works identically on Python 2 and 3.
        print("Comparing '%s' to '%s'" % (str1, str2))
        if str1 != str2:
            raise AssertionError("Given strings are not equal")
if __name__ == '__main__':
    # Optional command line arguments are passed to the server
    # (host, port, allow_stop); imported here so the library itself
    # has no dependency on the server module.
    from robotremoteserver import RobotRemoteServer
    RobotRemoteServer(ExampleRemoteLibrary(), *sys.argv[1:])
| [
[
1,
0,
0.15,
0.05,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.2,
0.05,
0,
0.66,
0.3333,
509,
0,
1,
0,
0,
509,
0,
0
],
[
3,
0,
0.55,
0.45,
0,
0.66,
0.6... | [
"import os",
"import sys",
"class ExampleRemoteLibrary:\n\n def count_items_in_directory(self, path):\n return len(i for i in os.listdir(path) if not i.startswith('.'))\n\n def strings_should_be_equal(self, str1, str2):\n print(\"Comparing '%s' to '%s'\" % (str1, str2))\n if str1 != s... |
import sys
from SimpleXMLRPCServer import SimpleXMLRPCServer
class SimpleLibrary(SimpleXMLRPCServer):
    """Minimal remote library serving two hard-coded keywords over XML-RPC."""

    def __init__(self, port=8270):
        SimpleXMLRPCServer.__init__(self, ('localhost', int(port)))
        for func in (self.get_keyword_names, self.run_keyword,
                     self.stop_remote_server):
            self.register_function(func)
        self.serve_forever()

    def serve_forever(self):
        # Handle one request at a time so stop_remote_server can end the loop.
        self._shutdown = False
        while not self._shutdown:
            self.handle_request()

    def stop_remote_server(self):
        self._shutdown = True
        return True

    def get_keyword_names(self):
        return ['kw_1', 'kw_2', 'stop_remote_server']

    def run_keyword(self, name, args):
        """kw_1 passes returning the joined args, kw_2 fails with them as
        the error; any other name stops the server."""
        joined = ' '.join(args)
        if name == 'kw_1':
            return {'status': 'PASS', 'return': joined}
        if name == 'kw_2':
            return {'status': 'FAIL', 'error': joined}
        self.stop_remote_server()
        return {'status': 'PASS'}
if __name__ == '__main__':
    # Optional command line argument overrides the default port 8270.
    SimpleLibrary(*sys.argv[1:])
| [
[
1,
0,
0.027,
0.027,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0541,
0.027,
0,
0.66,
0.3333,
73,
0,
1,
0,
0,
73,
0,
0
],
[
3,
0,
0.5135,
0.7838,
0,
0.66,... | [
"import sys",
"from SimpleXMLRPCServer import SimpleXMLRPCServer",
"class SimpleLibrary(SimpleXMLRPCServer):\n\n def __init__(self, port=8270):\n SimpleXMLRPCServer.__init__(self, ('localhost', int(port)))\n self.register_function(self.get_keyword_names)\n self.register_function(self.run... |
# Can be used in the test data like ${MyObject()} or ${MyObject(1)}
class MyObject:
    """Test object whose string form is '<MyObject{index}>'."""

    def __init__(self, index=''):
        self.index = index

    def __str__(self):
        return '<MyObject%s>' % self.index
# Non-ASCII sample text ("Good night." in Finnish followed by
# "Thank you!" in Russian).
UNICODE = (u'Hyv\u00E4\u00E4 y\u00F6t\u00E4. '
           u'\u0421\u043F\u0430\u0441\u0438\u0431\u043E!')
# Collections containing non-string objects and nesting, used to
# exercise argument/return value conversion.
LIST_WITH_OBJECTS = [MyObject(1), MyObject(2)]
NESTED_LIST = [ [True, False], [[1, None, MyObject(), {}]] ]
NESTED_TUPLE = ( (True, False), [(1, None, MyObject(), {})] )
DICT_WITH_OBJECTS = {'As value': MyObject(1), MyObject(2): 'As key'}
NESTED_DICT = { 1: {None: False},
                2: {'A': {'n': None},
                    'B': {'o': MyObject(), 'e': {}}} }
| [
[
3,
0,
0.25,
0.3125,
0,
0.66,
0,
605,
0,
2,
0,
0,
0,
0,
0
],
[
2,
1,
0.2188,
0.125,
1,
0.21,
0,
555,
0,
2,
0,
0,
0,
0,
0
],
[
14,
2,
0.25,
0.0625,
2,
0.53,
0,
... | [
"class MyObject:\n def __init__(self, index=''):\n self.index = index\n def __str__(self):\n return '<MyObject%s>' % self.index",
" def __init__(self, index=''):\n self.index = index",
" self.index = index",
" def __str__(self):\n return '<MyObject%s>' % self.ind... |
import sys
class RemoteTestLibrary:
_unicode = (u'Hyv\u00E4\u00E4 y\u00F6t\u00E4. '
u'\u0421\u043F\u0430\u0441\u0438\u0431\u043E!')
def get_server_language(self):
lang = sys.platform.startswith('java') and 'jython' or 'python'
return '%s%d%d' % (lang, sys.version_info[0], sys.version_info[1])
# Basic communication (and documenting keywords)
def passing(self):
"""This keyword passes.
See `Failing`, `Logging`, and `Returning` for other basic keywords.
"""
pass
def failing(self, message):
"""This keyword fails with provided `message`"""
raise AssertionError(message)
def logging(self, message, level='INFO'):
"""This keywords logs given `message` with given `level`
Example:
| Logging | Hello, world! | |
| Logging | Warning!!! | WARN |
"""
print '*%s* %s' % (level, message)
def returning(self):
"""This keyword returns a string 'returned string'."""
return 'returned string'
# Logging
def one_message_without_level(self):
print 'Hello, world!'
def multiple_messages_with_different_levels(self):
print 'Info message'
print '*DEBUG* Debug message'
print '*INFO* Second info'
print 'this time with two lines'
print '*INFO* Third info'
print '*TRACE* This is ignored'
print '*WARN* Warning'
def log_unicode(self):
print self._unicode
def logging_and_failing(self):
print '*INFO* This keyword will fail!'
print '*WARN* Run for your lives!!'
raise AssertionError('Too slow')
def logging_and_returning(self):
print 'Logged message'
return 'Returned value'
def log_control_char(self):
print '\x01'
# Failures
def base_exception(self):
raise Exception('My message')
def exception_without_message(self):
raise Exception
def assertion_error(self):
raise AssertionError('Failure message')
def runtime_error(self):
raise RuntimeError('Error message')
def name_error(self):
non_existing
def attribute_error(self):
self.non_existing
def index_error(self):
[][0]
def zero_division(self):
1/0
def custom_exception(self):
raise MyException('My message')
def failure_deeper(self, rounds=10):
if rounds == 1:
raise RuntimeError('Finally failing')
self.failure_deeper(rounds-1)
# Arguments counts
def no_arguments(self):
return 'no arguments'
def one_argument(self, arg):
return arg
def two_arguments(self, arg1, arg2):
return '%s %s' % (arg1, arg2)
def seven_arguments(self, arg1, arg2, arg3, arg4, arg5, arg6, arg7):
return ' '.join((arg1, arg2, arg3, arg4, arg5, arg6, arg7))
def arguments_with_default_values(self, arg1, arg2='2', arg3=3):
return '%s %s %s' % (arg1, arg2, arg3)
def variable_number_of_arguments(self, *args):
return ' '.join(args)
def required_defaults_and_varargs(self, req, default='world', *varargs):
return ' '.join((req, default) + varargs)
# Argument types
def string_as_argument(self, arg):
self._should_be_equal(arg, self.return_string())
def unicode_string_as_argument(self, arg):
self._should_be_equal(arg, self._unicode)
def empty_string_as_argument(self, arg):
self._should_be_equal(arg, '')
def integer_as_argument(self, arg):
self._should_be_equal(arg, self.return_integer())
def negative_integer_as_argument(self, arg):
self._should_be_equal(arg, self.return_negative_integer())
def float_as_argument(self, arg):
self._should_be_equal(arg, self.return_float())
def negative_float_as_argument(self, arg):
self._should_be_equal(arg, self.return_negative_float())
def zero_as_argument(self, arg):
self._should_be_equal(arg, 0)
def boolean_true_as_argument(self, arg):
self._should_be_equal(arg, True)
def boolean_false_as_argument(self, arg):
self._should_be_equal(arg, False)
def none_as_argument(self, arg):
self._should_be_equal(arg, '')
def object_as_argument(self, arg):
self._should_be_equal(arg, '<MyObject>')
def list_as_argument(self, arg):
self._should_be_equal(arg, self.return_list())
def empty_list_as_argument(self, arg):
self._should_be_equal(arg, [])
def list_containing_none_as_argument(self, arg):
self._should_be_equal(arg, [''])
def list_containing_objects_as_argument(self, arg):
self._should_be_equal(arg, ['<MyObject1>', '<MyObject2>'])
def nested_list_as_argument(self, arg):
exp = [ [True, False], [[1, '', '<MyObject>', {}]] ]
self._should_be_equal(arg, exp)
def dictionary_as_argument(self, arg):
self._should_be_equal(arg, self.return_dictionary())
def empty_dictionary_as_argument(self, arg):
self._should_be_equal(arg, {})
def dictionary_with_non_string_keys_as_argument(self, arg):
self._should_be_equal(arg, {'1': 2, '': True})
def dictionary_containing_none_as_argument(self, arg):
self._should_be_equal(arg, {'As value': '', '': 'As key'})
def dictionary_containing_objects_as_argument(self, arg):
self._should_be_equal(arg, {'As value': '<MyObject1>', '<MyObject2>': 'As key'})
def nested_dictionary_as_argument(self, arg):
exp = { '1': {'': False},
'2': {'A': {'n': ''}, 'B': {'o': '<MyObject>', 'e': {}}} }
self._should_be_equal(arg, exp)
def _should_be_equal(self, arg, exp):
if arg != exp:
raise AssertionError('%r != %r' % (arg, exp))
# Return values
def return_string(self):
return 'Hello, world!'
def return_unicode_string(self):
return self._unicode
def return_empty_string(self):
return ''
def return_integer(self):
return 42
def return_negative_integer(self):
return -1
def return_float(self):
return 3.14
def return_negative_float(self):
return -0.5
def return_zero(self):
return 0
def return_boolean_true(self):
return True
def return_boolean_false(self):
return False
def return_nothing(self):
pass
def return_object(self):
return MyObject()
def return_list(self):
return ['One', -2, False]
def return_empty_list(self):
return []
def return_list_containing_none(self):
return [None]
def return_list_containing_objects(self):
return [MyObject(1), MyObject(2)]
def return_nested_list(self):
return [ [True, False], [[1, None, MyObject(), {}]] ]
def return_tuple(self):
return (1, 'two', True)
def return_empty_tuple(self):
return ()
def return_nested_tuple(self):
return ( (True, False), [(1, None, MyObject(), {})] )
def return_dictionary(self):
return {'one': 1, 'spam': 'eggs'}
def return_empty_dictionary(self):
return {}
def return_dictionary_with_non_string_keys(self):
return {1: 2, None: True}
def return_dictionary_containing_none(self):
return {'As value': None, None: 'As key'}
def return_dictionary_containing_objects(self):
return {'As value': MyObject(1), MyObject(2): 'As key'}
def return_nested_dictionary(self):
return { 1: {None: False},
2: {'A': {'n': None}, 'B': {'o': MyObject(), 'e': {}}} }
def return_control_char(self):
return '\x01'
# Not keywords
def _private_method(self):
pass
def __private_method(self):
pass
attribute = 'Not a keyword'
class MyObject:
def __init__(self, index=''):
self.index = index
def __str__(self):
return '<MyObject%s>' % self.index
class MyException(Exception):
pass
if __name__ == '__main__':
import sys
from robotremoteserver import RobotRemoteServer
RobotRemoteServer(RemoteTestLibrary(), *sys.argv[1:])
| [
[
1,
0,
0.0032,
0.0032,
0,
0.66,
0,
509,
0,
1,
0,
0,
509,
0,
0
],
[
3,
0,
0.4792,
0.9359,
0,
0.66,
0.25,
637,
0,
81,
0,
0,
0,
0,
65
],
[
14,
1,
0.0208,
0.0064,
1,
0... | [
"import sys",
"class RemoteTestLibrary:\n\n _unicode = (u'Hyv\\u00E4\\u00E4 y\\u00F6t\\u00E4. '\n u'\\u0421\\u043F\\u0430\\u0441\\u0438\\u0431\\u043E!')\n\n def get_server_language(self):\n lang = sys.platform.startswith('java') and 'jython' or 'python'\n return '%s%d%d' % (lang... |
class regular:
ROBOT_LIBRARY_SCOPE = 'TEST SUITE'
"""This is a very regular test library"""
def __init__(self, arg1='hello', arg2='world'):
"""Constructs a new regular test library
See `keyword`
Examples:
| regular | foo | bar |
| regular | | # default values are used |
"""
self.arg1 = arg1
self.arg2 = arg2
def keyword(self):
"""A "keyword" & it contains 'stuff' to <escape>
See `get hello` for details"""
pass
def get_hello(self):
"""Get the intialization variables
See `importing` for explanation of arguments
and `introduction` for introduction"""
return self.arg1, self.arg2
| [
[
3,
0,
0.5167,
1,
0,
0.66,
0,
2,
0,
3,
0,
0,
0,
0,
0
],
[
14,
1,
0.0667,
0.0333,
1,
0.2,
0,
305,
1,
0,
0,
0,
0,
3,
0
],
[
8,
1,
0.1333,
0.0333,
1,
0.2,
0.25,
... | [
"class regular:\n ROBOT_LIBRARY_SCOPE = 'TEST SUITE'\n\n \"\"\"This is a very regular test library\"\"\"\n\n def __init__(self, arg1='hello', arg2='world'):\n \"\"\"Constructs a new regular test library",
" ROBOT_LIBRARY_SCOPE = 'TEST SUITE'",
" \"\"\"This is a very regular test library\"\... |
class RequiredArgs:
def __init__(self, required, arguments, default="value"):
"""This library always needs two arguments and has one default.
Keyword names are got from the given arguments.
"""
self.__dict__[required] = lambda: None
self.__dict__[arguments] = lambda arg: None
self.__dict__[default] = lambda arg1, arg2: None
| [
[
3,
0,
0.5,
0.9091,
0,
0.66,
0,
764,
0,
1,
0,
0,
0,
0,
0
],
[
2,
1,
0.5909,
0.7273,
1,
0.09,
0,
555,
0,
4,
0,
0,
0,
0,
0
],
[
8,
2,
0.5,
0.3636,
2,
0.66,
0,
... | [
"class RequiredArgs:\n\n def __init__(self, required, arguments, default=\"value\"):\n \"\"\"This library always needs two arguments and has one default.\n\n Keyword names are got from the given arguments.\n \"\"\"\n self.__dict__[required] = lambda: None",
" def __init__(self, r... |
class new_style_no_init(object):
"""No __init__ on this one."""
def kw(self):
"""The only lonely keyword."""
| [
[
3,
0,
0.5833,
1,
0,
0.66,
0,
886,
0,
1,
0,
0,
186,
0,
0
],
[
8,
1,
0.5,
0.1667,
1,
0.69,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
2,
1,
0.9167,
0.3333,
1,
0.69,
1,
... | [
"class new_style_no_init(object):\n\n \"\"\"No __init__ on this one.\"\"\"\n\n def kw(self):\n \"\"\"The only lonely keyword.\"\"\"",
" \"\"\"No __init__ on this one.\"\"\"",
" def kw(self):\n \"\"\"The only lonely keyword.\"\"\"",
" \"\"\"The only lonely keyword.\"\"\""
] |
class no_arg_init:
def __init__(self):
"""This doc not shown because there are no arguments."""
def keyword(self):
"""A keyword.
See `get hello` for details and *never* run this keyword.
"""
1/0
def get_hello(self, arg):
"""Returns 'Hello `arg`!'.
See `importing` for explanation of arguments and `introduction`
for introduction. Neither of them really exist, though.
"""
return 'Hello %s' % arg
| [
[
3,
0,
0.5,
0.95,
0,
0.66,
0,
941,
0,
3,
0,
0,
0,
0,
0
],
[
2,
1,
0.175,
0.1,
1,
0.11,
0,
555,
0,
1,
0,
0,
0,
0,
0
],
[
8,
2,
0.2,
0.05,
2,
0.78,
0,
0,
... | [
"class no_arg_init:\n\n def __init__(self):\n \"\"\"This doc not shown because there are no arguments.\"\"\"\n\n def keyword(self):\n \"\"\"A keyword.",
" def __init__(self):\n \"\"\"This doc not shown because there are no arguments.\"\"\"",
" \"\"\"This doc not shown becaus... |
class dynamic:
def get_keyword_names(self):
return ['Keyword 1', 'KW 2']
def run_keyword(self, name, args):
print name, args
def get_keyword_arguments(self, name):
return [ 'arg%d' % (i+1) for i in range(int(name[-1])) ]
def get_keyword_documentation(self, name):
return '''Dummy documentation for `%s`.
Neither `Keyword 1` or `KW 2` do anything really interesting.
They do, however, accept some `arguments`.
Examples:
| Keyword 1 | arg |
| KW 1 | arg | arg 2 |
| KW 2 | arg | arg 2 |
-------
http://robotframework.org
''' % name
| [
[
3,
0,
0.5192,
1,
0,
0.66,
0,
670,
0,
4,
0,
0,
0,
0,
3
],
[
2,
1,
0.1346,
0.0769,
1,
0.03,
0,
768,
0,
1,
1,
0,
0,
0,
0
],
[
13,
2,
0.1538,
0.0385,
2,
0.66,
0,
... | [
"class dynamic:\n\n def get_keyword_names(self):\n return ['Keyword 1', 'KW 2']\n\n def run_keyword(self, name, args):\n print(name, args)",
" def get_keyword_names(self):\n return ['Keyword 1', 'KW 2']",
" return ['Keyword 1', 'KW 2']",
" def run_keyword(self, name, ar... |
u"""Module test library.
With some non-ascii stuff:
Hyv\u00E4\u00E4 y\u00F6t\u00E4.
\u0421\u043F\u0430\u0441\u0438\u0431\u043E!
"""
ROBOT_LIBRARY_VERSION = '0.1-alpha'
def keyword():
"""A keyword
See `get hello` for details"""
pass
def get_hello():
"""Get the intialization variables
See `importing` for explanation of arguments
and `introduction` for introduction"""
return 'foo'
| [
[
8,
0,
0.1739,
0.3043,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.3913,
0.0435,
0,
0.66,
0.3333,
466,
1,
0,
0,
0,
0,
3,
0
],
[
2,
0,
0.6087,
0.2174,
0,
0.66,... | [
"u\"\"\"Module test library.\n\nWith some non-ascii stuff:\n\nHyv\\u00E4\\u00E4 y\\u00F6t\\u00E4.\n\\u0421\\u043F\\u0430\\u0441\\u0438\\u0431\\u043E!\n\"\"\"",
"ROBOT_LIBRARY_VERSION = '0.1-alpha'",
"def keyword():\n \"\"\"A keyword\n\n See `get hello` for details\"\"\"\n pass",
" \"\"\"A keyword\... |
#!/usr/bin/env python
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Robot Framework Debugfile Viewer
Usage: fileviever.py [path]
This tool is mainly targeted for viewing Robot Framework debug files set
with '--debugfile' command line option when running test. The idea is to
provide a tool that has similar functionality as 'tail -f' command in
unixy systems.
The tool has a simple GUI which is updated every time the file opened into
it is updated. File can be given from command line or opened using 'Open'
button in the GUI.
"""
import os
import sys
import time
from FileDialog import LoadFileDialog
import Tkinter as Tk
class FileViewer:
def __init__(self, path=None):
self._path = path is not None and os.path.abspath(path) or None
self._file = self._open_file(path)
self._root = self._create_root()
self._create_components(self._root)
self._last_update_cmd = None
self._update()
def mainloop(self):
self._root.mainloop()
def _create_root(self):
root = Tk.Tk()
root.title('Debug file viewer, v0.1')
root.geometry('750x500+100+100')
return root
def _create_components(self, root):
self._create_toolbar(root)
self._create_statusbar(root)
self._text_area = self._create_scrollable_text_area(root)
def _create_statusbar(self, root):
statusbar = Tk.Frame(root)
self._statusbar_left = Tk.Label(statusbar)
self._statusbar_left.pack(side=Tk.LEFT)
self._statusbar_right = Tk.Label(statusbar)
self._statusbar_right.pack(side=Tk.RIGHT)
statusbar.pack(side=Tk.BOTTOM, fill=Tk.X)
def _create_toolbar(self, root):
toolbar = Tk.Frame(root, width=65)
self._create_button(toolbar, 'Open', self._open_file_dialog)
self._create_button(toolbar, 'Clear', self._clear_text)
self._create_button(toolbar, 'Exit', self._root.destroy)
self._pause_cont_button = self._create_button(toolbar, 'Pause',
self._pause_or_cont, 25)
toolbar.pack_propagate(0)
toolbar.pack(side=Tk.RIGHT, fill=Tk.Y)
def _create_button(self, parent, label, command, pady=2):
button = Tk.Button(parent, text=label, command=command)
button.pack(side=Tk.TOP, padx=2, pady=pady, fill=Tk.X)
return button
def _create_scrollable_text_area(self, root):
scrollbar = Tk.Scrollbar(root)
text = Tk.Text(root, yscrollcommand=scrollbar.set, font=("Courier", 9))
scrollbar.config(command=text.yview)
scrollbar.pack(side=Tk.RIGHT, fill=Tk.Y)
text.pack(fill=Tk.BOTH, expand=1)
return text
def _pause_or_cont(self):
if self._pause_cont_button['text'] == 'Pause':
if self._last_update_cmd is not None:
self._root.after_cancel(self._last_update_cmd)
self._pause_cont_button.configure(text='Continue')
else:
self._pause_cont_button.configure(text='Pause')
self._root.after(50, self._update)
def _update(self):
if self._file is None:
self._file = self._open_file(self._path)
if self._file is not None:
try:
if os.stat(self._path).st_size < self._last_file_size:
self._file.seek(0)
self._clear_text()
self._text_area.insert(Tk.END, self._file.read())
self._last_file_size = self._file.tell()
except (OSError, IOError):
self._file = None
self._clear_text()
self._text_area.yview('moveto', '1.0')
self._set_status_bar_text()
self._last_update_cmd = self._root.after(50, self._update)
def _clear_text(self):
self._text_area.delete(1.0, Tk.END)
def _set_status_bar_text(self):
left, right = self._path, ''
if self._path is None:
left = 'No file opened'
elif self._file is None:
right = 'File does not exist'
else:
timetuple = time.localtime(os.stat(self._path).st_mtime)
timestamp = '%d%02d%02d %02d:%02d:%02d' % timetuple[:6]
right = 'File last modified: %s' % timestamp
self._statusbar_left.configure(text=left)
self._statusbar_right.configure(text=right)
def _open_file(self, path):
if path is not None and os.path.exists(path):
self._last_file_size = os.stat(path).st_size
return open(path)
return None
def _open_file_dialog(self):
dialog = LoadFileDialog(self._root, title='Choose file to view')
fname = dialog.go()
if fname is None:
return
self._path = os.path.abspath(fname)
if self._last_update_cmd is not None:
self._root.after_cancel(self._last_update_cmd)
if self._file is not None:
self._file.close()
self._file = self._open_file(self._path)
self._clear_text()
if self._pause_cont_button['text'] == 'Continue':
self._pause_or_cont()
else:
self._update()
if __name__ == '__main__':
if len(sys.argv) > 2 or '--help' in sys.argv:
print __doc__
sys.exit(1)
app = FileViewer(*sys.argv[1:])
app.mainloop()
| [
[
8,
0,
0.1446,
0.0783,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1928,
0.006,
0,
0.66,
0.1429,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.1988,
0.006,
0,
0.66,
... | [
"\"\"\"Robot Framework Debugfile Viewer\n\nUsage: fileviever.py [path]\n\nThis tool is mainly targeted for viewing Robot Framework debug files set\nwith '--debugfile' command line option when running test. The idea is to\nprovide a tool that has similar functionality as 'tail -f' command in \nunixy systems.",
"i... |
#!/usr/bin/env python
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Robot Framework Test Status Checker
Usage: statuschecker.py infile [outfile]
This tool processes Robot Framework output XML files and checks that test case
statuses and messages are as expected. Main use case is post-processing output
files got when testing Robot Framework test libraries using Robot Framework
itself.
If output file is not given, the input file is considered to be also output
file and it is edited in place.
By default all test cases are expected to 'PASS' and have no message. Changing
the expected status to 'FAIL' is done by having word 'FAIL' (in uppercase)
somewhere in the test case documentation. Expected error message must then be
given after 'FAIL'. Error message can also be specified as a regular
expression by prefixing it with string 'REGEXP:'.
This tool also allows testing the created log messages. They are specified
using a syntax 'LOG x.y:z LEVEL Actual message', which is described in the
tool documentation.
"""
import re
from robot.output import TestSuite
def process_output(inpath, outpath=None):
suite = TestSuite(inpath)
_process_suite(suite)
suite.write_to_file(outpath)
return suite.critical_stats.failed
def _process_suite(suite):
for subsuite in suite.suites:
_process_suite(subsuite)
for test in suite.tests:
_process_test(test)
def _process_test(test):
exp = _Expected(test.doc)
_check_status(test, exp)
if test.status == 'PASS':
_check_logs(test, exp)
def _check_status(test, exp):
if exp.status != test.status:
test.status = 'FAIL'
if exp.status == 'PASS':
test.message = ("Test was expected to PASS but it FAILED. "
"Error message:\n") + test.message
else:
test.message = ("Test was expected to FAIL but it PASSED. "
"Expected message:\n") + exp.message
elif not _message_matches(test.message, exp.message):
test.status = 'FAIL'
test.message = ("Wrong error message.\n\nExpected:\n%s\n\nActual:\n%s\n"
% (exp.message, test.message))
elif test.status == 'FAIL':
test.status = 'PASS'
test.message = 'Original test failed as expected.'
def _message_matches(actual, expected):
if actual == expected:
return True
if expected.startswith('REGEXP:'):
pattern = '^%s$' % expected.replace('REGEXP:', '', 1).strip()
if re.match(pattern, actual, re.DOTALL):
return True
return False
def _check_logs(test, exp):
for kw_indices, msg_index, level, message in exp.logs:
try:
kw = test.keywords[kw_indices[0]]
for index in kw_indices[1:]:
kw = kw.keywords[index]
except IndexError:
indices = '.'.join([ str(i+1) for i in kw_indices ])
test.status = 'FAIL'
test.message = ("Test '%s' does not have keyword with index '%s'"
% (test.name, indices))
return
if len(kw.messages) <= msg_index:
if message != 'NONE':
test.status = 'FAIL'
test.message = ("Keyword '%s' should have had at least %d "
"messages" % (kw.name, msg_index+1))
else:
if _check_log_level(level, test, kw, msg_index):
_check_log_message(message, test, kw, msg_index)
def _check_log_level(expected, test, kw, index):
actual = kw.messages[index].level
if actual == expected:
return True
test.status = 'FAIL'
test.message = ("Wrong level for message %d of keyword '%s'.\n\n"
"Expected: %s\nActual: %s.\n%s"
% (index+1, kw.name, expected, actual, kw.messages[index].message))
return False
def _check_log_message(expected, test, kw, index):
actual = kw.messages[index].message.strip()
if _message_matches(actual, expected):
return True
test.status = 'FAIL'
test.message = ("Wrong content for message %d of keyword '%s'.\n\n"
"Expected:\n%s\n\nActual:\n%s"
% (index+1, kw.name, expected, actual))
return False
class _Expected:
def __init__(self, doc):
self.status, self.message = self._get_status_and_message(doc)
self.logs = self._get_logs(doc)
def _get_status_and_message(self, doc):
if 'FAIL' in doc:
return 'FAIL', doc.split('FAIL', 1)[1].split('LOG', 1)[0].strip()
return 'PASS', ''
def _get_logs(self, doc):
logs = []
for item in doc.split('LOG')[1:]:
index_str, msg_str = item.strip().split(' ', 1)
kw_indices, msg_index = self._get_indices(index_str)
level, message = self._get_log_message(msg_str)
logs.append((kw_indices, msg_index, level, message))
return logs
def _get_indices(self, index_str):
try:
kw_indices, msg_index = index_str.split(':')
except ValueError:
kw_indices, msg_index = index_str, '1'
kw_indices = [ int(index) - 1 for index in kw_indices.split('.') ]
return kw_indices, int(msg_index) - 1
def _get_log_message(self, msg_str):
try:
level, message = msg_str.split(' ', 1)
if level not in ['TRACE', 'DEBUG', 'INFO', 'WARN', 'FAIL']:
raise ValueError
except ValueError:
level, message = 'INFO', msg_str
return level, message
if __name__=='__main__':
import sys
import os
if not 2 <= len(sys.argv) <= 3 or '--help' in sys.argv:
print __doc__
sys.exit(1)
infile = sys.argv[1]
outfile = len(sys.argv) == 3 and sys.argv[2] or None
print "Checking %s" % os.path.abspath(infile)
rc = process_output(infile, outfile)
if outfile:
print "Output %s" % os.path.abspath(outfile)
if rc > 255:
rc = 255
sys.exit(rc)
| [
[
8,
0,
0.1532,
0.1183,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2204,
0.0054,
0,
0.66,
0.0833,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.2312,
0.0054,
0,
0.66... | [
"\"\"\"Robot Framework Test Status Checker\n\nUsage: statuschecker.py infile [outfile]\n\nThis tool processes Robot Framework output XML files and checks that test case\nstatuses and messages are as expected. Main use case is post-processing output\nfiles got when testing Robot Framework test libraries using Robot... |
#!/usr/bin/env python
"""qs2html.py -- Creates HTML version of Robot Framework Quick Start Guide
Usage: qs2html.py [ cr(eate) | dist | zip ]
create .. Creates the HTML version of the Quick Start Guide.
dist .... Creates the Quick Start Guide and copies it and all its dependencies
under directory named 'robotframework-quickstart-<date>'.
zip ..... Uses 'dist' to create the Quick Start Guide distribution and then
packages it into 'robotframework-quickstart-<date>.zip'.
"""
import sys
import os
import shutil
import time
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'userguide'))
import ug2html # This also initializes docutils and pygments
def create_quickstart():
from docutils.core import publish_cmdline
print 'Creating Quick Start Guide ...'
qsdir = os.path.dirname(os.path.abspath(__file__))
description = 'Quick Start Guide for Robot Framework'
arguments = '''
--time
--stylesheet-path=../userguide/src/userguide.css
quickstart.rst
quickstart.html
'''.split('\n')[1:-1]
os.chdir(qsdir)
publish_cmdline(writer_name='html', description=description, argv=arguments)
qspath = arguments[-1]
print os.path.abspath(qspath)
return qspath
def create_distribution():
qspath = create_quickstart() # we are in doc/quickstart after this
outdir = 'robotframework-quickstart-%d%02d%02d' % time.localtime()[:3]
files = { '': [qspath], 'testlibs': ['LoginLibrary.py'],
'sut': ['login.py', 'test_login.py'] }
print 'Creating distribution directory ...'
if os.path.exists(outdir):
print 'Removing previous distribution'
shutil.rmtree(outdir)
os.mkdir(outdir)
for dirname, files in files.items():
dirpath = os.path.join(outdir, dirname)
if not os.path.exists(dirpath):
print "Creating output directory '%s'" % dirpath
os.mkdir(dirpath)
for name in files:
source = os.path.join(dirname, name)
print "Copying '%s' -> '%s'" % (source, dirpath)
shutil.copy(source, dirpath)
return outdir
def create_zip():
qsdir = create_distribution()
ug2html.zip_distribution(qsdir)
if __name__ == '__main__':
actions = { 'create': create_quickstart, 'cr': create_quickstart,
'dist': create_distribution, 'zip': create_zip }
try:
actions[sys.argv[1]](*sys.argv[2:])
except (KeyError, IndexError, TypeError):
print __doc__
| [
[
8,
0,
0.1076,
0.1519,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2025,
0.0127,
0,
0.66,
0.1,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.2152,
0.0127,
0,
0.66,
... | [
"\"\"\"qs2html.py -- Creates HTML version of Robot Framework Quick Start Guide\n\nUsage: qs2html.py [ cr(eate) | dist | zip ]\n\ncreate .. Creates the HTML version of the Quick Start Guide.\n\ndist .... Creates the Quick Start Guide and copies it and all its dependencies \n under directory named 'robotfra... |
import os
import sys
import subprocess
class LoginLibrary:
def __init__(self):
self._sut_path = os.path.join(os.path.dirname(__file__),
'..', 'sut', 'login.py')
self._status = ''
def create_user(self, username, password):
self._run_command('create', username, password)
def change_password(self, username, old_pwd, new_pwd):
self._run_command('change-password', username, old_pwd, new_pwd)
def attempt_to_login_with_credentials(self, username, password):
self._run_command('login', username, password)
def status_should_be(self, expected_status):
if expected_status != self._status:
raise AssertionError("Expected status to be '%s' but was '%s'"
% (expected_status, self._status))
def _run_command(self, command, *args):
if not sys.executable:
raise RuntimeError("Could not find Jython installation")
command = [sys.executable, self._sut_path, command] + list(args)
process = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self._status = process.communicate()[0].strip()
| [
[
1,
0,
0.0303,
0.0303,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0606,
0.0303,
0,
0.66,
0.3333,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0909,
0.0303,
0,
... | [
"import os",
"import sys",
"import subprocess",
"class LoginLibrary:\n\n def __init__(self):\n self._sut_path = os.path.join(os.path.dirname(__file__),\n '..', 'sut', 'login.py')\n self._status = ''\n\n def create_user(self, username, password):",
" ... |
def simple_keyword():
"""Log a message"""
print 'You have used the simplest keyword.'
def greet(name):
"""Logs a friendly greeting to person given as argument"""
print 'Hello %s!' % name
def multiply_by_two(number):
"""Returns the given number multiplied by two
The result is always a floating point number.
This keyword fails if the given `number` cannot be converted to number.
"""
return float(number) * 2
def numbers_should_be_equal(first, second):
print '*DEBUG* Got arguments %s and %s' % (first, second)
if float(first) != float(second):
raise AssertionError('Given numbers are unequal!')
| [
[
2,
0,
0.1,
0.15,
0,
0.66,
0,
30,
0,
0,
0,
0,
0,
0,
1
],
[
8,
1,
0.1,
0.05,
1,
0.85,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
8,
1,
0.15,
0.05,
1,
0.85,
1,
535,
... | [
"def simple_keyword():\n \"\"\"Log a message\"\"\"\n print('You have used the simplest keyword.')",
" \"\"\"Log a message\"\"\"",
" print('You have used the simplest keyword.')",
"def greet(name):\n \"\"\"Logs a friendly greeting to person given as argument\"\"\"\n print('Hello %s!' % name)"... |
#!/usr/bin/env python
"""pt2html.py -- Creates HTML version of Python Tutorial
Usage: pt2html.py
"""
import sys
import os
import shutil
import time
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'userguide'))
import ug2html # This also initializes docutils and pygments
def create_tutorial():
from docutils.core import publish_cmdline
print 'Creating Python Tutorial ...'
os.chdir(os.path.dirname(os.path.abspath(__file__)))
description = 'Python Tutorial for Robot Framework Library Developers'
arguments = '''
--time
--stylesheet-path=../userguide/src/userguide.css
PythonTutorial.rst
PythonTutorial.html
'''.split('\n')[1:-1]
publish_cmdline(writer_name='html', description=description, argv=arguments)
path = arguments[-1]
print os.path.abspath(path)
return path
if __name__ == '__main__':
create_tutorial()
| [
[
8,
0,
0.125,
0.1111,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2222,
0.0278,
0,
0.66,
0.125,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.25,
0.0278,
0,
0.66,
... | [
"\"\"\"pt2html.py -- Creates HTML version of Python Tutorial\n\nUsage: pt2html.py\n\"\"\"",
"import sys",
"import os",
"import shutil",
"import time",
"sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'userguide'))",
"import ug2html # This also initializes docutils and pygments",
"de... |
#!/usr/bin/env python
"""Usage: lib2html.py [ library | all ]
Libraries:
BuiltIn (bu)
Collections (co)
Dialogs (di)
OperatingSystem (op)
Screenshot (sc)
String (st)
Telnet (te)
"""
import sys
import tempfile
import os
import re
ROOT = os.path.normpath(os.path.join(os.path.abspath(__file__),'..','..','..'))
LIBRARIES = {}
for line in __doc__.splitlines():
res = re.search('(\w+) \((\w\w)\)', line)
if res:
name, alias = res.groups()
LIBRARIES[name.lower()] = LIBRARIES[alias] = name
sys.path.insert(0, os.path.join(ROOT,'tools','libdoc'))
sys.path.insert(0, os.path.join(ROOT,'src'))
from libdoc import LibraryDoc, create_html_doc
def create_libdoc(name):
ipath = os.path.join(ROOT,'src','robot','libraries',name+'.py')
opath = os.path.join(ROOT,'doc','libraries',name+'.html')
create_html_doc(LibraryDoc(ipath), opath)
print opath
if __name__ == '__main__':
try:
name = sys.argv[1].lower()
if name == 'all':
for name in sorted(set(LIBRARIES.values())):
create_libdoc(name)
else:
create_libdoc(LIBRARIES[name])
except (IndexError, KeyError):
print __doc__
| [
[
8,
0,
0.16,
0.22,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3,
0.02,
0,
0.66,
0.0833,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.32,
0.02,
0,
0.66,
0.1667,... | [
"\"\"\"Usage: lib2html.py [ library | all ]\n\nLibraries:\n BuiltIn (bu)\n Collections (co)\n Dialogs (di)\n OperatingSystem (op)\n Screenshot (sc)",
"import sys",
"import tempfile",
"import os",
"import re",
"ROOT = os.path.normpath(os.path.join(os.path.abspath(__file__),'..','..','..'))",
"LIBRA... |
#!/usr/bin/env python
"""ug2html.py -- Creates HTML version of Robot Framework User Guide
Usage: ug2html.py [ cr(eate) | dist | zip ]
create .. Creates the user guide so that it has relative links to images,
library docs, etc. This version is stored in the version control
and distributed with the source distribution.
dist .... Creates the user guide under 'robotframework-userguide-<version>'
directory and also copies all needed images and other link targets
there. The created output directory can thus be distributed
independently.
zip ..... Uses 'dist' to create a stand-alone distribution and then packages
it into 'robotframework-userguide-<version>.zip'
Version number to use is got automatically from 'src/robot/version.py' file
created by 'package.py'.
"""
import os
import sys
import shutil
# First part of this file is Pygments configuration and actual
# documentation generation follows it.
#
#
# Pygments configuration
# ----------------------
#
# This code is from 'external/rst-directive.py' file included in Pygments 0.9
# distribution. For more details see http://pygments.org/docs/rstdirective/
#
"""
The Pygments MoinMoin Parser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Docutils_ 0.4 directive that renders source code
(to HTML only, currently) via Pygments.
To use it, adjust the options below and copy the code into a module
that you import on initialization. The code then automatically
registers a ``sourcecode`` directive that you can use instead of
normal code blocks like this::
.. sourcecode:: python
My code goes here.
If you want to have different code styles, e.g. one with line numbers
and one without, add formatters with their names in the VARIANTS dict
below. You can invoke them instead of the DEFAULT one by using a
directive option::
.. sourcecode:: python
:linenos:
My code goes here.
Look at the `directive documentation`_ to get all the gory details.
.. _Docutils: http://docutils.sf.net/
.. _directive documentation:
http://docutils.sourceforge.net/docs/howto/rst-directives.html
:copyright: 2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
from pygments.formatters import HtmlFormatter
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
# 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
from docutils import nodes
from docutils.parsers.rst import directives
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
def pygments_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
try:
lexer = get_lexer_by_name(arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
# possibility to read the content from an external file
filtered = [ line for line in content if line.strip() ]
if len(filtered) == 1:
path = filtered[0].replace('/', os.sep)
if os.path.isfile(path):
content = open(path).read().splitlines()
parsed = highlight(u'\n'.join(content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
directives.register_directive('sourcecode', pygments_directive)
#
# Create the user guide using docutils
#
# This code is based on rst2html.py distributed with docutils
#
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
def create_userguide():
from docutils.core import publish_cmdline
print 'Creating user guide ...'
ugdir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(ugdir, '..', '..', 'src', 'robot'))
from version import get_version
print 'Version:', get_version()
vfile = open(os.path.join(ugdir, 'src', 'version.txt'), 'w')
vfile.write('.. |version| replace:: %s\n' % get_version())
vfile.close()
description = 'HTML generator for Robot Framework User Guide.'
arguments = '''
--time
--stylesheet-path=src/userguide.css
src/RobotFrameworkUserGuide.txt
RobotFrameworkUserGuide.html
'''.split('\n')[1:-1]
os.chdir(ugdir)
publish_cmdline(writer_name='html', description=description, argv=arguments)
os.unlink(vfile.name)
ugpath = os.path.abspath(arguments[-1])
print ugpath
return ugpath, get_version(sep='-')
#
# Create user guide distribution directory
#
def create_distribution():
import re
from urlparse import urlparse
ugpath, version = create_userguide() # we are in doc/userguide after this
outdir = 'robotframework-userguide-%s' % version
tools = os.path.join(outdir, 'tools')
templates = os.path.join(outdir, 'templates')
libraries = os.path.join(outdir, 'libraries')
images = os.path.join(outdir, 'images')
print 'Creating distribution directory ...'
if os.path.exists(outdir):
print 'Removing previous user guide distribution'
shutil.rmtree(outdir)
for dirname in [outdir, tools, templates, libraries, images]:
print "Creating output directory '%s'" % dirname
os.mkdir(dirname)
def replace_links(res):
if not res.group(5):
return res.group(0)
scheme, _, path, _, _, fragment = urlparse(res.group(5))
if scheme or (fragment and not path):
return res.group(0)
replaced_link = '%s %s="%%s/%s"' % (res.group(1), res.group(4),
os.path.basename(path))
if path.startswith('../../tools'):
copy(path, tools)
copy_tool_images(path)
replaced_link = replaced_link % 'tools'
elif path.startswith('../../templates'):
copy(path, templates)
replaced_link = replaced_link % 'templates'
elif path.startswith('../libraries'):
copy(path, libraries)
replaced_link = replaced_link % 'libraries'
elif path.startswith('src/'):
copy(path, images)
replaced_link = replaced_link % 'images'
else:
raise ValueError('Invalid link target: %s' % path)
print "Modified link '%s' -> '%s'" % (res.group(0), replaced_link)
return replaced_link
def copy(source, dest):
print "Copying '%s' -> '%s'" % (source, dest)
shutil.copy(source, dest)
def copy_tool_images(path):
indir = os.path.dirname(path)
for line in open(os.path.splitext(path)[0]+'.txt').readlines():
if line.startswith('.. figure::'):
copy(os.path.join(indir, line.strip().split()[-1]), tools)
link_regexp = re.compile('''
(<(a|img)\s+.*?)
(\s+(href|src)="(.*?)"|>)
''', re.VERBOSE | re.DOTALL | re.IGNORECASE)
content = open(ugpath).read()
content = link_regexp.sub(replace_links, content)
outfile = open(os.path.join(outdir, os.path.basename(ugpath)), 'wb')
outfile.write(content)
outfile.close()
print os.path.abspath(outfile.name)
return outdir
#
# Create a zip distribution package
#
def create_zip():
ugdir = create_distribution()
zip_distribution(ugdir)
def zip_distribution(dirpath):
"""Generic zipper. Used also by qs2html.py """
from zipfile import ZipFile, ZIP_DEFLATED
print 'Creating zip package ...'
zippath = os.path.normpath(dirpath) + '.zip'
zipfile = ZipFile(zippath, 'w', compression=ZIP_DEFLATED)
for root, _, files in os.walk(dirpath):
for name in files:
path = os.path.join(root, name)
print "Adding '%s'" % path
zipfile.write(path)
zipfile.close()
print 'Removing distribution directory', dirpath
shutil.rmtree(dirpath)
print os.path.abspath(zippath)
if __name__ == '__main__':
actions = { 'create': create_userguide, 'cr': create_userguide,
'dist': create_distribution, 'zip': create_zip }
try:
actions[sys.argv[1]](*sys.argv[2:])
except (KeyError, IndexError, TypeError):
print __doc__
| [
[
8,
0,
0.0455,
0.072,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0871,
0.0038,
0,
0.66,
0.0435,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0909,
0.0038,
0,
0.66,... | [
"\"\"\"ug2html.py -- Creates HTML version of Robot Framework User Guide\n\nUsage: ug2html.py [ cr(eate) | dist | zip ]\n\ncreate .. Creates the user guide so that it has relative links to images,\n library docs, etc. This version is stored in the version control\n and distributed with the source ... |
#!/usr/bin/env python
"""Usage: check_test_times.py inpath [outpath]
Reads result of a test run from Robot output file and checks that no test
took longer than 3 minutest to execute. If outpath is not given, the
result is written over the original file.
"""
import sys
from robot.output import TestSuite
def check_tests(inpath, outpath=None):
if not outpath:
outpath = inpath
suite = TestSuite(inpath)
_check_execution_times(suite)
suite.write_to_file(outpath)
def _check_execution_times(suite):
for test in suite.tests:
if test.status == 'PASS' and test.elapsedtime > 1000 * 60 * 3:
test.status = 'FAIL'
test.message = 'Test execution time was too long: %s' % test.elapsedtime
for suite in suite.suites:
_check_execution_times(suite)
if __name__ == '__main__':
try:
check_tests(*sys.argv[1:])
except TypeError:
print __doc__
| [
[
8,
0,
0.1618,
0.1765,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2941,
0.0294,
0,
0.66,
0.2,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.3235,
0.0294,
0,
0.66,
... | [
"\"\"\"Usage: check_test_times.py inpath [outpath]\n\nReads result of a test run from Robot output file and checks that no test \ntook longer than 3 minutest to execute. If outpath is not given, the\nresult is written over the original file.\n\"\"\"",
"import sys",
"from robot.output import TestSuite",
"def c... |
class CheckMultipleItemsLibrary:
def items_should_not_contain(self, value, *items):
"""Checks that none of the given 'items' contains the given 'value'."""
items_containing_value = [ item for item in items if value in item ]
if items_containing_value:
message = "Items '%s' contains '%s'"
message = message % (', '.join(items_containing_value), value)
raise AssertionError(message)
| [
[
3,
0,
0.55,
1,
0,
0.66,
0,
960,
0,
1,
0,
0,
0,
0,
2
],
[
2,
1,
0.65,
0.8,
1,
0.76,
0,
121,
0,
3,
0,
0,
0,
0,
2
],
[
8,
2,
0.4,
0.1,
2,
0.06,
0,
0,
1,
... | [
"class CheckMultipleItemsLibrary:\n\n def items_should_not_contain(self, value, *items):\n \"\"\"Checks that none of the given 'items' contains the given 'value'.\"\"\"\n\n items_containing_value = [ item for item in items if value in item ]\n if items_containing_value:\n message ... |
"""Robot Framework test library example that calls C code.
This example uses Python's standard `ctypes` module, which requires
that the C code is compiled into a shared library.
It is also possible to execute this file from the command line
to test the C code manually.
"""
from ctypes import CDLL, c_char_p
LIBRARY = CDLL('./liblogin.so') # On Windows we'd use '.dll'
def check_user(username, password):
"""Validates user name and password using imported shared C library."""
if not LIBRARY.validate_user(c_char_p(username), c_char_p(password)):
raise AssertionError('Wrong username/password combination')
if __name__ == '__main__':
import sys
try:
check_user(*sys.argv[1:])
except TypeError:
print 'Usage: %s username password' % sys.argv[0]
except AssertionError, err:
print err
else:
print 'Valid password'
| [
[
8,
0,
0.1552,
0.2759,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3448,
0.0345,
0,
0.66,
0.25,
182,
0,
2,
0,
0,
182,
0,
0
],
[
14,
0,
0.4138,
0.0345,
0,
0.66,... | [
"\"\"\"Robot Framework test library example that calls C code.\n\nThis example uses Python's standard `ctypes` module, which requires\nthat the C code is compiled into a shared library.\n\nIt is also possible to execute this file from the command line \nto test the C code manually.\n\"\"\"",
"from ctypes import C... |
from robot import run as run_robot
import cProfile
import pstats
filename = 'robot.profile'
cProfile.run('run_robot("/home/husa/workspace/robotframework/atest/testdata/misc/")', filename)
p = pstats.Stats(filename)
p.strip_dirs().sort_stats(-1).print_stats()
| [
[
1,
0,
0.1,
0.1,
0,
0.66,
0,
735,
0,
1,
0,
0,
735,
0,
0
],
[
1,
0,
0.2,
0.1,
0,
0.66,
0.1667,
686,
0,
1,
0,
0,
686,
0,
0
],
[
1,
0,
0.3,
0.1,
0,
0.66,
0.3333,
... | [
"from robot import run as run_robot",
"import cProfile",
"import pstats",
"filename = 'robot.profile'",
"cProfile.run('run_robot(\"/home/husa/workspace/robotframework/atest/testdata/misc/\")', filename)",
"p = pstats.Stats(filename)",
"p.strip_dirs().sort_stats(-1).print_stats()"
] |
from os.path import dirname, join
import subprocess
basedir = dirname(__file__)
cmd = ['pybot', '--outputdir', join(basedir, 'results'), join(basedir, 'vacalc')]
pythonpath = '%s:%s' % (join(basedir, 'lib'), join(basedir, '..', 'src'))
subprocess.call(' '.join(cmd), shell=True, env={'PYTHONPATH': pythonpath})
| [
[
1,
0,
0.125,
0.125,
0,
0.66,
0,
79,
0,
2,
0,
0,
79,
0,
0
],
[
1,
0,
0.25,
0.125,
0,
0.66,
0.2,
394,
0,
1,
0,
0,
394,
0,
0
],
[
14,
0,
0.5,
0.125,
0,
0.66,
0.4... | [
"from os.path import dirname, join",
"import subprocess",
"basedir = dirname(__file__)",
"cmd = ['pybot', '--outputdir', join(basedir, 'results'), join(basedir, 'vacalc')]",
"pythonpath = '%s:%s' % (join(basedir, 'lib'), join(basedir, '..', 'src'))",
"subprocess.call(' '.join(cmd), shell=True, env={'PYTHO... |
import os
import sys
import subprocess
import datetime
import tempfile
import vacalc
class VacalcLibrary(object):
def __init__(self):
self._db_file = os.path.join(tempfile.gettempdir(),
'vacalc-atestdb.csv')
def count_vacation(self, startdate, year):
resource = vacalc.Employee('Test Resource', startdate)
return vacalc.Vacation(resource.startdate, int(year)).days
def clear_database(self):
if os.path.isfile(self._db_file):
print 'Removing %s' % self._db_file
os.remove(self._db_file)
def add_employee(self, name, startdate):
self._run('add_employee', name, startdate)
def get_employee(self, name):
self._run('get_employee', name)
def show_vacation(self, name, year):
self._run('show_vacation', name, year)
def _run(self, command, *args):
cmd = [sys.executable, vacalc.__file__, command] + list(args)
print subprocess.list2cmdline(cmd)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={'VACALC_DB': self._db_file})
self._status = proc.stdout.read().strip()
print self._status
def status_should_be(self, status):
if self._status != status:
raise AssertionError("Expected status to be '%s' but it was '%s'"
% (status, self._status))
| [
[
1,
0,
0.0217,
0.0217,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0435,
0.0217,
0,
0.66,
0.1667,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0652,
0.0217,
0,
... | [
"import os",
"import sys",
"import subprocess",
"import datetime",
"import tempfile",
"import vacalc",
"class VacalcLibrary(object):\n\n def __init__(self):\n self._db_file = os.path.join(tempfile.gettempdir(),\n 'vacalc-atestdb.csv')\n\n def count_vacati... |
from __future__ import with_statement
import os
import sys
import csv
import datetime
import tempfile
class VacalcError(Exception): pass
class EmployeeStore(object):
def __init__(self, db_file):
self._db_file = db_file
if self._db_file and os.path.isfile(self._db_file):
self._employees = self._read_employees(self._db_file)
else:
self._employees = {}
def _read_employees(self, path):
employees = {}
with open(path) as db:
for row in csv.reader(db):
employee = Employee(row[0], row[1])
employees[employee.name] = employee
return employees
def get_employee(self, name):
try:
return self._employees[name]
except KeyError:
raise VacalcError("Employee '%s' not found" % name)
def get_all_employees(self):
return self._employees.values()
def add_employee(self, employee):
if employee.name in self._employees:
raise VacalcError("Employee '%s' already exists in the system" %
employee.name)
self._employees[employee.name] = employee
self._serialize(employee)
def _serialize(self, employee):
if not self._db_file:
return
with open(self._db_file, 'a') as db:
writer = csv.writer(db, lineterminator='\n')
writer.writerow([employee.name, employee.startdate])
class Employee(object):
def __init__(self, name, startdate):
self.name = name
self.startdate = self._parse_date(startdate)
def _parse_date(self, datestring):
year, month, day = datestring.split('-')
return datetime.date(int(year), int(month), int(day))
class Vacation(object):
max_vacation = 12 * 2.5
no_vacation = 0
vacation_per_month = 2
credit_start_month = 4
work_days_required= 14
def __init__(self, empstartdate, vacation_year):
self.days = self._calculate_vacation(empstartdate, vacation_year)
def _calculate_vacation(self, start, year):
if self._has_worked_longer_than_year(start, year):
return self.max_vacation
if self._started_after_holiday_credit_year_ended(start, year):
return self.no_vacation
return self._count_working_months(start) * self.vacation_per_month
def _has_worked_longer_than_year(self, start, year):
return year-start.year > 1 or \
(year-start.year == 1 and start.month < self.credit_start_month)
def _started_after_holiday_credit_year_ended(self, start, year):
return start.year-year > 0 or \
(year == start.year and start.month >= self.credit_start_month)
def _count_working_months(self, start):
months = self.credit_start_month - start.month
if months <= 0:
months += 12
if self._first_month_has_too_few_working_days(start):
months -= 1
return months
def _first_month_has_too_few_working_days(self, start):
days = 0
date = start
while date:
if self._is_working_day(date):
days += 1
date = self._next_date(date)
return days < self.work_days_required
def _is_working_day(self, date):
return date.weekday() < 5
def _next_date(self, date):
try:
return date.replace(day=date.day+1)
except ValueError:
return None
class VacationCalculator(object):
def __init__(self, employeestore):
self._employeestore = employeestore
def show_vacation(self, name, year):
employee = self._employeestore.get_employee(name)
vacation = Vacation(employee.startdate, int(year))
return "%s has %d vacation days in year %s" \
% (name, vacation.days, year)
def add_employee(self, name, startdate):
employee = Employee(name, startdate)
self._employeestore.add_employee(employee)
return "Successfully added employee '%s'." % employee.name
def get_employee(self, name):
employee = self._employeestore.get_employee(name)
return '%s: start date %s' % (employee.name, employee.startdate)
def main(args):
db_file = os.environ.get('VACALC_DB', os.path.join(tempfile.gettempdir(),
'vacalcdb.csv'))
try:
cmd = getattr(VacationCalculator(EmployeeStore(db_file)), args[0])
return cmd(*args[1:])
except (AttributeError, TypeError):
raise VacalcError('invalid command or arguments')
if __name__ == '__main__':
try:
print main(sys.argv[1:])
sys.exit(0)
except VacalcError, err:
print err
sys.exit(1)
| [
[
1,
0,
0.0068,
0.0068,
0,
0.66,
0,
777,
0,
1,
0,
0,
777,
0,
0
],
[
1,
0,
0.0137,
0.0068,
0,
0.66,
0.0909,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0205,
0.0068,
0,
... | [
"from __future__ import with_statement",
"import os",
"import sys",
"import csv",
"import datetime",
"import tempfile",
"class VacalcError(Exception): pass",
"class EmployeeStore(object):\n\n def __init__(self, db_file):\n self._db_file = db_file\n if self._db_file and os.path.isfile(... |
VALUE_FROM_VAR_FILE='Expected Value'
| [
[
14,
0,
1,
1,
0,
0.66,
0,
246,
1,
0,
0,
0,
0,
3,
0
]
] | [
"VALUE_FROM_VAR_FILE='Expected Value'"
] |
def this_keyword_is_in_funnylib():
print 'jee'
| [
[
2,
0,
0.75,
1,
0,
0.66,
0,
708,
0,
0,
0,
0,
0,
0,
1
],
[
8,
1,
1,
0.5,
1,
0.01,
0,
535,
3,
1,
0,
0,
0,
0,
1
]
] | [
"def this_keyword_is_in_funnylib():\n print('jee')",
" print('jee')"
] |
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import subprocess
import time
from random import randint
import os
import re
import sys
from robot.libraries import BuiltIn
from robot.utils import html_escape, ArgumentParser
from robot.version import get_version
class Parallel(object):
"""
Library for executing tests in parallel from inside of a robot test case.
Tests are executed in subprocesses.
You can add arguments to all parallel test runs from `library importing`,
for a set of parallel tests with `Add Arguments For Parallel Tests` and
for an individual parallel test by passing the arguments in `Start Parallel Test`.
The following command line arguments (also from argument files) are automatically
passed to parallel tests:
--loglevel, --runmode, --pythonpath, --variable, --variablefile
Example:
| *Settings* |
| Library | Parallel | pybot |
| *Test Cases* |
| Runner |
| | Run Parallel Tests | Hello | World |
| Hello |
| | [Tags] | parallel |
| | Log | Hello ${WORLD} |
| World |
| | [Tags] | parallel |
| | Log | ${HELLO} World |
`pybot --exclude parallel --variable HELLO:Hello --variable WORLD:World .`
"""
def __init__(self, runner_script, *arguments):
"""
`runner_script` is pybot or jybot or a custom script.
`arguments` are default arguments given to every test execution.
Example:
| Library | Parallel | pybot | --variable | variable:value | --loglevel | DEBUG |
"""
self._script = runner_script
self._arguments = self._get_arguments(arguments)
self._processes = []
self._data_source = None
def _get_arguments(self, additional_arguments):
options,_ = ArgumentParser(_get_cmd_arguments()).parse_args(sys.argv[1:], argfile='argumentfile', unescape='escape')
args = []
for arg in ['loglevel', 'runmode', 'pythonpath', 'variable', 'variablefile']:
args += self._get_type_arguments(options, arg)
args += list(additional_arguments)
return args
def _get_type_arguments(self, options, key):
value = options[key]
args = []
if value is not None:
if not isinstance(value, list):
value = [value]
for var in value:
args += ['--%s' % key, var]
return args
def add_arguments_for_parallel_tests(self, *arguments):
"""Adds `arguments` to be used when parallel test is started.
`arguments` is a list of arguments to pass to parallel executions.
In the following example variable my_var is used in both of the tests
started with the keyword `Run Parallel Tests`:
| Add Arguments For Parallel Tests | --variable | my_var:value |
| Run Parallel Tests | Test | Another Test |
"""
self._arguments += list(arguments)
def set_data_source_for_parallel_tests(self, data_source):
"""Sets data source which is used when parallel tests are started.
`data_source` is path to file which contains the test/tests which are
started/executed with keywords `Start Parallel Test` or `Run Parallel
Tests`.
If tests to be executed are in the same suite and Robot Framework 2.5
or later is used, there is no need to use this keyword as `data_source`
can be automatically resolved.
Examples:
| Set Data Source For Parallel Tests | ${CURDIR}${/}my_parallel_suite.txt |
| Start Parallel Test | My Parallel Test |
| Wait All Parallel Tests |
"""
self._data_source = data_source
def start_parallel_test(self, test_name, *arguments):
"""Starts executing test with given `test_name` and `arguments`.
`arguments` is a list of Robot Framework command line arguments passed to
the started test execution. It should not include data source. Use
`Set Data Source For Parallel Tests` keyword for setting the data
source. Additional arguments can also be set in library import and with
`Add Arguments For Parallel Tests` keyword.
Returns a process object that represents this execution.
Example:
| Set Data Source For Parallel Tests | MySuite.txt |
| Start Parallel Test | Test From My Suite |
| Set Data Source For Parallel Tests | MyFriendsSuite.txt |
| Start Parallel Test | Test From My Friends Suite |
| Wait All Parallel Tests |
"""
if self._data_source is None:
self._data_source = BuiltIn.BuiltIn().replace_variables('${SUITE_SOURCE}')
process = _ParaRobo(test_name, self._data_source,
self._arguments+list(arguments))
process.run(self._script)
self._processes.append(process)
return process
def run_parallel_tests(self, *test_names):
"""Executes all given tests parallel and wait those to be ready.
Arguments can be set with keyword `Add Arguments For Parallel Tests`
and data source with keyword `Set Data Source For Parallel Tests`.
Example:
| Add Arguments For Parallel Tests | --variable | SOME_VARIABLE:someValue |
| Set Data Source For Parallel Tests | MySuite.txt |
| Run Parallel Tests | My Parallel Test | My Another Parallel Test |
When the parallel tests are from different data sources see the example in `Start Parallel Test`.
"""
processes = []
for name in test_names:
processes += [self.start_parallel_test(name)]
self.wait_parallel_tests(*processes)
def wait_parallel_tests(self, *processes):
"""Waits given `processes` to be ready and fails if any of the tests failed.
`Processes` are list of test execution processes returned from keyword
`Start Parallel Test`.
Example
| ${test 1}= | Start Parallel Test | First Test |
| ${test 2}= | Start Parallel Test | Test That Runs All The Time |
| Wait Parallel Tests | ${test 1} |
| ${test 3}= | Start Parallel Test | Third Test |
| Wait Parallel Tests | ${test 2} | ${test 3} |
"""
failed = []
for process in processes:
if process.wait() != 0:
failed += [process.test]
process.report()
self._processes.remove(process)
if failed:
raise AssertionError("Following tests failed:\n%s" % "\n".join(failed))
def wait_all_parallel_tests(self):
"""Wait all started test executions to be ready and fails if any of those failed."""
self.wait_parallel_tests(*self._processes)
def stop_all_parallel_tests(self):
"""Forcefully stops all the test executions.
NOTE: Requires Python 2.6 or later.
"""
for process in self._processes:
process.stop_test_execution()
self._processes = []
class _ParaRobo(object):
def __init__(self, test, data_source, arguments):
self.test = test
self._data_source = data_source
self._args = arguments
self._built_in = BuiltIn.BuiltIn()
id = self._create_id()
self._output = 'output_%s.xml' % id
self._log = 'log_%s.html' % id
self._output_dir = self._built_in.replace_variables("${OUTPUT DIR}")
self._monitor_out = os.path.join(self._output_dir, 'monitor_%s.txt' % id)
@property
def _suite_name(self):
name = os.path.splitext(os.path.basename(self._data_source))[0]
name = name.split('__', 1)[-1] # Strip possible prefix
name = name.replace('_', ' ').strip()
if name.islower():
name = name.title()
return name
def _create_id(self):
return "%s_%s" % (randint(0, 10000), time.strftime('%Y%m%d_%H%m%S.')+\
('%03d' % (int(time.time()*1000) % 1000)))
def run(self, script):
self._monitor_file = open(self._monitor_out, 'w')
cmd = [script,
'--outputdir', self._output_dir,
'--output', self._output,
'--report', 'None',
'--log', self._log,
'--monitorcolors', 'off',
'--test', self.test]+\
self._args + [self._data_source]
print "Starting test execution: %s" % " ".join(cmd)
self._process = subprocess.Popen(cmd,
shell=os.sep == '\\',
stdout=self._monitor_file,
stderr=self._monitor_file,
env=self._get_environment_variables())
def _get_environment_variables(self):
environment_variables = os.environ.copy()
if environment_variables.has_key("ROBOT_SYSLOG_FILE"):
del(environment_variables["ROBOT_SYSLOG_FILE"])
return environment_variables
def wait(self):
rc = self._process.wait()
self._monitor_file.close()
return rc
def report(self):
with open(self._monitor_out, 'r') as monitor_file:
monitor_output = monitor_file.read()
try:
os.remove(self._monitor_out)
except:
pass
match = re.search('^Log: (.*)$', monitor_output, re.MULTILINE)
monitor_output = self._replace_stdout_log_message_levels(monitor_output)
monitor_output = html_escape(monitor_output)
if match:
monitor_output = monitor_output.replace(match.group(1), '<a href="%s#test_%s.%s">%s</a>' % (self._log, self._suite_name, self.test, match.group(1)))
monitor_output = self._add_colours(monitor_output)
print "*HTML* %s" % monitor_output
def _replace_stdout_log_message_levels(self, output):
for level in ['TRACE', 'WARN', 'DEBUG', 'INFO', 'HTML']:
output = output.replace('\n*%s*' % level, '\n *%s*' % level)
return output
def _add_colours(self, output):
for name, colour in [("PASS", "pass"), ("FAIL", "fail"), ("ERROR", "fail")]:
output = output.replace(' %s ' % name, ' <span class="%s">%s</span> ' % (colour, name))
return output
def stop_test_execution(self):
try:
self._process.terminate()
except AttributeError:
pass
self.report()
def _get_cmd_arguments():
import robot
runner_path = os.path.join(os.path.dirname(os.path.abspath(robot.__file__)),
'runner.py')
with open(runner_path, 'r') as runner_file:
runner_content = runner_file.read()
return re.search('"""(.+)"""', runner_content, re.DOTALL).groups()[0]
| [
[
1,
0,
0.0542,
0.0034,
0,
0.66,
0,
777,
0,
1,
0,
0,
777,
0,
0
],
[
1,
0,
0.0576,
0.0034,
0,
0.66,
0.0833,
394,
0,
1,
0,
0,
394,
0,
0
],
[
1,
0,
0.061,
0.0034,
0,
0... | [
"from __future__ import with_statement",
"import subprocess",
"import time",
"from random import randint",
"import os",
"import re",
"import sys",
"from robot.libraries import BuiltIn",
"from robot.utils import html_escape, ArgumentParser",
"from robot.version import get_version",
"class Paralle... |
from Queue import Queue
from threading import Event
try:
from multiprocessing.managers import BaseManager
except ImportError:
class Python26Required(object):
def __call__(self, *args):
raise RuntimeError('Requires Python > 2.6')
def __getattr__(self, name):
raise RuntimeError('Requires Python > 2.6')
BaseManager = Python26Required()
class _create_caching_getter(object):
def __init__(self, clazz):
self._clazz = clazz
self._objects = {}
def __call__(self, key):
if key not in self._objects:
self._objects[key] = self._clazz()
return self._objects[key]
class Communicate(object):
"""Library for communication between processes.
For example this can be used to handle communication between processes of the Parallel robot library.
Requires Python 2.6
Example:
Process 1 test file:
| *Settings* |
| Library | Communicate |
| *Test Cases* |
| Communicator |
| | [Setup] | Start Communication Service |
| | Send Message To | my message queue | hello world! |
| | ${message}= | Receive Message From | other message queue |
| | Should Be Equal | ${message} | hello! |
| | [Teardown] | Stop Communication Service |
Process 2 test file:
| *Settings* |
| Library | Communicate | ${process 1 ip address if on a different machine} |
| *Test Cases* |
| Helloer |
| | ${message}= | Receive Message From | my message queue |
| | Should Be Equal | ${message} | hello world! |
| | Send Message To | other message queue | hello! |
"""
def __init__(self, address='127.0.0.1', port=2187):
"""
`address` of the communication server.
`port` of the communication server.
"""
self._address = address
self._port = int(port)
self._authkey = 'live long and prosper'
self._queue = None
self._connected = False
def _connect(self):
self._create_manager().connect()
self._connected = True
def start_communication_service(self):
"""Starts a communication server that will be used to share messages and objects between processes.
"""
self._create_manager(_create_caching_getter(Queue),
_create_caching_getter(Event)).start()
self._connected = True
def stop_communication_service(self):
"""Stops a started communication server.
This ensures that the server and the messages that it has don't influence the next tests.
To ensure that this keyword really happens place this in the teardown section.
"""
self._manager.shutdown()
self._connected = False
def _create_manager(self, queue_getter=None, event_getter=None):
BaseManager.register('get_queue', queue_getter)
BaseManager.register('get_event', event_getter)
self._manager = BaseManager((self._address, self._port), self._authkey)
return self._manager
def send_message_to(self, queue_id, value):
"""Send a message to a message queue.
`queue_id` is the identifier for the queue.
`value` is the message. This can be a string, a number or any serializable object.
Example:
In one process
| Send Message To | my queue | hello world! |
...
In another process
| ${message}= | Receive Message From | my queue |
| Should Be Equal | ${message} | hello world! |
"""
self._get_queue(queue_id).put(value)
def receive_message_from(self, queue_id, timeout=None):
"""Receive and consume a message from a message queue.
By default this keyword will block until there is a message in the queue.
`queue_id` is the identifier for the queue.
`timeout` is the time out in seconds to wait.
Returns the value from the message queue. Fails if timeout expires.
Example:
In one process
| Send Message To | my queue | hello world! |
...
In another process
| ${message}= | Receive Message From | my queue |
| Should Be Equal | ${message} | hello world! |
"""
timeout = float(timeout) if timeout is not None else None
return self._get_queue(queue_id).get(timeout=timeout)
def _get_queue(self, queue_id):
if not self._connected:
self._connect()
return self._manager.get_queue(queue_id)
def wait_for_event(self, event_id, timeout=None):
"""Waits until event with `event_id` is signaled.
Fails if optional timeout expires.
`timeout` is the time out in seconds to wait.
Example:
In one process
| Wait For Event | my event |
...
In another process
| Signal Event | my event |
"""
timeout = float(timeout) if timeout is not None else None
self._get_event(event_id).wait(timeout=timeout)
#NOTE! If Event#clear is ever exposed it has to be secured (for example r/w lock) that none
#of the processes can do it while another is at this position.
if not self._get_event(event_id).isSet():
raise Exception('Timeout')
def signal_event(self, event_id):
"""Signals an event.
If a process is waiting for this event it will stop waiting after the signal.
`event` is the identifier for the event.
Example:
In one process
| Wait For Event | my event |
...
In another process
| Signal Event | my event |
"""
return self._get_event(event_id).set()
def _get_event(self, event_id):
if not self._connected:
self._connect()
return self._manager.get_event(event_id)
| [
[
1,
0,
0.0057,
0.0057,
0,
0.66,
0,
952,
0,
1,
0,
0,
952,
0,
0
],
[
1,
0,
0.0114,
0.0057,
0,
0.66,
0.25,
83,
0,
1,
0,
0,
83,
0,
0
],
[
7,
0,
0.0398,
0.0511,
0,
0.66... | [
"from Queue import Queue",
"from threading import Event",
"try:\n from multiprocessing.managers import BaseManager\nexcept ImportError:\n class Python26Required(object):\n def __call__(self, *args):\n raise RuntimeError('Requires Python > 2.6')\n def __getattr__(self, name):\n... |
# -*- python -*-
# ex: set syntax=python:
import os
# SVN trunk that both the change poller and the builders check out.
ROBOT_FRAMEWORK_REPOSITORY = 'http://robotframework.googlecode.com/svn/trunk/'
# This is the dictionary that the buildmaster pays attention to. We also use
# a shorter alias to save typing.
c = BuildmasterConfig = {}
####### BUILDSLAVES
from buildbot.buildslave import BuildSlave
# One slave: a Debian box running Python 2.4, authenticating as 'robotci'.
c['slaves'] = [BuildSlave("debian-py2.4", "robotci")]
c['slavePortnum'] = 9989
####### CHANGESOURCES
from buildbot.changes.svnpoller import SVNPoller
# Poll Subversion for new commits every 3 minutes.
c['change_source'] = SVNPoller(ROBOT_FRAMEWORK_REPOSITORY, pollinterval=180)
####### SCHEDULERS
from buildbot.scheduler import Scheduler
c['schedulers'] = []
# Build once the tree has been quiet for 3 minutes after a change.
c['schedulers'].append(Scheduler(name="all", branch=None, treeStableTimer=180,
                                 builderNames=["PybotTests"]))
####### BUILDERS
# the 'builders' list defines the Builders. Each one is configured with a
# dictionary, using the following keys:
#  name (required): the name used to describe this builder
#  slavename (required): which slave to use, must appear in c['bots']
#  builddir (required): which subdirectory to run the builder in
#  factory (required): a BuildFactory to define how the build is run
#  periodicBuildTime (optional): if set, force a build every N seconds
from buildbot.process import factory
from buildbot.steps.source import SVN
from buildbot.steps.shell import ShellCommand
from buildbot.steps.master import MasterShellCommand
from buildbot.steps.transfer import FileUpload
import glob
# Name of the zipped test outputs uploaded from the slave, and the directory
# on the master where they are collected.
OUTPUT_ARCHIVE = 'outputs.zip'
RESULT_DIR = 'results'
class ReportGenerator(MasterShellCommand):
    """Runs generate_reports.sh on the master and attaches the resulting
    Robot Framework report/log files as HTML logs on the build step."""
    # NOTE(review): **kwargs is accepted but ignored -- confirm intended.
    def __init__(self, **kwargs):
        command = ['./generate_reports.sh', RESULT_DIR]
        MasterShellCommand.__init__(self, command)
        self.addFactoryArguments(command=command)
    def finished(self, results):
        # Rewrite links so the report points at the attached 'log' HTML log
        # instead of the log.html file on disk.
        report = open(RESULT_DIR + '/report.html').read().replace('<a href="log.html',
                                                                  '<a href="log')
        self.addHTMLLog('report', report)
        self.addHTMLLog('log', open(RESULT_DIR + '/log.html').read())
        for sublog in sorted(glob.glob(RESULT_DIR + '/log-*.html')):
            self.addHTMLLog(os.path.basename(sublog), open(sublog).read())
        return MasterShellCommand.finished(self, results)
f1 = factory.BuildFactory()
f1.addStep(SVN(svnurl=ROBOT_FRAMEWORK_REPOSITORY))
# Install the freshly checked-out Robot Framework on the slave.
f1.addStep(ShellCommand(command=['python', './install.py', 'in'],
                        description='Installing',
                        descriptionDone='Install'))
# Run the acceptance tests (can take a while, hence the one-hour timeout).
f1.addStep(ShellCommand(command=['atest/run_atests.py', 'buildbot', 'python',
                                 '--monitorcolors off',
                                 '--exclude manual',
                                 'atest/robot/'],
                        description='Robot Tests',
                        descriptionDone='Robot Tests',
                        timeout=60*60))
# Copy the zipped outputs to the master; ReportGenerator turns them into
# HTML logs attached to the build.
f1.addStep(FileUpload(slavesrc='atest/results/' + OUTPUT_ARCHIVE,
                      masterdest=RESULT_DIR +'/'+ OUTPUT_ARCHIVE))
f1.addStep(ReportGenerator())
b1 = {'name': "PybotTests",
      'slavename': "debian-py2.4",
      'builddir': "pybot-build",
      'factory': f1}
c['builders'] = [b1]
####### STATUS TARGETS
from buildbot.status import html
c['status'] = []
# Waterfall web UI on port 8010.
c['status'].append(html.WebStatus(http_port=8010))
from buildbot.status import mail
# Mail failure notifications to the commit mailing list only.
c['status'].append(mail.MailNotifier(fromaddr="buildbot@robot.radiaatto.ri.fi",
                                     extraRecipients=["robotframework-commit@googlegroups.com"],
                                     sendToInterestedUsers=False,
                                     relayhost='10.127.0.12'))
#
# from buildbot.status import words
# c['status'].append(words.IRC(host="irc.example.com", nick="bb",
#                              channels=["#example"]))
#
# from buildbot.status import client
# c['status'].append(client.PBListener(9988))
####### DEBUGGING OPTIONS
# if you set 'debugPassword', then you can connect to the buildmaster with
# the diagnostic tool in contrib/debugclient.py . From this tool, you can
# manually force builds and inject changes, which may be useful for testing
# your buildmaster without actually committing changes to your repository (or
# before you have a functioning 'sources' set up). The debug tool uses the
# same port number as the slaves do: 'slavePortnum'.
c['debugPassword'] = "passwd"
# if you set 'manhole', you can ssh into the buildmaster and get an
# interactive python shell, which may be useful for debugging buildbot
# internals. It is probably only useful for buildbot developers. You can also
# use an authorized_keys file, or plain telnet.
#from buildbot import manhole
#c['manhole'] = manhole.PasswordManhole("tcp:9999:interface=127.0.0.1",
#                                       "admin", "password")
####### PROJECT IDENTITY
# the 'projectName' string will be used to describe the project that this
# buildbot is working on. For example, it is used as the title of the
# waterfall HTML page. The 'projectURL' string will be used to provide a link
# from buildbot HTML pages to your project's home page.
c['projectName'] = "Robot Framework"
c['projectURL'] = "http://robotframework.org/"
# the 'buildbotURL' string should point to the location where the buildbot's
# internal web server (usually the html.Waterfall page) is visible. This
# typically uses the port number set in the Waterfall 'status' entry, but
# with an externally-visible host name which the buildbot cannot figure out
# without some help.
c['buildbotURL'] = "http://robot.radiaatto.ri.fi:8080/"
| [
[
1,
0,
0.0213,
0.0071,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
14,
0,
0.0355,
0.0071,
0,
0.66,
0.0278,
358,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.0567,
0.0071,
0,
... | [
"import os",
"ROBOT_FRAMEWORK_REPOSITORY = 'http://robotframework.googlecode.com/svn/trunk/'",
"c = BuildmasterConfig = {}",
"from buildbot.buildslave import BuildSlave",
"c['slaves'] = [BuildSlave(\"debian-py2.4\", \"robotci\")]",
"c['slavePortnum'] = 9989",
"from buildbot.changes.svnpoller import SVNP... |
from javax.swing import JFrame, JList, JPanel, JLabel, JTextField, JButton, Box, BoxLayout, JTable
from javax.swing.event import ListSelectionListener
from javax.swing.table import AbstractTableModel
from java.awt.event import ActionListener
from java.awt import FlowLayout, BorderLayout, Dimension, Font, Color
class VacalcFrame(object):
    """Main application window: employee overview on one side, a welcome
    pane (later swapped for the details editor) on the other."""

    def __init__(self, employees):
        self._frame = JFrame('Vacation Calculator',
                             defaultCloseOperation=JFrame.EXIT_ON_CLOSE)
        self._frame.setContentPane(self._create_ui(employees))
        self._frame.pack()

    def _create_ui(self, employees):
        self._overview = EmployeeOverview(employees, self)
        self._details = EmployeeDetails(employees)
        self._welcome = Welcome()
        content = JPanel(layout=FlowLayout())
        content.add(self._overview)
        content.add(self._welcome)
        return content

    def show(self):
        self._frame.setVisible(True)

    def employee_selected(self, employee):
        self._ensure_details_shown()
        self._details.show_employee(employee)

    def edit_new_employee(self):
        self._ensure_details_shown()
        self._details.edit_new_employee()

    def _ensure_details_shown(self):
        # The first call swaps the welcome pane for the details editor.
        if not self._welcome:
            return
        pane = self._frame.contentPane
        pane.remove(self._welcome)
        pane.add(self._details)
        self._frame.pack()
        self._welcome = None
class EmployeeOverview(JPanel):
    """Panel listing all employees, with a button for adding a new one."""

    def __init__(self, employees, overview_listener):
        JPanel.__init__(self, layout=BorderLayout())
        self._listener = overview_listener
        self._employee_list = self._create_employee_list(employees)
        self.add(self._employee_list.widget, BorderLayout.PAGE_START)
        self.add(self._create_new_employee_button(), BorderLayout.PAGE_END)

    def _create_employee_list(self, employees):
        emp_list = EmployeeList(employees)
        selection_listener = ListenerFactory(ListSelectionListener,
                                             self._list_item_selected)
        emp_list.add_selection_listener(selection_listener)
        return emp_list

    def _create_new_employee_button(self):
        button = JButton('New Employee', name='new_employee_button')
        button.addActionListener(ListenerFactory(ActionListener, self._new_employee))
        return button

    def _list_item_selected(self, event):
        selected = self._employee_list.selected_employee()
        self._listener.employee_selected(selected)

    def _new_employee(self, event):
        self._employee_list.clear_selection()
        self._listener.edit_new_employee()
class EmployeeList(object):
    """Wraps a Swing JList and keeps it in sync with the employee collection."""

    def __init__(self, employees):
        self._employees = employees
        self._employees.add_change_listener(self)
        self._list = JList(preferredSize=(200, 200), name='employee_list')
        self._populate_list()

    def _populate_list(self):
        self._list.setListData(self._employee_names())

    def _employee_names(self):
        return [employee.name for employee in self._employees.all()]

    def add_selection_listener(self, listener):
        self._list.addListSelectionListener(listener)

    def selected_employee(self):
        index = self._list.getSelectedIndex()
        return self._employees.all()[index]

    def employee_added(self, employee):
        # Change-listener callback: refresh the data and highlight the entry.
        self._populate_list()
        self._list.setSelectedValue(employee.name, True)

    def adding_employee_failed(self, error):
        # Change-listener callback: the list itself has nothing to update.
        pass

    def clear_selection(self):
        self._list.clearSelection()

    @property
    def widget(self):
        return self._list
class EmployeeDetails(JPanel):
    """Form for viewing an existing employee or entering a new one.

    Registers itself as a change listener on the employee collection so the
    status label can report save successes and failures.
    """
    def __init__(self, employees):
        JPanel.__init__(self, preferredSize=(400, 200))
        layout = BoxLayout(self, BoxLayout.Y_AXIS)
        self.setLayout(layout)
        self._employees = employees
        employees.add_change_listener(self)
        # Widgets are added top-to-bottom in creation order.
        self._create_status_label()
        self._create_name_editor()
        self._create_start_date_editor()
        self._create_save_button()
        self._create_vacation_display()
        # True while the form is in "add new employee" mode.
        self._adding_employee = False
    def _create_status_label(self):
        self._status_label = JLabel(name='status_label',
                                    font=Font(Font.SANS_SERIF, Font.PLAIN, 11))
        self.add(self._status_label)
        # NOTE(review): label is added twice (add + _add_with_padding) --
        # confirm the duplicate add is intended.
        self._add_with_padding(self._status_label, 5)
    def _create_name_editor(self):
        self.add(JLabel(text='Employee Name:'))
        self._name_editor = FixedHeightTextField('name_input')
        self._add_with_padding(self._name_editor, 5)
    def _create_start_date_editor(self):
        self.add(JLabel(text='Start Date (yyyy-mm-dd):'))
        self._start_date_editor = FixedHeightTextField('start_input')
        self._add_with_padding(self._start_date_editor, 5)
    def _create_save_button(self):
        # Hidden until the form enters edit mode.
        self._save_button = JButton('Save', name='save_button', visible=False)
        self._save_button.addActionListener(ListenerFactory(ActionListener,
                                                            self._save_button_pushed))
        self._add_with_padding(self._save_button, 5)
    def _create_vacation_display(self):
        # Vacation table is disabled for now; see VacationTableModel below.
        # self._display = JTable()
        # self._header = self._display.getTableHeader()
        # self.add(self._header)
        # self.add(self._display)
        pass
    def _add_with_padding(self, component, padding):
        self.add(component)
        self.add(Box.createRigidArea(Dimension(0, padding)))
    def show_employee(self, employee):
        # Read-only view of an existing employee.
        self._name_editor.setText(employee.name)
        self._start_date_editor.setText(str(employee.startdate))
        self._name_editor.setEditable(False)
        self._start_date_editor.setEditable(False)
        self._save_button.setVisible(False)
        # Keep the "added successfully" message visible right after a save.
        if self._adding_employee:
            self._adding_employee = False
        else:
            self._status_label.setText('')
        # self._display.setVisible(True)
        # self._display.setModel(VacationTableModel(employee))
        # self._header.setVisible(True)
    def edit_new_employee(self):
        # Clear and unlock the form for entering a new employee.
        self._name_editor.setText('')
        self._start_date_editor.setText('')
        self._name_editor.setEditable(True)
        self._start_date_editor.setEditable(True)
        self._save_button.setVisible(True)
        # self._display.setVisible(False)
        # self._header.setVisible(False)
        self._adding_employee = True
    def _save_button_pushed(self, event):
        self._employees.add(self._name_editor.getText(),
                            self._start_date_editor.getText())
    def employee_added(self, employee):
        # Change-listener callback on successful add.
        self._status_label.setForeground(Color.BLACK)
        self._status_label.setText("Employee '%s' was added successfully." % employee.name)
        self._save_button.setVisible(False)
    def adding_employee_failed(self, reason):
        # Change-listener callback on failed add.
        self._status_label.setForeground(Color.RED)
        self._status_label.setText(reason)
class FixedHeightTextField(JTextField):
    """Text field that never grows taller than its preferred height."""

    def __init__(self, name):
        JTextField.__init__(self, name=name)
        preferred = self.preferredSize
        maximum = self.maximumSize
        self.setMaximumSize(Dimension(maximum.width, preferred.height))
class Welcome(JPanel):
    """Placeholder panel shown before any employee has been selected."""

    def __init__(self):
        JPanel.__init__(self, preferredSize=(400,200))
        self.add(JLabel('VaCalc v0.1'))
class VacationTableModel(AbstractTableModel):
    """Swing table model exposing one row: year and earned vacation days."""

    _columns = ['Year', 'Vacation']

    def __init__(self, employee):
        self._employee = employee

    def getColumnName(self, index):
        return self._columns[index]

    def getColumnCount(self):
        return len(self._columns)

    def getRowCount(self):
        return 1

    def getValueAt(self, row, col):
        if col != 0:
            return '%s days' % self._employee.count_vacation(2010)
        return '2010'
def ListenerFactory(interface, func):
    """Build an instance of Java listener `interface` whose single method
    (the one not inherited from java.lang.Object) delegates to `func`."""
    from java.lang import Object
    candidates = set(dir(interface)) - set(dir(Object))
    method_name = list(candidates)[0]
    listener_type = type('Listener', (interface,), {method_name: func})
    return listener_type()
| [
[
1,
0,
0.0043,
0.0043,
0,
0.66,
0,
828,
0,
9,
0,
0,
828,
0,
0
],
[
1,
0,
0.0085,
0.0043,
0,
0.66,
0.0833,
182,
0,
1,
0,
0,
182,
0,
0
],
[
1,
0,
0.0128,
0.0043,
0,
... | [
"from javax.swing import JFrame, JList, JPanel, JLabel, JTextField, JButton, Box, BoxLayout, JTable",
"from javax.swing.event import ListSelectionListener",
"from javax.swing.table import AbstractTableModel",
"from java.awt.event import ActionListener",
"from java.awt import FlowLayout, BorderLayout, Dimens... |
from vacalcapp import VacalcApplication
| [
[
1,
0,
1,
1,
0,
0.66,
0,
545,
0,
1,
0,
0,
545,
0,
0
]
] | [
"from vacalcapp import VacalcApplication"
] |
#!/usr/bin/env python
"""Packaging script for Robot Framework
Usage: package.py command version_number [release_tag]
Argument 'command' can have one of the following values:
- sdist : create source distribution
- wininst : create Windows installer
- all : create both packages
- version : update only version information in 'src/robot/version.py'
- jar : create stand-alone jar file containing RF and Jython
'version_number' must be a version number in format '2.x(.y)', 'trunk' or
'keep'. With 'keep', version information is not updated.
'release_tag' must be either 'alpha', 'beta', 'rc' or 'final', where all but
the last one can have a number after the name like 'alpha1' or 'rc2'. When
'version_number' is 'trunk', 'release_tag' is automatically assigned to the
current date.
When creating the jar distribution, jython.jar must be placed in 'ext-lib'
directory, under the project root.
This script uses 'setup.py' internally. Distribution packages are created
under 'dist' directory, which is deleted initially. Depending on your system,
you may need to run this script with administrative rights (e.g. with 'sudo').
Examples:
package.py sdist 2.0 final
package.py wininst keep
package.py all 2.1.13 alpha
package.py sdist trunk
package.py version trunk
"""
import sys
import os
from os.path import abspath, dirname, exists, join
import shutil
import re
import time
import subprocess
import zipfile
from glob import glob
# Project layout: everything is resolved relative to this script's directory.
ROOT_PATH = abspath(dirname(__file__))
DIST_PATH = join(ROOT_PATH, 'dist')
BUILD_PATH = join(ROOT_PATH, 'build')
ROBOT_PATH = join(ROOT_PATH, 'src', 'robot')
JAVA_SRC = join(ROOT_PATH, 'src', 'java', 'org', 'robotframework')
# The stand-alone Jython jar must be placed into ext-lib/ by hand (see usage).
JYTHON_JAR = glob(join(ROOT_PATH, 'ext-lib', 'jython-standalone-*.jar'))[0]
SETUP_PATH = join(ROOT_PATH, 'setup.py')
VERSION_PATH = join(ROBOT_PATH, 'version.py')
# Accepted version numbers and release tags: literal strings are compared
# with ==, compiled patterns are matched with re.search.
VERSIONS = [re.compile('^2\.\d+(\.\d+)?$'), 'trunk', 'keep']
RELEASES = [re.compile('^alpha\d*$'), re.compile('^beta\d*$'),
            re.compile('^rc\d*$'), 'final']
VERSION_CONTENT = """# Automatically generated by 'package.py' script.
import sys
VERSION = '%(version_number)s'
RELEASE = '%(release_tag)s'
TIMESTAMP = '%(timestamp)s'
def get_version(sep=' '):
if RELEASE == 'final':
return VERSION
return VERSION + sep + RELEASE
def get_full_version(who=''):
sys_version = sys.version.split()[0]
version = '%%s %%s (%%s %%s on %%s)' \\
%% (who, get_version(), _get_interpreter(), sys_version, sys.platform)
return version.strip()
def _get_interpreter():
if sys.platform.startswith('java'):
return 'Jython'
if sys.platform == 'cli':
return 'IronPython'
return 'Python'
"""
def sdist(*version_info):
    # Update version info, then build the source distribution from a clean tree.
    version(*version_info)
    _clean()
    _create_sdist()
    _announce()
def wininst(*version_info):
    # Update version info and build the Windows installer; skipped when a
    # final installer is requested on a non-Windows platform.
    version(*version_info)
    _clean()
    if _verify_platform(*version_info):
        _create_wininst()
    _announce()
def all(*version_info):
    # Build both the source distribution and the Windows installer.
    # NOTE: intentionally shadows the builtin `all` -- commands are
    # dispatched via globals()[sys.argv[1]], so the name must stay.
    version(*version_info)
    _clean()
    _create_sdist()
    if _verify_platform(*version_info):
        _create_wininst()
    _announce()
def version(version_number, release_tag=None):
    # Validate and write version info, returning the resulting version string
    # (e.g. '2.1-alpha1'). 'keep' leaves version.py untouched; 'trunk' stamps
    # the current date as the release tag.
    _verify_version(version_number, VERSIONS)
    if version_number == 'keep':
        _keep_version()
    elif version_number =='trunk':
        _update_version(version_number, '%d%02d%02d' % time.localtime()[:3])
    else:
        _update_version(version_number, _verify_version(release_tag, RELEASES))
    # Import the just-written module to get the canonical version string.
    sys.path.insert(0, ROBOT_PATH)
    from version import get_version
    return get_version(sep='-')
def _verify_version(given, valid):
for item in valid:
if given == item or (hasattr(item, 'search') and item.search(given)):
return given
raise ValueError
def _update_version(version_number, release_tag):
    """Write src/robot/version.py with the given version, tag and timestamp."""
    timestamp = '%d%02d%02d-%02d%02d%02d' % time.localtime()[:6]
    # VERSION_CONTENT pulls version_number/release_tag/timestamp via locals().
    vfile = open(VERSION_PATH, 'wb')
    vfile.write(VERSION_CONTENT % locals())
    vfile.close()
    print('Updated version to %s %s' % (version_number, release_tag))
def _keep_version():
    """Leave version.py untouched; just report what it currently contains."""
    sys.path.insert(0, ROBOT_PATH)
    from version import get_version
    print('Keeping version %s' % get_version())
def _clean():
    """Delete the dist/ and build/ output directories when present."""
    print('Cleaning up...')
    for stale_dir in (DIST_PATH, BUILD_PATH):
        if exists(stale_dir):
            shutil.rmtree(stale_dir)
def _verify_platform(version_number, release_tag=None):
if release_tag == 'final' and os.sep != '\\':
print 'Final Windows installers can only be created in Windows.'
print 'Windows installer was not created.'
return False
return True
def _create_sdist():
    # Thin wrapper: setup.py sdist.
    _create('sdist', 'source distribution')
def _create_wininst():
    """Build the Windows installer; warn when not building on Windows."""
    _create('bdist_wininst', 'Windows installer')
    if os.sep == '\\':
        return
    print('Warning: Windows installers created on other platforms may not')
    print('be exactly identical to ones created in Windows.')
def _create(command, name):
    """Run 'setup.py <command>', exiting with its status code on failure."""
    print('Creating %s...' % name)
    status = os.system('%s %s %s' % (sys.executable, SETUP_PATH, command))
    if status != 0:
        print('Creating %s failed.' % name)
        sys.exit(status)
    print('%s created successfully.' % name.capitalize())
def _announce():
    """List the absolute paths of everything that ended up in dist/."""
    print('Created:')
    for created in os.listdir(DIST_PATH):
        print(abspath(join(DIST_PATH, created)))
def jar(*version_info):
    # Build the stand-alone robotframework-<version>.jar: compile the Java
    # launcher, unpack Jython, add byte-compiled robot sources, replace the
    # manifest and jar everything up.
    ver = version(*version_info)
    tmpdir = _create_tmpdir()
    _compile_java_classes(tmpdir)
    _unzip_jython_jar(tmpdir)
    _copy_robot_files(tmpdir)
    _compile_all_py_files(tmpdir)
    _overwrite_manifest(tmpdir, ver)
    jar_path = _create_jar_file(tmpdir, ver)
    shutil.rmtree(tmpdir)
    print 'Created %s based on %s' % (jar_path, JYTHON_JAR)
def _compile_java_classes(tmpdir):
    """Compile the Java sources into `tmpdir` with Jython on the classpath."""
    sources = [join(JAVA_SRC, name)
               for name in os.listdir(JAVA_SRC) if name.endswith('.java')]
    print('Compiling %d source files' % len(sources))
    javac = ['javac', '-d', tmpdir, '-target', '1.5', '-cp', JYTHON_JAR]
    subprocess.call(javac + sources)
def _create_tmpdir():
    """Create and return an empty temporary jar build directory."""
    path = join(ROOT_PATH, 'tmp-jar-dir')
    if exists(path):
        shutil.rmtree(path)
    os.mkdir(path)
    return path
def _unzip_jython_jar(tmpdir):
    """Extract the whole Jython stand-alone jar into `tmpdir`.

    Fix: the ZipFile was never closed, leaking its file handle for the
    rest of the run.
    """
    archive = zipfile.ZipFile(JYTHON_JAR)
    try:
        archive.extractall(tmpdir)
    finally:
        archive.close()
def _copy_robot_files(tmpdir):
    """Copy robot sources to Lib/robot, excluding compiled files."""
    # pyc files must be excluded so that compileall works properly.
    target = join(tmpdir, 'Lib', 'robot')
    shutil.copytree(ROBOT_PATH, target, ignore=shutil.ignore_patterns('*.pyc*'))
def _compile_all_py_files(tmpdir):
    """Byte-compile everything with Jython, then drop robot's .py sources."""
    subprocess.call(['java', '-jar', JYTHON_JAR, '-m', 'compileall', tmpdir])
    # Jython will not work without its py-files, but robot will
    robot_lib = join(tmpdir, 'Lib', 'robot')
    for dirpath, _, filenames in os.walk(robot_lib):
        for filename in filenames:
            if filename.endswith('.py'):
                os.remove(join(dirpath, filename))
def _overwrite_manifest(tmpdir, version):
    # Replace Jython's manifest so the jar boots Robot Framework directly.
    # The manifest text intentionally sits at column zero: leading
    # whitespace would corrupt the MANIFEST.MF format.
    with open(join(tmpdir, 'META-INF', 'MANIFEST.MF'), 'w') as mf:
        mf.write('''Manifest-Version: 1.0
Main-Class: org.robotframework.RobotFramework
Specification-Version: 2
Implementation-Version: %s
''' % version)
def _create_jar_file(source, version):
    """Assemble dist/robotframework-<version>.jar from `source`; return its path."""
    jar_path = join(DIST_PATH, 'robotframework-%s.jar' % version)
    if not exists(DIST_PATH):
        os.mkdir(DIST_PATH)
    _fill_jar(source, jar_path)
    return jar_path
def _fill_jar(sourcedir, jarpath):
    """Pack the contents of `sourcedir` into `jarpath` ('M': no manifest added)."""
    command = ['jar', 'cvfM', jarpath, '.']
    subprocess.call(command, cwd=sourcedir)
if __name__ == '__main__':
    try:
        # Commands are dispatched by name: package.py <command> <args...>.
        globals()[sys.argv[1]](*sys.argv[2:])
    except (KeyError, IndexError, TypeError, ValueError):
        # Unknown command, wrong argument count or invalid version: show usage.
        print __doc__
| [
[
8,
0,
0.0798,
0.1387,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1555,
0.0042,
0,
0.66,
0.0233,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.1597,
0.0042,
0,
0.66... | [
"\"\"\"Packaging script for Robot Framework\n\nUsage: package.py command version_number [release_tag]\n\nArgument 'command' can have one of the following values:\n - sdist : create source distribution\n - wininst : create Windows installer\n - all : create both packages",
"import sys",
"import os",... |
'''
Read from a two-level dict: returns dic[key1][key2] normalized by the
running total kept in dic[key1][0]. (Original label wrongly said
"one-level read".)
'''
def getElement_twolevel(dic, key1, key2):
	"""Return P(key2|key1) = dic[key1][key2] / dic[key1][0], or 0.0 when
	key1/key2 is absent or the total in slot 0 is zero.

	Slot 0 of each inner dict holds the running total used as denominator.
	"""
	# Fix: dict.has_key() is deprecated and removed in Python 3; `in` is
	# equivalent and works on both interpreters.
	if key1 in dic:
		if dic[key1][0] == 0:
			return 0.0
		if key2 in dic[key1]:
			return 1.0 * dic[key1][key2] / dic[key1][0]
	return 0.0
'''
Read from a one-level dict: returns dic[key] normalized by the total kept
in dic[0]. (Original label wrongly said "one-level write".)
'''
def getElement_onelevel(dic, key):
	"""Return dic[key] / dic[0] (slot 0 = running total), or 0.0 when the
	total is zero or the key is missing."""
	if dic[0] == 0:
		return 0.0
	# Fix: replaced Python-3-removed dict.has_key() with `in`.
	if key in dic:
		return 1.0 * dic[key] / dic[0]
	return 0.0
'''
Write into a two-level dict, maintaining the running total in slot 0 of
the inner dict.
'''
def addElement_twolevel(dic, key1, key2, value):
	"""Add `value` under dic[key1][key2], keeping the running total in slot 0.

	Fix: replaced Python-3-removed dict.has_key() with `in`.
	NOTE(review): passing key2 == 0 for an existing key1 would double-count
	into the total slot; callers appear to avoid key 0 -- confirm.
	"""
	if key1 not in dic:
		# First entry for key1: the value is both the entry and the total.
		dic[key1] = {}
		dic[key1][key2] = value
		dic[key1][0] = value
	else:
		if key2 not in dic[key1]:
			dic[key1][key2] = value
			dic[key1][0] += value
		else:
			dic[key1][key2] = dic[key1][key2] + value
			dic[key1][0] += value
| [
[
8,
0,
0.0588,
0.0882,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
2,
0,
0.2059,
0.2059,
0,
0.66,
0.2,
516,
0,
3,
1,
0,
0,
0,
2
],
[
4,
1,
0.2059,
0.1471,
1,
0.97,
... | [
"'''\n一阶字典读\n'''",
"def getElement_twolevel(dic, key1, key2):\n\tif dic.has_key(key1):\n\t\tif dic[key1][0] == 0:\n\t\t\treturn 0.0\n\t\tif dic[key1].has_key(key2):\n\t\t\treturn 1.0 * dic[key1][key2] / dic[key1][0]\n\treturn 0.0",
"\tif dic.has_key(key1):\n\t\tif dic[key1][0] == 0:\n\t\t\treturn 0.0\n\t\tif di... |
'''
prerequisite:
1) allocate the term-id and doc-id;
2) build a language model for each passage (fixed window)
3) get an initialed translation model
'''
from ML import ML
from QueryManager import QueryManager
from Query import Query
from Lexicon import Lexicon
from TranslationModel import TranslationModel
from time import time
from DictUtils import *
from multiprocessing import Process, Pipe
from config import EM_TM_path
def printModel(dic):
	"""Debug helper: print a dict-of-dicts, one outer key per line with its
	inner 'key:value' pairs concatenated after it."""
	for outer in dic.keys():
		line = str(outer) + ' '
		for inner in dic[outer].keys():
			line = line + str(inner) + ':' + str(dic[outer][inner])
		print(line)
#将一个字典写入文件,按照进程编号
def WriteDict(d, num):
	"""Serialize a dict-of-dicts to a file named str(num): one outer key per
	line, inner entries written as 'key:value ' pairs."""
	target = open(str(num), 'w')
	for outer in d.keys():
		pieces = [str(outer), ' ']
		for inner in d[outer].keys():
			pieces.append(str(inner) + ':' + str(d[outer][inner]) + ' ')
		target.write(''.join(pieces) + '\n')
	target.close()
#将一个字典从文件中读出,按照进程编号
def ReadDict(num):
	"""Parse a file written by WriteDict (named str(num)) back into a
	dict-of-dicts with int keys and float values."""
	source = open(str(num), 'r')
	lines = source.readlines()
	source.close()
	result = {}
	for line in lines:
		fields = line.split()
		outer = int(fields[0])
		result[outer] = {}
		for field in fields[1:]:
			pieces = field.split(':')
			result[outer][int(pieces[0])] = float(pieces[1])
	return result
class ATM: # aligned translation model
	'''
	EM algorithm for training:
	qd_reader: a reader for query-doc pair;
	init_tm: the initial translation model the EM iterations start from
	'''
	# Training subtask for one worker process: [begin, end) is the range of
	# query indices handled by this process, `num` is the process index (also
	# used as the temp-file name), and `prev_global_translation` is the model
	# produced by the previous EM iteration.
	def train_mul(self, begin, end,num, prev_global_translation):
		doc_translation_local = {}
		for query_index in xrange(begin, end):
			query = self.qm.getQuery(query_index)  # one Query object
			q = []
			for term in query.getQuery().split():  # query text is a plain string
				q.append(self.lexicon.getIdByTerm(term))  # term -> numeric id
			# Maximum-likelihood object for the answering document; the
			# passage models do not require an explicit load().
			doc = ML(str(query.getAnsdoc()))
			passage_models = doc.getPassageModels()  # per-passage LM list
			passage_scores = []
			passage_translations = []
			for lm in passage_models:
				# t3 = clock()
				passage_score, passage_translation = self.passage_score(q, lm, prev_global_translation)
				passage_scores.append(passage_score)
				passage_translations.append(passage_translation)
				# t4 = clock()
				# print 'Query pair cost %f s' % (t4-t3)
			self.passage_norm(passage_scores)
			doc_translation = self.doc_translation_norm(passage_translations, passage_scores)
			self.update_translation(doc_translation_local, doc_translation)
		# Partial counts go to a temp file named after the process index; the
		# parent merges all partial results after the workers join.
		WriteDict(doc_translation_local, num)
	'''
	Developer note (translated): likely trouble spots --
	1. Word ids start from 1, so slot 0 of each dict stores the running sum.
	2. Only passage_translation has no 0th element; the other dicts follow 1.
	3. DictUtils has three helpers (one/two-level reads and two-level write).
	Because word lists are fetched via keys() at every step, key == 0 must
	be skipped everywhere.
	'''
	def train(self, init_tm, iterate_num, model_diff):
		# Run at most `iterate_num` EM iterations, stopping early when the
		# L1 difference between consecutive models drops below `model_diff`.
		prev_global_translation = init_tm
		self.qm = QueryManager()
		self.qm.load()
		self.collection = ML('collection')
		self.collection.load()
		query_count = 10000#self.qm.getSize() #test for 1000 queries
		self.lexicon = Lexicon()
		self.lexicon.load()
		for i in xrange(iterate_num):
			# import pdb
			# pdb.set_trace()
			t1 = time()
			print 'Iterate %d model :' % (i+1)
			# printModel(prev_global_translation)
			global_translation = {}
			pool = []
			kernelnum = 16
			# Partition the queries evenly over the worker processes.
			for j in xrange(kernelnum):
				pool.append(Process(target=self.train_mul, args=(query_count*j/kernelnum, query_count*(j+1)/kernelnum,j,prev_global_translation)))
			for j in xrange(kernelnum):
				pool[j].start()
			for j in xrange(kernelnum):
				pool[j].join()
			# Merge the per-process partial counts written by train_mul.
			for j in xrange(kernelnum):
				doc_translation = ReadDict(j)
				self.update_translation(global_translation, doc_translation)
			self.translation_norm(global_translation)
			error = self.compare(prev_global_translation, global_translation)
			print 'Iterate %d error %f .' % (i+1, error)
			if(error < model_diff):
				break;
			prev_global_translation = global_translation;
			t2 = time()
			print 'Iterate %d cost %f s' % (i+1, t2-t1)
		self.model = global_translation
	def writeModel(self):
		# NOTE(review): opens the literal file name 'EM_TM_path', not the
		# EM_TM_path constant imported from config; load() uses the same
		# literal so the pair is self-consistent -- confirm this is intended.
		f = open('EM_TM_path', 'w')#test path
		for td in self.model.keys():
			line = str(td) + ' '
			for tq in self.model[td].keys():
				if tq == 0:
					continue
				line = line + str(tq) + ':' + str(self.model[td][tq]) + ' '
			line = line + '\n'
			f.write(line)
		f.close()
	'''
	Load the model back from file.
	'''
	def load(self):
		f = open('EM_TM_path', 'r')
		self.model = {}
		lines = f.readlines()
		f.close()
		for line in lines:
			items = line.split()
			td = int(items[0])
			for item in items[1:]:
				tq = int(item.split(':')[0])
				value = float(item.split(':')[1])
				addElement_twolevel(self.model, td, tq ,value)
	'''
	Probability of translating word td into word tq.
	'''
	def getProb(self, td, tq):
		return getElement_twolevel(self.model, td, tq)
	def passage_score(self, q, lm, ptm):
		# Score one passage LM against query term ids `q` under the current
		# translation model `ptm`; also accumulate the per-pair expected
		# counts (the E-step contributions) in `translation`.
		score = 1.0
		translation = {}
		for td in lm.keys():
			if td == 0:
				continue
			translation[td] = {}
		# NOTE(review): col_ML is loaded but never used below; the
		# collection model actually used is self.collection -- confirm.
		col_ML = ML('collection')
		col_ML.load()
		for tq in q:
			k_score = 0.0
			for td in lm.keys():
				if td == 0:
					continue
				p1 = getElement_twolevel(ptm, td, tq)
				p2 = getElement_onelevel(lm, td)
				tmp_alpha = 1e-5
				# Jelinek-Mercer style smoothing with the collection model.
				tmp_score = p1*((1-tmp_alpha)*p2+self.collection.getProb(td)*tmp_alpha)
				if tmp_score== 0:
					continue
				translation[td][tq] = tmp_score
				k_score = k_score + tmp_score
			score = score * k_score
		return (score, translation)
	def passage_norm(self, passage_scores):
		# Normalize the passage scores in place so they sum to one.
		denominator = 0.0
		for score in passage_scores:
			denominator = denominator + score
		if denominator == 0:
			return
		for i in xrange(len(passage_scores)):
			passage_scores[i] = passage_scores[i] / denominator
	def doc_translation_norm(self, passage_translations, passage_scores):
		# Combine per-passage expected counts weighted by passage score,
		# then normalize per target word. Note the key order flips here:
		# the result is indexed [tq][td].
		doc_translation = {}
		for k in xrange(len(passage_scores)):
			if passage_scores[k] == 0:
				continue
			for td in passage_translations[k].keys():
				for tq in passage_translations[k][td].keys():
					addElement_twolevel(doc_translation, tq, td, passage_scores[k] * passage_translations[k][td][tq])
		for tq in doc_translation.keys():
			for td in doc_translation[tq].keys():
				if td == 0:
					continue #Remember not do normalization to 0th element
				doc_translation[tq][td] = doc_translation[tq][td] / doc_translation[tq][0]
			doc_translation[tq][0] = 1.0
		return doc_translation
	def update_translation(self, global_translation, doc_translation):
		# Accumulate document-level counts into the global model (note the
		# [tq][td] -> [td][tq] index flip).
		for tq in doc_translation.keys():
			for td in doc_translation[tq].keys():
				if td == 0:
					continue
				addElement_twolevel(global_translation, td, tq, doc_translation[tq][td])
	def translation_norm(self, global_translation):
		# Normalize each source word's row so the probabilities sum to one.
		for td in global_translation.keys():
			for tq in global_translation[td].keys():
				if tq == 0:
					continue
				global_translation[td][tq] = global_translation[td][tq] / global_translation[td][0]
			global_translation[td][0] = 1.0
	def compare(self, pt, gt):
		# L1 distance between two models over the union of their keys.
		# NOTE(review): `-` binds tighter than `|`, so set([0]) is removed
		# only from gt's key set; a td == 0 coming from pt would still be
		# visited -- confirm whether that is intended.
		diff = 0.0
		td_list = set(pt.keys()) | set(gt.keys()) - set([0])
		word_num = 0
		for td in td_list:
			tq_list = set()
			if pt.has_key(td):
				tq_list = tq_list | set(pt[td].keys())
			if gt.has_key(td):
				tq_list = tq_list | set(gt[td].keys())
			tq_list = tq_list - set([0])
			for tq in tq_list:
				word_num = word_num + 1
				diff = diff + abs(getElement_twolevel(pt, td, tq) - getElement_twolevel(gt, td, tq))
		print 'word_num: %d' % (word_num)
		return diff
if __name__ == '__main__':
	# Smoke test: train 10 EM iterations from the initial translation model,
	# reload and probe a single word pair.
	# NOTE(review): load() re-reads the model file BEFORE writeModel() saves
	# the freshly trained model -- confirm the call order is intended.
	atm = ATM()
	tm_model = TranslationModel()
	i_tm = tm_model.getTM_dict(True)
	atm.train(i_tm, 10, 100)
	atm.load()
	atm.writeModel()
	print atm.getProb(65542, 71749)
| [
[
8,
0,
0.0184,
0.0246,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0328,
0.0041,
0,
0.66,
0.0714,
455,
0,
1,
0,
0,
455,
0,
0
],
[
1,
0,
0.0369,
0.0041,
0,
0.66... | [
"'''\nprerequisite:\n1) allocate the term-id and doc-id;\n2) build a language model for each passage (fixed window)\n3) get an initialed translation model\n'''",
"from ML import ML",
"from QueryManager import QueryManager",
"from Query import Query",
"from Lexicon import Lexicon",
"from TranslationModel i... |
from os.path import exists, join
from Document import Document
from Lexicon import Lexicon
from config import *
from Query import Query
class QueryManager:
	"""Builds and serves the query collection extracted from document comments."""

	def create(self):
		"""Write every document comment out as a 3-line query record
		(queryid=, ans_doc=, query=)."""
		query_id = 0
		query_file = open(query_path, 'w')
		for i in xrange(doccount):
			paper = Document(i)
			comment_list = paper.getComments()
			for comment in comment_list:
				query_file.write('queryid='+str(query_id)+'\n')
				query_file.write('ans_doc='+str(i)+'\n')
				query_file.write('query='+comment.strip()+'\n')
				query_id = query_id + 1
		query_file.close()

	def load(self):
		"""Parse the query file back into self.query_list (Query objects)."""
		self.query_list = []
		query_file = open(query_path, 'r')
		lines = query_file.readlines()
		query_file.close()
		# Each record is exactly three lines: queryid=, ans_doc=, query=.
		# (Fix: dropped the unused `querycount` local.)
		for i in xrange(0, len(lines), 3):
			query = Query(lines[i].split('=')[1].strip(),
				int(lines[i+1].split('=')[1].strip()), lines[i+2].split('=')[1].strip())
			self.query_list.append(query)

	def getQuery(self, query_id):
		"""Return the Query with the given id (ids are list indices)."""
		return self.query_list[query_id]

	def getSize(self):
		"""Number of loaded queries."""
		return len(self.query_list)
if __name__ == '__main__':
	# Manual smoke test: rebuild the query file and probe one record.
	qm = QueryManager()
	qm.create()
	qm.load()
	print qm.getQuery(28911)
	print qm.getSize()
| [
[
1,
0,
0.0263,
0.0263,
0,
0.66,
0,
79,
0,
2,
0,
0,
79,
0,
0
],
[
1,
0,
0.0526,
0.0263,
0,
0.66,
0.2,
920,
0,
1,
0,
0,
920,
0,
0
],
[
1,
0,
0.0789,
0.0263,
0,
0.66,... | [
"from os.path import exists, join",
"from Document import Document",
"from Lexicon import Lexicon",
"from config import *",
"from Query import Query",
"class QueryManager:\n\n\tdef create(self):\n\t\tquery_id = 0\n\t\tquery_file = open(query_path, 'w')\n\t\tfor i in xrange(doccount):\n\t\t\tpaper = Docum... |
from Lexicon import Lexicon
from ML import ML
from config import *
from TranslationModel import TranslationModel
class OfflineTranslationModel:
	'''
	Precomputes, for every document D and candidate query word q,
	sum over w of P(q|w) * P(w|D), and serves the table from memory.
	'''
	def create(self):
		"""Build the offline table for all documents and write it to disk."""
		lexicon = Lexicon()
		lexicon.load()
		doc_list = []
		offline_tm = []
		for doc in xrange(doccount):
			ml = ML(str(doc))
			ml.load()
			doc_list.append(ml)
		trans_model = TranslationModel()
		trans_model.load()
		for doc in xrange(doccount):
			print('Processing doc ' + str(doc))
			dic = {}
			for wordid in doc_list[doc].getWordsList():
				for trans_id in trans_model.getExtensionList(wordid):
					contribution = trans_model.getProb(wordid, trans_id) * doc_list[doc].getProb(wordid)
					# Fix: dict.has_key() is deprecated / removed in Py3.
					if trans_id in dic:
						dic[trans_id] = dic[trans_id] + contribution
					else:
						dic[trans_id] = contribution
			offline_tm.append(dic)
		f = open(Offline_TM_path, 'w')
		for doc in xrange(doccount):
			line = ''
			for (key, value) in offline_tm[doc].items():
				line = line + str(key) + ':' + str(value) + ' '
			line = line + '\n'
			f.write(line)
		# Fix: the output handle was never closed (leaked, possibly
		# leaving the last write unflushed).
		f.close()
	def load(self):
		"""Read the offline table back into self.offline_tm (list of dicts)."""
		self.offline_tm = []
		# NOTE(review): create() writes Offline_TM_path but this reads
		# offline_TM_path -- confirm both names exist in config.
		f = open(offline_TM_path, 'r')
		lines = f.readlines()
		f.close()
		for i in xrange(len(lines)):
			items = lines[i].split()
			dic = {}
			for item in items:
				dic[int(item.split(':')[0])] = float(item.split(':')[1].strip())
			self.offline_tm.append(dic)
	def getProb(self, docId, wordId):
		"""Return the precomputed score for (docId, wordId); 0.0 when absent."""
		if wordId in self.offline_tm[docId]:
			return self.offline_tm[docId][wordId]
		return 0.0
if __name__ == '__main__':
	# Manual smoke test: precompute, reload and probe one (doc, word) pair.
	otm = OfflineTranslationModel()
	otm.create()
	otm.load()
	print otm.getProb(5182, 10242)
| [
[
1,
0,
0.0164,
0.0164,
0,
0.66,
0,
16,
0,
1,
0,
0,
16,
0,
0
],
[
1,
0,
0.0328,
0.0164,
0,
0.66,
0.2,
455,
0,
1,
0,
0,
455,
0,
0
],
[
1,
0,
0.0492,
0.0164,
0,
0.66,... | [
"from Lexicon import Lexicon",
"from ML import ML",
"from config import *",
"from TranslationModel import TranslationModel",
"class OfflineTranslationModel:\n\t'''\n\tcompute offline sum P(q|w)*P(w|D)\n\t'''\n\tdef create(self):\n\t\tlexicon = Lexicon()\n\t\tlexicon.load()\n\t\tdoc_list = []",
"\t'''\n\tc... |
from config import OfflineTMSingle_path
from os.path import join
class OfflineTMSingle:
def __init__(self, docId):
self.path = join(OfflineTMSingle_path, str(docId))
def load(self):
self.model = {}
f = open(self.path, 'r')
line = f.readlines()[0]
f.close()
items = line.split()
for item in items:
self.model[int(item.split(':')[0])]=float(item.split(':')[1])
def getProb(self, wordId):
if self.model.has_key(wordId):
return self.model[wordId]
return 0.0
# Demo: load the offline model of document 12 and print one word's score.
if __name__ == '__main__':
    otms = OfflineTMSingle(12)
    otms.load()
    print otms.getProb(262153)
| [
[
1,
0,
0.0435,
0.0435,
0,
0.66,
0,
308,
0,
1,
0,
0,
308,
0,
0
],
[
1,
0,
0.087,
0.0435,
0,
0.66,
0.3333,
79,
0,
1,
0,
0,
79,
0,
0
],
[
3,
0,
0.5,
0.6957,
0,
0.66,
... | [
"from config import OfflineTMSingle_path",
"from os.path import join",
"class OfflineTMSingle:\n\tdef __init__(self, docId):\n\t\tself.path = join(OfflineTMSingle_path, str(docId))\n\t\n\tdef load(self):\n\t\tself.model = {}\n\t\tf = open(self.path, 'r')\n\t\tline = f.readlines()[0]",
"\tdef __init__(self, do... |
import os
from config import *
import logging
class Document:
    '''
    One document of the corpus, loaded from disk by its numeric id.
    '''

    def __init__(self, docnum):
        self.path = os.path.join(datapath, str(docnum))
        if not os.path.exists(self.path):
            logging.error('Not exists %s' % self.path)
        # Lines 0-2 look like "name=value"; everything from line 4 on is
        # one comment per line (line 3 is a separator).
        with open(self.path, 'r') as f:
            lines = f.readlines()
        self.title, self.abstract, self.body = [
            lines[i].split('=')[1].strip() for i in (0, 1, 2)]
        self.comments = list(lines[4:])

    def getTitle(self):
        '''Title field of the document.'''
        return self.title

    def getAbstract(self):
        '''Abstract field of the document.'''
        return self.abstract

    def getBody(self):
        '''Body field of the document.'''
        return self.body

    def getComments(self):
        '''List of raw comment lines (newlines preserved).'''
        return self.comments

    def getFulltext(self):
        '''Title, abstract and body joined with single spaces.'''
        return ' '.join((self.title, self.abstract, self.body))
if __name__ == '__main__':
logging.basicConfig(filename='log', format='%(asctime)s %(levelname)s %(message)s',
level=logging.DEBUG)
paper = Document(38300)
print paper.getTitle()
print paper.getAbstract()
print paper.getBody()
print '\n'.join(paper.getComments())
print '--------------------------------'
print paper.getPassageModels()
| [
[
1,
0,
0.0263,
0.0263,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0526,
0.0263,
0,
0.66,
0.3333,
308,
0,
1,
0,
0,
308,
0,
0
],
[
1,
0,
0.0789,
0.0263,
0,
... | [
"import os",
"from config import *",
"import logging",
"class Document:\n\tdef __init__(self, docnum):\n\t\tself.path = os.path.join(datapath, str(docnum))\n\t\tif not os.path.exists(self.path):\n\t\t\tlogging.error('Not exists %s' % self.path) \n\t\tf = open(self.path, 'r')\n\t\tlines = f.readlines()\n\t\tf.... |
from os.path import exists, join
from Document import Document
from Lexicon import Lexicon
from config import *
class Query:
    '''
    A single evaluation query.

    query_id is the query's number, ans_doc the id of the document relevant
    to it, and query the literal query text.
    '''

    def __init__(self, query_id, ans_doc, query):
        self.query_id = query_id
        self.ans_doc = ans_doc
        self.query = query

    # Plain accessors, kept for symmetry with the rest of the code base.
    def getAnsdoc(self):
        '''Id of the relevant document.'''
        return self.ans_doc

    def getQuery(self):
        '''Literal query text.'''
        return self.query

    def getQueryId(self):
        '''Number of this query.'''
        return self.query_id

    # Matching mutators.
    def setAnsdoc(self, ans_doc):
        self.ans_doc = ans_doc

    def setQuery(self, query):
        self.query = query

    def setQueryId(self, query_id):
        self.query_id = query_id
| [
[
1,
0,
0.0333,
0.0333,
0,
0.66,
0,
79,
0,
2,
0,
0,
79,
0,
0
],
[
1,
0,
0.0667,
0.0333,
0,
0.66,
0.25,
920,
0,
1,
0,
0,
920,
0,
0
],
[
1,
0,
0.1,
0.0333,
0,
0.66,
... | [
"from os.path import exists, join",
"from Document import Document",
"from Lexicon import Lexicon",
"from config import *",
"class Query:\n\t\n\t'''\n\tfor a specific query indexed by its number\n\t'''\n\tdef __init__(self, query_id, ans_doc, query):\n\t\tself.query_id = query_id\n\t\tself.ans_doc = ans_doc... |
from config import *
from os.path import exists
from Document import Document
from Lexicon import Lexicon
from os import mkdir,rmdir
import os.path
class ML:
    '''
    Maximum-likelihood language models: P(w|C) for the whole collection and
    P(w|D) for a single document.

    doc is a string: either a document number or 'collection' for the
    collection-wide model.
    '''

    def __init__(self, doc):
        '''
        doc can be a document number or 'collection' as the whole;
        doc must be str type.
        '''
        self.doc = doc

    def getML(self, lexicon, word_list):
        '''
        Return raw term counts for word_list as a list indexed by term id;
        word ids are 1-indexed, slot 0 holds the total token count.
        '''
        numTerms = lexicon.getSize() + 1
        ML = [0] * numTerms
        for word in word_list:
            word_index = lexicon.getIdByTerm(word)
            ML[word_index] = ML[word_index] + 1
            ML[0] = ML[0] + 1
        return ML

    def getML_dict(self, lexicon, word_list):
        '''Same counts as getML() but as a dict {term_id: count}; key 0 is
        the total number of tokens.'''
        model = {0: len(word_list)}
        for word in word_list:
            word_index = lexicon.getIdByTerm(word)
            # dict.get() replaces the deprecated has_key() test.
            model[word_index] = model.get(word_index, 0) + 1
        return model

    def create(self):
        '''Build and serialize the per-document models and the collection
        model.  File format: first line is the total token count, then one
        "termId count" line per occurring term.'''
        # BUG FIX: the old code did rmdir(ML_path) when the directory
        # existed, which raises OSError as soon as it contains files.
        # Only create the directory when missing; files are overwritten.
        if not exists(ML_path):
            mkdir(ML_path)
        lexicon = Lexicon()
        lexicon.load()
        collection_content = ''
        for doc_index in xrange(doccount):
            print('Processing ' + str(doc_index))
            paper = Document(doc_index)
            content = paper.getFulltext()
            word_list = content.split()
            doc_ML = self.getML(lexicon, word_list)
            f = open(os.path.join(ML_path, str(doc_index)), 'w')
            f.write(str(doc_ML[0]) + '\n')
            for i in xrange(1, len(doc_ML)):
                if doc_ML[i] != 0:
                    f.write(str(i) + ' ' + str(doc_ML[i]) + '\n')
            f.close()
            # The collection model also includes the comments.
            collection_content = collection_content + content + ' ' + ' '.join(paper.getComments())
        collection_list = collection_content.split()
        collection_ML = self.getML(lexicon, collection_list)
        f = open(collection_path, 'w')
        f.write(str(collection_ML[0]) + '\n')
        for i in xrange(1, len(collection_ML)):
            if collection_ML[i] != 0:
                f.write(str(i) + ' ' + str(collection_ML[i]) + '\n')
        f.close()

    def load(self):
        '''Read this model's file back into the dict self.ML
        (0 -> total token count, term_id -> count).'''
        self.ML = {}
        f = open(os.path.join(ML_path, self.doc), 'r')
        lines = f.readlines()
        f.close()
        self.ML[0] = int(lines[0].strip())
        for line in lines[1:]:
            word_id = int(line.split()[0])
            word_num = int(line.split()[1])
            self.ML[word_id] = word_num

    def getProb(self, word_id):
        '''Maximum-likelihood estimate count(w)/total; 0.0 for unseen words.'''
        if word_id in self.ML:
            return 1.0 * self.ML[word_id] / self.ML[0]
        return 0.0

    def getWordsList(self):
        '''Sorted term ids present in this model (the 0 total slot excluded).'''
        return sorted(self.ML.keys())[1:]

    def getPassageModels(self):
        '''
        Split the document into fixed windows of passage_length words and
        return one getML_dict() model per passage.  Because the last window
        is usually shorter than passage_length, it is merged into the
        second-to-last one (only when there are more than two windows).
        '''
        paper = Document(self.doc)
        self.models = []
        items = paper.getFulltext().split()
        doc_len = len(items)
        lexicon = Lexicon()
        lexicon.load()
        left = []
        right = []
        for i in xrange(0, doc_len, passage_length):
            left.append(i)
            right.append(min(doc_len, i + passage_length))
        length = len(left)
        if len(right) > 2:
            # Extend the second-to-last window to the end of the document
            # and drop the (now covered) last window.
            right[-2] = right[-1]
            length -= 1
        for i in xrange(length):
            self.models.append(self.getML_dict(lexicon, items[left[i]:right[i]]))
        return self.models
# Demo: rebuild every ML model from the corpus (slow), then reload the
# model of document '2' and print one word's probability.
if __name__ == '__main__':
    ml = ML('2')
    ml.create()
    ml.load()
    print ml.getProb(717)
    #print ml.getWordsList()
    print collection_path
    print '------------------------'
    #print ml.getPassageModels()
| [
[
1,
0,
0.0088,
0.0088,
0,
0.66,
0,
308,
0,
1,
0,
0,
308,
0,
0
],
[
1,
0,
0.0177,
0.0088,
0,
0.66,
0.1667,
79,
0,
1,
0,
0,
79,
0,
0
],
[
1,
0,
0.0265,
0.0088,
0,
0.... | [
"from config import *",
"from os.path import exists",
"from Document import Document",
"from Lexicon import Lexicon",
"from os import mkdir,rmdir",
"import os.path",
"class ML:\n\t'''Generate collection-wise and document-wise information , namely P(w|C), P(w|D)'''\n\t'''doc是一个字符串形式,字符串字面可以为数字或collection... |
from Lexicon import Lexicon
from ML import ML
from config import *
from TranslationModel import TranslationModel
from multiprocessing import Pipe, Process
import os
from os.path import join, exists
class OfflineTranslationModel:
    '''
    Compute, for every document D and word q, the offline translation score
    sum_w P(q|w) * P(w|D).  The corpus is split across worker processes;
    each worker writes one file per document under tmp_path, and the files
    are then concatenated into Offline_TM_path in document-id order.
    '''

    def create_multi(self, begin, end, doc_list, trans_model):
        '''Worker: score documents [begin, end).  doc_list holds only that
        slice, so the local index is (global id - begin).'''
        for i in xrange(begin, end):
            doc = i - begin
            print('Processing doc ' + str(i))
            dic = {}
            for wordid in doc_list[doc].getWordsList():
                extensionlist = trans_model.getExtensionList(wordid)
                for trans_id in extensionlist:
                    # Accumulate P(q|w) * P(w|D); get() replaces has_key().
                    dic[trans_id] = dic.get(trans_id, 0.0) + \
                        trans_model.getProb(wordid, trans_id) * doc_list[doc].getProb(wordid)
            f = open(join(tmp_path, str(i)), 'w')
            line = ''
            for (key, value) in dic.items():
                line = line + str(key) + ':' + str(value) + ' '
            f.write(line + '\n')
            f.close()

    def create(self, is_filt=False):
        '''Fan the corpus out over kernelnum worker processes, then merge
        their per-document files.  is_filt is forwarded to
        TranslationModel.load() (truncated translation lists).'''
        # BUG FIX: os.rmdir() raises OSError on a non-empty directory; only
        # create tmp_path when it is missing -- files are overwritten.
        if not os.path.exists(tmp_path):
            os.mkdir(tmp_path)
        lexicon = Lexicon()
        lexicon.load()
        doc_list = []
        for doc in xrange(doccount):
            ml = ML(str(doc))
            ml.load()
            doc_list.append(ml)
        trans_model = TranslationModel()
        trans_model.load(is_filt)
        kernelnum = 16
        pools = []
        for i in xrange(kernelnum):
            pools.append(Process(target=self.create_multi,
                args=(doccount * i / kernelnum, doccount * (i + 1) / kernelnum,
                      doc_list[doccount * i / kernelnum:doccount * (i + 1) / kernelnum],
                      trans_model)))
        for i in xrange(kernelnum):
            pools[i].start()
        for i in xrange(kernelnum):
            pools[i].join()
        # BUG FIX: the merge used os.listdir(), whose lexicographic order
        # ('0', '1', '10', ...) does not match document ids, so the line
        # index built by load() no longer corresponded to the docId used by
        # getProb().  Concatenate strictly in document-id order instead.
        wf = open(Offline_TM_path, 'w')
        for doc in xrange(doccount):
            f = open(join(tmp_path, str(doc)), 'r')
            wf.write(f.read())
            f.close()
        wf.close()

    def load(self):
        '''Read Offline_TM_path; line i becomes the score dict of doc i.'''
        self.offline_tm = []
        f = open(Offline_TM_path, 'r')
        lines = f.readlines()
        f.close()
        for i in xrange(len(lines)):
            items = lines[i].split()
            dic = {}
            for item in items:
                dic[int(item.split(':')[0])] = float(item.split(':')[1].strip())
            self.offline_tm.append(dic)

    def getProb(self, docId, wordId):
        '''Precomputed score of (docId, wordId); 0.0 when absent.'''
        if wordId in self.offline_tm[docId]:
            return self.offline_tm[docId][wordId]
        return 0.0
# Demo: build the offline model with translation-list truncation enabled,
# reload it and print one (document, word) score.
if __name__ == '__main__':
    otm = OfflineTranslationModel()
    otm.create(True)
    otm.load()
    print otm.getProb(5182, 10242)
| [
[
1,
0,
0.0103,
0.0103,
0,
0.66,
0,
16,
0,
1,
0,
0,
16,
0,
0
],
[
1,
0,
0.0206,
0.0103,
0,
0.66,
0.125,
455,
0,
1,
0,
0,
455,
0,
0
],
[
1,
0,
0.0309,
0.0103,
0,
0.6... | [
"from Lexicon import Lexicon",
"from ML import ML",
"from config import *",
"from TranslationModel import TranslationModel",
"from multiprocessing import Pipe, Process",
"import os",
"from os.path import join, exists",
"class OfflineTranslationModel:\n\t'''\n\tcompute offline sum P(q|w)*P(w|D)\n\t'''\... |
from os.path import exists, join
from Document import Document
from Lexicon import Lexicon
from config import *
class TranslationModel:
    '''
    Global word-to-word translation model estimated from (abstract word,
    comment word) co-occurrence counts.  Row w of the serialized file holds
    the row total followed by "termId:count" pairs sorted by falling count.
    '''

    def load(self, is_filt=False):
        '''Load the serialized model into self.tm, a list of dicts indexed
        by 1-based term id (index 0 unused).  Key 0 of each dict is the
        row's total count.  When is_filt is true, each word keeps at most
        filter_num translations.'''
        self.tm = []
        for i in xrange(lexicon_size + 1):
            self.tm.append({})
        tm_file = open(TM_path, 'r')
        lines = tm_file.readlines()
        tm_file.close()
        for i in xrange(lexicon_size):
            self.tm[i + 1][0] = int(lines[i].split()[0])
            pair_list = lines[i].split()[1:]
            length = len(pair_list)
            if is_filt and filter_num < length:
                length = filter_num
            for j in xrange(length):
                pair = pair_list[j]
                term_id = int(pair.split(':')[0])
                count = int(pair.split(':')[1])
                self.tm[i + 1][term_id] = count

    def getTM_dict(self, is_filt=False):
        '''
        Used by ATM: return the global translation model as a two-level
        dict, skipping words whose total count is zero.
        '''
        tm = {}
        tm_file = open(TM_path, 'r')
        lines = tm_file.readlines()
        tm_file.close()
        for i in xrange(lexicon_size):
            sum_num = int(lines[i].split()[0])
            if sum_num == 0:
                continue
            tm[i + 1] = {}
            tm[i + 1][0] = sum_num
            pair_list = lines[i].split()[1:]
            length = len(pair_list)
            if is_filt and filter_num < length:
                length = filter_num
            for j in xrange(length):
                pair = pair_list[j]
                term_id = int(pair.split(':')[0])
                count = int(pair.split(':')[1])
                tm[i + 1][term_id] = count
        return tm

    def create(self):
        '''Count abstract-word / comment-word co-occurrences over the whole
        corpus and serialize them to TM_path.'''
        abstract = []
        comments = []
        for i in xrange(doccount):
            paper = Document(i)
            abstract.append(paper.getAbstract())
            comments.append(paper.getComments())
        self.lexicon = Lexicon()
        self.lexicon.load()
        self.tm = []
        for i in xrange(lexicon_size + 1):
            self.tm.append({})
            self.tm[i][0] = 0
        for i in xrange(doccount):
            print('Processing doc %d' % i)
            if len(abstract[i]) != 0 and len(comments[i]) != 0:
                self.__UniCalculate(abstract[i], comments[i])
        tm_file = open(TM_path, 'w')
        for i in xrange(1, lexicon_size + 1):
            line = str(self.tm[i][0]) + ' '
            # Pairs by descending count; [1:] drops the 0 total entry,
            # which sorts first because it is the row maximum.
            for (key, value) in sorted(self.tm[i].items(), key=lambda d: d[1], reverse=
                    True)[1:]:
                line = line + str(key) + ':' + str(value) + ' '
            line = line + '\n'
            tm_file.write(line)
        tm_file.close()

    def __UniCalculate(self, abstract, comment_list):
        '''
        Treat different comments of a document as different samples.
        '''
        abs_words = abstract.split()
        # NOTE(review): __unify() returns a deduplicated copy but its result
        # is discarded here, so duplicated words keep their full counts --
        # confirm whether that is intended; assigning the result would
        # change the trained model.
        self.__unify(abs_words)
        for comment in comment_list:
            comment_words = comment.split()
            self.__unify(comment_words)
            for aw in abs_words:
                aw_id = self.lexicon.getIdByTerm(aw)
                for cw in comment_words:
                    cw_id = self.lexicon.getIdByTerm(cw)
                    # get() replaces the deprecated has_key() test.
                    self.tm[aw_id][cw_id] = self.tm[aw_id].get(cw_id, 0) + 1
                self.tm[aw_id][0] = self.tm[aw_id][0] + len(comment_words)

    def __MultiCalculate(self, abstract, comment_list):
        '''
        Treat all comments of a document as only one comment.
        (Currently unused; create() calls __UniCalculate.)
        '''
        abs_words = abstract.split()
        comment_words = []
        # BUG FIX: this used to iterate "comment_words()" -- calling the
        # empty list, a TypeError -- and extend() with comment.strip(),
        # which would have appended single characters.  Iterate the actual
        # comment_list and extend with each comment's words.
        for comment in comment_list:
            comment_words.extend(comment.split())
        self.__unify(abs_words)
        self.__unify(comment_words)
        for aw in abs_words:
            aw_id = self.lexicon.getIdByTerm(aw)
            for cw in comment_words:
                cw_id = self.lexicon.getIdByTerm(cw)
                self.tm[aw_id][cw_id] = self.tm[aw_id].get(cw_id, 0) + 1
            self.tm[aw_id][0] = self.tm[aw_id][0] + len(comment_words)

    def __unify(self, word_list):
        '''Return word_list with duplicates removed (order arbitrary).'''
        return list(set(word_list))

    def getProb(self, orig_id, dest_id):
        '''P(dest|orig) = count(orig, dest) / total(orig); 0.0 when unseen.'''
        if dest_id in self.tm[orig_id]:
            return 1.0 * self.tm[orig_id][dest_id] / self.tm[orig_id][0]
        return 0.0

    def getExtensionList(self, word_id):
        '''Sorted ids of words word_id can translate to (0 total excluded).'''
        return sorted(self.tm[word_id].keys())[1:]
# Demo: rebuild the translation model from the corpus, reload it without
# truncation and print one translation probability.
if __name__ == '__main__':
    tm = TranslationModel()
    tm.create()
    tm.load(False)
    print tm.getProb(206,37107)
| [
[
1,
0,
0.0076,
0.0076,
0,
0.66,
0,
79,
0,
2,
0,
0,
79,
0,
0
],
[
1,
0,
0.0153,
0.0076,
0,
0.66,
0.25,
920,
0,
1,
0,
0,
920,
0,
0
],
[
1,
0,
0.0229,
0.0076,
0,
0.66... | [
"from os.path import exists, join",
"from Document import Document",
"from Lexicon import Lexicon",
"from config import *",
"class TranslationModel:\n\n\tdef load(self, is_filt=False):\n\t\tself.tm = []\n\t\tfor i in xrange(lexicon_size+1):\n\t\t\tself.tm.append({})\n\t\ttm_file = open(TM_path, 'r')\n\t\tli... |
'''
prerequisite:
1) allocate the term-id and doc-id;
2) build a language model for each passage (fixed window)
3) get an initialized translation model
'''
from ML import ML
from QueryManager import QueryManager
from Query import Query
from Lexicon import Lexicon
from TranslationModel import TranslationModel
from time import clock
from DictUtils import *
def printModel(dic):
    '''Debug helper: dump a two-level dict {td: {tq: value}}, one line per
    top-level key with entries formatted "tq:value".'''
    for key in dic.keys():
        line = str(key) + ' '
        for k2 in dic[key].keys():
            # BUG FIX: entries used to be concatenated with no separator,
            # producing an unreadable run like "3:0.54:0.2".
            line = line + str(k2) + ':' + str(dic[key][k2]) + ' '
        print(line)
class ATM: # aligned translation model
    '''
    EM training of an aligned translation model: passages of the answer
    document that score well against a query receive more of the query's
    translation mass.
    '''

    def train(self, init_tm, iterate_num, model_diff):
        '''
        Run at most iterate_num EM iterations starting from init_tm,
        stopping early once the L1 change between successive models drops
        below model_diff.  The final model is kept in self.model.
        '''
        prev_global_translation = init_tm
        qm = QueryManager()
        qm.load()
        query_count = 100  # test with the first 100 queries only
        lexicon = Lexicon()
        lexicon.load()
        for i in xrange(iterate_num):
            t1 = clock()
            print('Iterate %d model :' % (i + 1))
            global_translation = {}
            for query_index in xrange(query_count):
                query = qm.getQuery(query_index)
                # Map the query text to term ids.
                q = []
                for term in query.getQuery().split():
                    q.append(lexicon.getIdByTerm(term))
                doc = ML(str(query.getAnsdoc()))
                passage_models = doc.getPassageModels()
                passage_scores = []
                passage_translations = []
                # E-step: score every passage of the answer document.
                for lm in passage_models:
                    passage_score, passage_translation = self.passage_score(q, lm, prev_global_translation)
                    passage_scores.append(passage_score)
                    passage_translations.append(passage_translation)
                self.passage_norm(passage_scores)
                doc_translation = self.doc_translation_norm(passage_translations, passage_scores)
                self.update_translation(global_translation, doc_translation)
            # M-step: renormalize the accumulated counts.
            self.translation_norm(global_translation)
            error = self.compare(prev_global_translation, global_translation)
            print('Iterate %d error %f .' % (i + 1, error))
            if error < model_diff:
                break
            prev_global_translation = global_translation
            t2 = clock()
            print('Iterate %d cost %f s' % (i + 1, t2 - t1))
        self.model = global_translation

    def writeModel(self):
        '''Serialize self.model to EM_TM_path, one "td tq:value ..." line
        per source term.'''
        # BUG FIX: the path was the string literal 'EM_TM_path', so the
        # model was written to a file literally named EM_TM_path while
        # load() reads the config constant of the same name.
        f = open(EM_TM_path, 'w')
        for td in self.model.keys():
            line = str(td) + ' '
            for tq in self.model[td].keys():
                line = line + str(tq) + ':' + str(self.model[td][tq]) + ' '
            line = line + '\n'
            f.write(line)
        f.close()

    def load(self):
        '''Read the serialized model from EM_TM_path into self.model.'''
        f = open(EM_TM_path, 'r')
        self.model = {}
        lines = f.readlines()
        f.close()
        for line in lines:
            items = line.split()
            td = int(items[0])
            for item in items[1:]:
                tq = int(item.split(':')[0])
                value = float(item.split(':')[1])
                addElement_twolevel(self.model, td, tq, value)

    def getProb(self, td, tq):
        '''P(tq|td) under the trained model (0.0 when absent).'''
        return getElement_twolevel(self.model, td, tq)

    def passage_score(self, q, lm, ptm):
        '''Score one passage language model lm against the query term ids q
        under translation model ptm.  Returns (score, contributions), the
        latter a dict {td: {tq: p1*p2}}.'''
        score = 1.0
        translation = {}
        for td in lm.keys():
            translation[td] = {}
        for tq in q:
            k_score = 0.0
            for td in lm.keys():
                if td == 0:
                    continue  # slot 0 is the passage's token count, not a term
                p1 = getElement_twolevel(ptm, td, tq)
                p2 = getElement_onelevel(lm, td)
                if p1 * p2 == 0:
                    continue
                translation[td][tq] = p1 * p2
                k_score = k_score + p1 * p2
            score = score * k_score
        return (score, translation)

    def passage_norm(self, passage_scores):
        '''Normalize the scores in place so they sum to 1; a no-op when all
        scores are zero.'''
        denominator = 0.0
        for score in passage_scores:
            denominator = denominator + score
        if denominator == 0:
            return
        passage_scores[:] = [score / denominator for score in passage_scores]

    def doc_translation_norm(self, passage_translations, passage_scores):
        '''Combine the per-passage contributions weighted by the normalized
        passage scores into one {tq: {td: p}} dict, each tq row normalized
        by its 0 entry.'''
        doc_translation = {}
        for k in xrange(len(passage_scores)):
            if passage_scores[k] == 0:
                continue
            for td in passage_translations[k].keys():
                for tq in passage_translations[k][td].keys():
                    addElement_twolevel(doc_translation, tq, td, passage_scores[k] * passage_translations[k][td][tq])
        for tq in doc_translation.keys():
            for td in doc_translation[tq].keys():
                if td == 0:
                    continue # Remember not do normalization to 0th element
                doc_translation[tq][td] = doc_translation[tq][td] / doc_translation[tq][0]
            doc_translation[tq][0] = 1.0
        return doc_translation

    def update_translation(self, global_translation, doc_translation):
        '''Add a document's normalized contributions into the global
        accumulator, re-keyed as td -> tq.'''
        for tq in doc_translation.keys():
            for td in doc_translation[tq].keys():
                if td == 0:
                    continue
                addElement_twolevel(global_translation, td, tq, doc_translation[tq][td])

    def translation_norm(self, global_translation):
        '''Normalize every row of the model by its 0 "total" entry.'''
        for td in global_translation.keys():
            for tq in global_translation[td].keys():
                if tq == 0:
                    continue
                global_translation[td][tq] = global_translation[td][tq] / global_translation[td][0]
            global_translation[td][0] = 1.0

    def compare(self, pt, gt):
        '''L1 distance between two models, the 0 "total" entries excluded.'''
        diff = 0.0
        # BUG FIX: '-' binds tighter than '|', so the old expression was
        # pt | (gt - {0}) and td 0 slipped through whenever it was in pt.
        td_list = (set(pt.keys()) | set(gt.keys())) - set([0])
        row = len(td_list)
        col = 0
        for td in td_list:
            tq_list = set()
            if td in pt:
                tq_list = tq_list | set(pt[td].keys())
            if td in gt:
                tq_list = tq_list | set(gt[td].keys())
            col += len(tq_list)
            tq_list = tq_list - set([0])
            for tq in tq_list:
                diff = diff + abs(getElement_twolevel(pt, td, tq) - getElement_twolevel(gt, td, tq))
        # BUG FIX: guard the empty-model case (row == 0 divided); '//'
        # keeps the Python 2 integer-division behavior.
        if row:
            print('row: %d col: %d' % (row, col // row))
        return diff
# Train the aligned translation model starting from the plain global
# translation model, then serialize the result.
if __name__ == '__main__':
    atm = ATM()
    tm_model = TranslationModel()
    i_tm = tm_model.getTM_dict()
    atm.train(i_tm, 500, 1e-5)
    atm.writeModel()
| [
[
8,
0,
0.02,
0.0343,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.04,
0.0057,
0,
0.66,
0.1,
455,
0,
1,
0,
0,
455,
0,
0
],
[
1,
0,
0.0457,
0.0057,
0,
0.66,
0... | [
"'''\nprerequisite:\n1) allocate the term-id and doc-id;\n2) build a language model for each passage (fixed window)\n3) get an initialed translation model\n'''",
"from ML import ML",
"from QueryManager import QueryManager",
"from Query import Query",
"from Lexicon import Lexicon",
"from TranslationModel i... |
from os.path import join, exists
from config import *
from Document import Document
class Lexicon:
    '''
    The vocabulary of the whole corpus; words are 1-indexed.
    '''

    def create(self):
        '''Scan every document (comments included) and write each distinct
        word to lexicon_path, one per line.'''
        word_dict = {}
        for doc in xrange(doccount):
            print('Processing %d' % doc)
            paper = Document(doc)
            comments = ''
            for com in paper.getComments():
                comments = comments + com + ' '
            content = ' '.join([paper.getTitle(), paper.getAbstract(), paper.getBody(), comments])
            words = content.split()
            for word in words:
                # 'in' replaces the deprecated dict.has_key().
                if word not in word_dict:
                    word_dict[word] = True
        f = open(lexicon_path, 'w')
        for word in word_dict.keys():
            f.write(word + '\n')
        f.close()

    def load(self):
        '''Read lexicon_path and build the term <-> id maps (ids start at 1
        and follow file order).'''
        self.term2id = {}
        self.id2term = {}
        word_id = 1
        f = open(lexicon_path, 'r')
        words = f.readlines()
        f.close()
        for word in words:
            word = word.strip()
            self.term2id[word] = word_id
            self.id2term[word_id] = word
            word_id = word_id + 1

    def getIdByTerm(self, term):
        '''Id of a term (raises KeyError for unknown terms).'''
        return self.term2id[term]

    def getTermById(self, wordid):
        '''Term of an id (raises KeyError for unknown ids).'''
        return self.id2term[wordid]

    def getSize(self):
        '''Number of distinct terms.'''
        return len(self.term2id)
# Demo: load the lexicon and exercise the lookups in both directions.
if __name__ == '__main__':
    lexicon = Lexicon()
    #lexicon.create()
    lexicon.load()
    print lexicon.getIdByTerm('music')
    print lexicon.getTermById(2563)
    print lexicon.getSize()
| [
[
1,
0,
0.0159,
0.0159,
0,
0.66,
0,
79,
0,
2,
0,
0,
79,
0,
0
],
[
1,
0,
0.0317,
0.0159,
0,
0.66,
0.25,
308,
0,
1,
0,
0,
308,
0,
0
],
[
1,
0,
0.0476,
0.0159,
0,
0.66... | [
"from os.path import join, exists",
"from config import *",
"from Document import Document",
"class Lexicon:\n\n\t'''\n\t整个数据集的词典表示\n\twords are 1-indexed.\n\t'''\n\n\tdef create(self):",
"\t'''\n\t整个数据集的词典表示\n\twords are 1-indexed.\n\t'''",
"\tdef create(self):\n\t\tword_dict = {}\n\t\tfor doc in xrange(... |
# Path of the preprocessed (pruned) raw data
datapath='/mnt/disk1/luyang/paper_new/prunned_data/'
# Number of documents in the corpus
doccount=29353
#alpha=0.2
#beta=0.3
# Truncation of the translation model: one word translates to at most
# filter_num other words
filter_num=1000
# Path of the lexicon file
lexicon_path='/mnt/disk1/luyang/paper_new/prunned_data/lexicon'
# Path of the collection model
collection_path='/mnt/disk1/luyang/paper_new/ML_data/collection'
# Directory holding the generated maximum-likelihood model of each document
ML_path='/mnt/disk1/luyang/paper_new/ML_data/'
# Path of the file holding all queries
query_path='/mnt/disk1/luyang/paper_new/ML_data/Querys'
# Path of the global translation model file
TM_path='/mnt/disk1/luyang/paper_new/ML_data/TM'
# Path of the per-document offline translation model file
Offline_TM_path='/mnt/disk1/luyang/paper_new/ML_data/OfflineTM_500'
# Temporary directory used while testing truncation sizes for the offline
# translation model; each file inside is one document's offline model
tmp_path='/mnt/disk1/luyang/paper_new/tmp_500/'
# Once the truncation size is chosen, tmp_path is promoted to
# OfflineTMSingle_path: when scoring a document only that document's model
# is loaded, keeping memory low, especially with multiple processes
OfflineTMSingle_path='/mnt/disk1/luyang/paper_new/OfflineTMSingle'
# Number of words in one passage of the Aligned Translation Model
passage_length=100
# When retrieving with the Aligned Translation Model, rank with the plain
# language model first, then rerank the top reranking_num documents
reranking_num=200
# Number of words in the lexicon
lexicon_size=78404
# Location of the trained Aligned Translation Model file
EM_TM_path='/mnt/disk1/luyang/paper_new/src/EM_TM_path'
| [
[
14,
0,
0.0625,
0.0312,
0,
0.66,
0,
716,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.125,
0.0312,
0,
0.66,
0.0714,
878,
1,
0,
0,
0,
0,
1,
0
],
[
14,
0,
0.25,
0.0312,
0,
0.66... | [
"datapath='/mnt/disk1/luyang/paper_new/prunned_data/'",
"doccount=29353",
"filter_num=1000",
"lexicon_path='/mnt/disk1/luyang/paper_new/prunned_data/lexicon'",
"collection_path='/mnt/disk1/luyang/paper_new/ML_data/collection'",
"ML_path='/mnt/disk1/luyang/paper_new/ML_data/'",
"query_path='/mnt/disk1/lu... |
#!/usr/bin/python2.6
#
# Simple http server to emulate api.playfoursquare.com
import logging
import shutil
import sys
import urlparse
import SimpleHTTPServer
import BaseHTTPServer
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
  """Handle playfoursquare.com requests, for testing."""

  # URL path -> canned capture file served as the response body.
  _ROUTES = {
      '/v1/venue': '../captures/api/v1/venue.xml',
      '/v1/addvenue': '../captures/api/v1/venue.xml',
      '/v1/venues': '../captures/api/v1/venues.xml',
      '/v1/user': '../captures/api/v1/user.xml',
      '/v1/checkcity': '../captures/api/v1/checkcity.xml',
      '/v1/checkins': '../captures/api/v1/checkins.xml',
      '/v1/cities': '../captures/api/v1/cities.xml',
      '/v1/switchcity': '../captures/api/v1/switchcity.xml',
      '/v1/tips': '../captures/api/v1/tips.xml',
      '/v1/checkin': '../captures/api/v1/checkin.xml',
      '/history/12345.rss': '../captures/api/v1/feed.xml',
  }

  def do_GET(self):
    """Serve the canned capture mapped to the request path."""
    logging.warn('do_GET: %s, %s', self.command, self.path)
    url = urlparse.urlparse(self.path)
    logging.warn('do_GET: %s', url)
    # BUG FIX: the old code also built [pair[0] for pair in query] from the
    # parse_qs dict -- iterating a dict yields key strings, so that took
    # the first character of each key; it was unused either way, so both
    # lines were dropped.
    response = self.handle_url(url)
    if response != None:
      self.send_200()
      # BUG FIX: close the capture file after copying; it used to leak.
      try:
        shutil.copyfileobj(response, self.wfile)
      finally:
        response.close()
      self.wfile.close()

  do_POST = do_GET

  def handle_url(self, url):
    """Return an open file with the capture for url.path, or send a 404
    and return None when the path is unknown."""
    path = self._ROUTES.get(url.path)
    if path is None:
      self.send_error(404)
    else:
      logging.warn('Using: %s' % path)
      return open(path)

  def send_200(self):
    """Write a 200 OK header with an XML content type."""
    self.send_response(200)
    self.send_header('Content-type', 'text/xml')
    self.end_headers()
def main():
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = 8080
server_address = ('0.0.0.0', port)
httpd = BaseHTTPServer.HTTPServer(server_address, RequestHandler)
sa = httpd.socket.getsockname()
print "Serving HTTP on", sa[0], "port", sa[1], "..."
httpd.serve_forever()
if __name__ == '__main__':
main()
| [
[
1,
0,
0.0588,
0.0118,
0,
0.66,
0,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0706,
0.0118,
0,
0.66,
0.125,
614,
0,
1,
0,
0,
614,
0,
0
],
[
1,
0,
0.0824,
0.0118,
0,
0... | [
"import logging",
"import shutil",
"import sys",
"import urlparse",
"import SimpleHTTPServer",
"import BaseHTTPServer",
"class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):\n \"\"\"Handle playfoursquare.com requests, for testing.\"\"\"\n\n def do_GET(self):\n logging.warn('do_GET: %s, %s',... |
#!/usr/bin/python
import os
import subprocess
import sys
BASEDIR = '../main/src/com/joelapenna/foursquare'
TYPESDIR = '../captures/types/v1'
captures = sys.argv[1:]
if not captures:
captures = os.listdir(TYPESDIR)
for f in captures:
basename = f.split('.')[0]
javaname = ''.join([c.capitalize() for c in basename.split('_')])
fullpath = os.path.join(TYPESDIR, f)
typepath = os.path.join(BASEDIR, 'types', javaname + '.java')
parserpath = os.path.join(BASEDIR, 'parsers', javaname + 'Parser.java')
cmd = 'python gen_class.py %s > %s' % (fullpath, typepath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
cmd = 'python gen_parser.py %s > %s' % (fullpath, parserpath)
print cmd
subprocess.call(cmd, stdout=sys.stdout, shell=True)
| [
[
1,
0,
0.1111,
0.037,
0,
0.66,
0,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.1481,
0.037,
0,
0.66,
0.1429,
394,
0,
1,
0,
0,
394,
0,
0
],
[
1,
0,
0.1852,
0.037,
0,
0.6... | [
"import os",
"import subprocess",
"import sys",
"BASEDIR = '../main/src/com/joelapenna/foursquare'",
"TYPESDIR = '../captures/types/v1'",
"captures = sys.argv[1:]",
"if not captures:\n captures = os.listdir(TYPESDIR)",
" captures = os.listdir(TYPESDIR)",
"for f in captures:\n basename = f.split('... |
#!/usr/bin/python
"""
Pull an OAuth-protected page from foursquare.
Expects ~/.oget to contain (one on each line):
CONSUMER_KEY
CONSUMER_KEY_SECRET
USERNAME
PASSWORD
Don't forget to chmod 600 the file!
"""
import httplib
import os
import re
import sys
import urllib
import urllib2
import urlparse
import user
from xml.dom import pulldom
from xml.dom import minidom
import oauth
"""From: http://groups.google.com/group/foursquare-api/web/oauth
@consumer = OAuth::Consumer.new("consumer_token","consumer_secret", {
:site => "http://foursquare.com",
:scheme => :header,
:http_method => :post,
:request_token_path => "/oauth/request_token",
:access_token_path => "/oauth/access_token",
:authorize_path => "/oauth/authorize"
})
"""
# Host:port that both the authexchange and the actual API call are sent to.
SERVER = 'api.foursquare.com:80'
# Form-encoded POST bodies, as expected by the endpoints below.
CONTENT_TYPE_HEADER = {'Content-Type' :'application/x-www-form-urlencoded'}
# HMAC-SHA1 request signing (OAuth 1.0).
SIGNATURE_METHOD = oauth.OAuthSignatureMethod_HMAC_SHA1()
# Endpoint that exchanges a username/password for an OAuth token.
AUTHEXCHANGE_URL = 'http://api.foursquare.com/v1/authexchange'
def parse_auth_response(auth_response):
  """Extract (oauth_token, oauth_token_secret) from an authexchange
  response body; raises AttributeError when either tag is missing."""
  token_match = re.search('<oauth_token>(.*)</oauth_token>', auth_response)
  secret_match = re.search('<oauth_token_secret>(.*)</oauth_token_secret>',
      auth_response)
  return (token_match.groups()[0], secret_match.groups()[0])
def create_signed_oauth_request(username, password, consumer):
  """Build and HMAC-SHA1-sign the POST authexchange request that trades
  foursquare credentials for an OAuth token."""
  params = dict(fs_username=username, fs_password=password)
  oauth_request = oauth.OAuthRequest.from_consumer_and_token(
      consumer, http_method='POST', http_url=AUTHEXCHANGE_URL,
      parameters=params)
  oauth_request.sign_request(SIGNATURE_METHOD, consumer, None)
  return oauth_request
def main():
  """Fetch the OAuth-protected URL given as sys.argv[1] and print the body.

  Reads credentials (and, when present, a cached access token) from
  ~/.oget.  When no token is cached, performs the authexchange once and
  writes the token back into the file for next time.
  """
  url = urlparse.urlparse(sys.argv[1])
  # Nevermind that the query can have repeated keys.
  parameters = dict(urlparse.parse_qsl(url.query))
  # BUG FIX: the credentials file was opened without ever being closed;
  # 'with' guarantees the handle is released (and, below, flushed on write).
  with open(os.path.join(user.home, '.oget')) as password_file:
    lines = [line.strip() for line in password_file.readlines()]
  if len(lines) == 4:
    cons_key, cons_key_secret, username, password = lines
    access_token = None
  else:
    cons_key, cons_key_secret, username, password, token, secret = lines
    access_token = oauth.OAuthToken(token, secret)
  consumer = oauth.OAuthConsumer(cons_key, cons_key_secret)
  if not access_token:
    # No cached token yet: do the authexchange and cache the result.
    oauth_request = create_signed_oauth_request(username, password, consumer)
    connection = httplib.HTTPConnection(SERVER)
    headers = {'Content-Type' :'application/x-www-form-urlencoded'}
    connection.request(oauth_request.http_method, AUTHEXCHANGE_URL,
        body=oauth_request.to_postdata(), headers=headers)
    auth_response = connection.getresponse().read()
    token = parse_auth_response(auth_response)
    access_token = oauth.OAuthToken(*token)
    with open(os.path.join(user.home, '.oget'), 'w') as token_file:
      token_file.write('\n'.join((
          cons_key, cons_key_secret, username, password, token[0], token[1])))
  # Sign and issue the actual request.
  oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
      access_token, http_method='POST', http_url=url.geturl(),
      parameters=parameters)
  oauth_request.sign_request(SIGNATURE_METHOD, consumer, access_token)
  connection = httplib.HTTPConnection(SERVER)
  connection.request(oauth_request.http_method, oauth_request.to_url(),
      body=oauth_request.to_postdata(), headers=CONTENT_TYPE_HEADER)
  print(connection.getresponse().read())
  #print minidom.parse(connection.getresponse()).toprettyxml(indent=' ')


if __name__ == '__main__':
  main()
| [
[
8,
0,
0.0631,
0.0991,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1261,
0.009,
0,
0.66,
0.05,
2,
0,
1,
0,
0,
2,
0,
0
],
[
1,
0,
0.1351,
0.009,
0,
0.66,
0.... | [
"\"\"\"\nPull a oAuth protected page from foursquare.\n\nExpects ~/.oget to contain (one on each line):\nCONSUMER_KEY\nCONSUMER_KEY_SECRET\nUSERNAME\nPASSWORD",
"import httplib",
"import os",
"import re",
"import sys",
"import urllib",
"import urllib2",
"import urlparse",
"import user",
"from xml.... |
#!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna (joe@joelapenna.com)
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
  """Entry point: read attribute metadata from the XML file named on the
  command line and emit the generated Java parser class to stdout."""
  xml_path = sys.argv[1]
  type_name, top_node_name, attributes = common.WalkNodesForAttributes(xml_path)
  GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
  """Emit the generated Java parser class for one type to stdout.

  Args:
    type_name: the type of object the parser returns, e.g. 'Venue'.
    top_node_name: the name of the xml stanza / generated local variable.
    attributes: {name: (type, children)} per common.WalkNodesForAttributes.
  """
  # Defaults so the final Replacements() call works even when there are no
  # attributes; previously name/typ were only bound inside the loop and an
  # empty mapping raised NameError.
  name, typ = top_node_name, ''
  stanzas = []
  for name in sorted(attributes):
    typ, children = attributes[name]
    replacements = Replacements(top_node_name, name, typ, children)
    if typ == common.BOOLEAN:
      stanzas.append(BOOLEAN_STANZA % replacements)
    elif typ == common.GROUP:
      stanzas.append(GROUP_STANZA % replacements)
    elif typ in common.COMPLEX:
      stanzas.append(COMPLEX_STANZA % replacements)
    else:
      stanzas.append(STANZA % replacements)
  if stanzas:
    # Pop off the extraneous "} else " from the first conditional stanza.
    stanzas[0] = stanzas[0].replace('} else ', '', 1)
  replacements = Replacements(top_node_name, name, typ, [None])
  replacements['stanzas'] = '\n'.join(stanzas).strip()
  # Parenthesized form prints identically under Python 2.
  print(PARSER % replacements)
def Replacements(top_node_name, name, typ, children):
  """Build the %-interpolation dict shared by the code templates above."""
  def to_camel(snake):
    # 'top_node' -> 'TopNode'
    return ''.join(word.capitalize() for word in snake.split('_'))

  type_name = to_camel(top_node_name)
  camel_name = to_camel(name)
  # e.g. 'FirstName' -> 'Firstname' (lowercase everything, upcase the head).
  attribute_name = camel_name.lower().capitalize()
  field_name = 'm' + camel_name
  if children[0]:
    sub_parser_camel_case = children[0] + 'Parser'
  else:
    # No declared child type: strip the trailing character (assumed plural
    # 's') from the camel name before appending 'Parser'.
    sub_parser_camel_case = camel_name[:-1] + 'Parser'
  return {
      'type_name': type_name,
      'name': name,
      'top_node_name': top_node_name,
      'camel_name': camel_name,
      'parser_name': typ + 'Parser',
      'attribute_name': attribute_name,
      'field_name': field_name,
      'typ': typ,
      'timestamp': datetime.datetime.now(),
      'sub_parser_camel_case': sub_parser_camel_case,
      'sub_type': children[0]
  }
# Command-line entry point: generate a parser from the XML file in argv[1].
if __name__ == '__main__':
  main()
| [
[
1,
0,
0.0201,
0.0067,
0,
0.66,
0,
426,
0,
1,
0,
0,
426,
0,
0
],
[
1,
0,
0.0268,
0.0067,
0,
0.66,
0.0769,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0336,
0.0067,
0,
... | [
"import datetime",
"import sys",
"import textwrap",
"import common",
"from xml.dom import pulldom",
"PARSER = \"\"\"\\\n/**\n * Copyright 2009 Joe LaPenna\n */\n\npackage com.joelapenna.foursquare.parsers;\n\nimport com.joelapenna.foursquare.Foursquare;",
"BOOLEAN_STANZA = \"\"\"\\\n } else i... |
#!/usr/bin/python
import logging
from xml.dom import minidom
from xml.dom import pulldom
# Type markers used by the source XML's "type" attributes.
BOOLEAN = "boolean"
STRING = "String"
GROUP = "Group"
# Interfaces that all FoursquareTypes implement.
DEFAULT_INTERFACES = ['FoursquareType']
# Interfaces that specific FoursqureTypes implement.
INTERFACES = {
}
DEFAULT_CLASS_IMPORTS = [
]
# Per-class extra imports; currently empty, the commented entries show the
# intended shape.
CLASS_IMPORTS = {
# 'Checkin': DEFAULT_CLASS_IMPORTS + [
# 'import com.joelapenna.foursquare.filters.VenueFilterable'
# ],
# 'Venue': DEFAULT_CLASS_IMPORTS + [
# 'import com.joelapenna.foursquare.filters.VenueFilterable'
# ],
# 'Tip': DEFAULT_CLASS_IMPORTS + [
# 'import com.joelapenna.foursquare.filters.VenueFilterable'
# ],
}
# Types that have their own dedicated parser/class (not parsed inline).
COMPLEX = [
    'Group',
    'Badge',
    'Beenhere',
    'Checkin',
    'CheckinResponse',
    'City',
    'Credentials',
    'Data',
    'Mayor',
    'Rank',
    'Score',
    'Scoring',
    'Settings',
    'Stats',
    'Tags',
    'Tip',
    'User',
    'Venue',
]
# All recognized type names; anything else is treated as STRING.
TYPES = COMPLEX + ['boolean']
def WalkNodesForAttributes(path):
  """Parse the xml file getting all attributes.

  Expects documents shaped like:
      <venue>
        <attribute type="...">value</attribute>
      </venue>

  Returns:
    type_name - The java-style name the top node will have. "Venue"
    top_node_name - unadulterated name of the xml stanza, probably the type of
        java class we're creating. "venue"
    attributes - {'attribute': (type, [child])}
  """
  doc = pulldom.parse(path)
  type_name = None
  top_node_name = None
  attributes = {}
  # level > 0 means we are inside a COMPLEX subtree that should be skipped.
  level = 0
  for event, node in doc:
    # For skipping parts of a tree.
    if level > 0:
      if event == pulldom.END_ELEMENT:
        level-=1
        logging.warn('(%s) Skip end: %s' % (str(level), node))
        continue
      elif event == pulldom.START_ELEMENT:
        logging.warn('(%s) Skipping: %s' % (str(level), node))
        level+=1
        continue
    if event == pulldom.START_ELEMENT:
      logging.warn('Parsing: ' + node.tagName)
      # Get the type name to use.
      if type_name is None:
        # First START_ELEMENT is the document root; derive the Java class
        # name from its tag.
        type_name = ''.join([word.capitalize()
                             for word in node.tagName.split('_')])
        top_node_name = node.tagName
        logging.warn('Found Top Node Name: ' + top_node_name)
        continue
      typ = node.getAttribute('type')
      child = node.getAttribute('child')
      # We don't want to walk complex types.
      if typ in COMPLEX:
        logging.warn('Found Complex: ' + node.tagName)
        level = 1
      elif typ not in TYPES:
        logging.warn('Found String: ' + typ)
        typ = STRING
      else:
        logging.warn('Found Type: ' + typ)
      logging.warn('Adding: ' + str((node, typ)))
      # setdefault: first occurrence of a tag wins; duplicates are ignored.
      attributes.setdefault(node.tagName, (typ, [child]))
  logging.warn('Attr: ' + str((type_name, top_node_name, attributes)))
  return type_name, top_node_name, attributes
| [
[
1,
0,
0.0263,
0.0088,
0,
0.66,
0,
715,
0,
1,
0,
0,
715,
0,
0
],
[
1,
0,
0.0439,
0.0088,
0,
0.66,
0.0833,
290,
0,
1,
0,
0,
290,
0,
0
],
[
1,
0,
0.0526,
0.0088,
0,
... | [
"import logging",
"from xml.dom import minidom",
"from xml.dom import pulldom",
"BOOLEAN = \"boolean\"",
"STRING = \"String\"",
"GROUP = \"Group\"",
"DEFAULT_INTERFACES = ['FoursquareType']",
"INTERFACES = {\n}",
"DEFAULT_CLASS_IMPORTS = [\n]",
"CLASS_IMPORTS = {\n# 'Checkin': DEFAULT_CLASS_IMP... |
#!/usr/bin/python
# Copyright 2011 Google, Inc. All Rights Reserved.
# simple script to walk source tree looking for third-party licenses
# dumps resulting html page to stdout
import os, re, mimetypes, sys
# read source directories to scan from command line
SOURCE = sys.argv[1:]
# regex to find /* */ style comment blocks
COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL)
# regexes used to detect if a comment block is a license notice
COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE)
COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE)
# MIME types whose files are never scanned for comment blocks
EXCLUDE_TYPES = [
    "application/xml",
    "image/png",
]
# list of known licenses; keys are derived by stripping all whitespace and
# forcing to lowercase to help combine multiple files that have same license.
# Maps normalized license key -> License instance.
KNOWN_LICENSES = {}
class License:
  """One unique license text plus the files it applies to."""

  def __init__(self, license_text):
    self.license_text = license_text  # raw license text as found
    self.filenames = []               # files covered by this license

  def add_file(self, filename):
    """Record filename as covered by this license, skipping duplicates."""
    if filename in self.filenames:
      return
    self.filenames.append(filename)
# Strips every non-word character; used to normalize license text into a key.
LICENSE_KEY = re.compile(r"[^\w]")
def find_license(license_text):
  """Return the canonical License for license_text, creating it if new.

  The lookup key is the text with all non-word characters stripped and
  lowercased, so trivially-reformatted copies of the same license collapse
  into a single entry.
  """
  # TODO(alice): a lot these licenses are almost identical Apache licenses.
  # Most of them differ in origin/modifications. Consider combining similar
  # licenses.
  key = LICENSE_KEY.sub("", license_text).lower()
  if key not in KNOWN_LICENSES:
    KNOWN_LICENSES[key] = License(license_text)
  return KNOWN_LICENSES[key]
def discover_license(exact_path, filename):
  """Scan one file for license text and register it in KNOWN_LICENSES.

  A file named *LICENSE applies to the sibling file it is named after;
  otherwise the file's own /* ... */ comment blocks are searched for text
  mentioning both "license" and "copyright".
  """
  # when filename ends with LICENSE, assume applies to filename prefixed
  if filename.endswith("LICENSE"):
    with open(exact_path) as fh:
      license_text = fh.read()
    target_filename = filename[:-len("LICENSE")]
    if target_filename.endswith("."):
      target_filename = target_filename[:-1]
    find_license(license_text).add_file(target_filename)
    return None
  # guess_type() returns a (type, encoding) tuple; the previous code compared
  # the whole tuple against EXCLUDE_TYPES (a list of strings), so the
  # exclusion list never matched and excluded types were scanned anyway.
  mimetype, _ = mimetypes.guess_type(filename)
  if mimetype in EXCLUDE_TYPES: return None
  with open(exact_path) as fh:
    raw_file = fh.read()
  # include comments that have both "license" and "copyright" in the text
  for comment in COMMENT_BLOCK.finditer(raw_file):
    comment = comment.group(1)
    if COMMENT_LICENSE.search(comment) is None: continue
    if COMMENT_COPYRIGHT.search(comment) is None: continue
    find_license(comment).add_file(filename)
# Walk every requested source tree and collect license info for each file.
for source in SOURCE:
    for root, dirs, files in os.walk(source):
        for name in files:
            discover_license(os.path.join(root, name), name)
# Dump every collected license, grouped by identical text, as an HTML page
# on stdout.
print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>"
for license in KNOWN_LICENSES.values():
    print "<h3>Notices for files:</h3><ul>"
    filenames = license.filenames
    filenames.sort()
    for filename in filenames:
        print "<li>%s</li>" % (filename)
    print "</ul>"
    print "<pre>%s</pre>" % license.license_text
print "</body></html>"
| [
[
1,
0,
0.0816,
0.0102,
0,
0.66,
0,
688,
0,
4,
0,
0,
688,
0,
0
],
[
14,
0,
0.1224,
0.0102,
0,
0.66,
0.0714,
792,
6,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1531,
0.0102,
0,
... | [
"import os, re, mimetypes, sys",
"SOURCE = sys.argv[1:]",
"COMMENT_BLOCK = re.compile(r\"(/\\*.+?\\*/)\", re.MULTILINE | re.DOTALL)",
"COMMENT_LICENSE = re.compile(r\"(license)\", re.IGNORECASE)",
"COMMENT_COPYRIGHT = re.compile(r\"(copyright)\", re.IGNORECASE)",
"EXCLUDE_TYPES = [\n \"application/xml\... |
#!/usr/bin/python2.6
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build the BITE Extension."""
__author__ = 'ralphj@google.com (Julie Ralph)'
import logging
import optparse
import os
import shutil
import subprocess
import urllib
import zipfile
CHECKOUT_ACE_COMMAND = ('git clone git://github.com/ajaxorg/ace.git')
CHECKOUT_CLOSURE_COMMAND = ('svn checkout http://closure-library.googlecode.com'
'/svn/trunk/ closure-library')
CHECKOUT_SELENIUM_COMMAND = ('svn checkout http://selenium.googlecode.com'
'/svn/trunk/javascript/atoms selenium-atoms-lib')
CLOSURE_COMPILER_URL = ('http://closure-compiler.googlecode.com/files/'
'compiler-latest.zip')
SOY_COMPILER_URL = ('http://closure-templates.googlecode.com/files/'
'closure-templates-for-javascript-latest.zip')
SOYDATA_URL = ('http://closure-templates.googlecode.com/svn/trunk/javascript/'
'soydata.js')
COMPILE_CLOSURE_COMMAND = ('closure-library/closure/bin/build/closurebuilder.py'
' --root=src'
' --root=closure-library'
' --root=build_gen'
' --root=selenium-atoms-lib'
' --input=%(input)s'
' --output_mode=compiled'
' --output_file=%(output)s'
' --compiler_jar=compiler.jar')
SOY_COMPILER_COMMAND = ('java -jar SoyToJsSrcCompiler.jar'
' --shouldProvideRequireSoyNamespaces'
' --outputPathFormat %(output)s'
' %(file)s')
class ClosureError(Exception):
  """Raised when fetching or running the closure/soy tooling fails."""
  pass
def BuildClosureScript(input_filename, output_filename):
  """Build a compiled closure script based on the given input file.

  Args:
    input_filename: A string representing the name of the input script to
        compile.
    output_filename: A string representing the name of the output script.

  Raises:
    ClosureError: If closure fails to compile the given input file.
  """
  command = COMPILE_CLOSURE_COMMAND % {'input': input_filename,
                                       'output': output_filename}
  failed = ExecuteCommand(command)
  # A missing output file means compilation failed even on a zero exit code.
  if failed or not os.path.exists(output_filename):
    raise ClosureError('Failed while compiling %s.' % input_filename)
def BuildSoyJs(input_file):
  """Builds a javascript file from a soy file.

  Args:
    input_file: A path to the soy file to compile into JavaScript. The js
        file will be stored in build_gen/{FILENAME}.soy.js.

  Raises:
    ClosureError: If the soy compiler fails to compile.
  """
  output_name = os.path.join('build_gen', '%s.js' % input_file)
  failed = ExecuteCommand(SOY_COMPILER_COMMAND % {'file': input_file,
                                                  'output': output_name})
  # A missing output file means compilation failed even on a zero exit code.
  if failed or not os.path.exists(output_name):
    raise ClosureError('Failed while compiling the soy file %s.' % input_file)
def Clean():
  """Remove the build output directories if they exist.

  Bug fix: the original guarded the removal of 'build' with
  os.path.exists('clean') -- a directory that never exists -- so 'build'
  was never actually cleaned (and rmtree would crash if 'clean' somehow
  existed while 'build' did not).
  """
  if os.path.exists('build'):
    shutil.rmtree('build')
  if os.path.exists('build_gen'):
    shutil.rmtree('build_gen')
def ExecuteCommand(command):
  """Execute the given command, logging stderr on failure.

  Args:
    command: A string representing the command to execute; it is split on
        single spaces to form the argument list.

  Returns:
    The return code of the process.
  """
  # Single-argument parenthesized print behaves identically under Python 2.
  print('Running command: %s' % command)
  proc = subprocess.Popen(command.split(' '),
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
  _, stderr_output = proc.communicate()
  if proc.returncode:
    logging.error(stderr_output)
  return proc.returncode
def SetupAce():
  """Check out the Ace editor library from github if not already present.

  Raises:
    ClosureError: If the setup fails.
  """
  if os.path.exists('ace'):
    return
  ExecuteCommand(CHECKOUT_ACE_COMMAND)
  # Verify the checkout actually produced the directory.
  if not os.path.exists('ace'):
    logging.error('Could not checkout ACE from github.')
    raise ClosureError('Could not set up ACE.')
def SetupClosure():
  """Setup the closure library and compiler.

  Checkout the closure library using svn if it doesn't exist. Also, download
  the closure compiler, the soy compiler and the soydata.js support file.

  Raises:
    ClosureError: If the setup fails.
  """
  # Set up the svn repo for closure if it doesn't exist.
  if not os.path.exists('closure-library'):
    ExecuteCommand(CHECKOUT_CLOSURE_COMMAND)
  if not os.path.exists('closure-library'):
    logging.error(('Could not check out the closure library from svn. '
                   'Please check out the closure library to the '
                   '"closure-library" directory.'))
    raise ClosureError('Could not set up the closure library.')
  # Download the compiler jar if it doesn't exist.
  if not os.path.exists('compiler.jar'):
    (compiler_zip, _) = urllib.urlretrieve(CLOSURE_COMPILER_URL)
    compiler_zipfile = zipfile.ZipFile(compiler_zip)
    compiler_zipfile.extract('compiler.jar')
  if not os.path.exists('compiler.jar'):
    logging.error('Could not download the closure compiler jar.')
    raise ClosureError('Could not find the closure compiler.')
  # Download the soy compiler jar if it doesn't exist.
  if (not os.path.exists('SoyToJsSrcCompiler.jar') or
      not os.path.exists('build_gen/soyutils_usegoog.js')):
    (soy_compiler_zip, _) = urllib.urlretrieve(SOY_COMPILER_URL)
    soy_compiler_zipfile = zipfile.ZipFile(soy_compiler_zip)
    soy_compiler_zipfile.extract('SoyToJsSrcCompiler.jar')
    soy_compiler_zipfile.extract('soyutils_usegoog.js', 'build_gen')
  if (not os.path.exists('SoyToJsSrcCompiler.jar') or
      not os.path.exists('build_gen/soyutils_usegoog.js')):
    logging.error('Could not download the soy compiler jar.')
    raise ClosureError('Could not find the soy compiler.')
  # Download required soydata file, which is required for soyutils_usegoog.js
  # to work.
  if not os.path.exists('build_gen/soydata.js'):
    urllib.urlretrieve(SOYDATA_URL, 'build_gen/soydata.js')
  if not os.path.exists('build_gen/soydata.js'):
    logging.error('Could not download soydata.js.')
    # Error-message typo fixed: was 'Could not fine soydata.js'.
    raise ClosureError('Could not find soydata.js')
def SetupSelenium():
  """Check out the selenium atoms library via svn if not already present.

  Raises:
    ClosureError: If the setup fails.
  """
  if os.path.exists('selenium-atoms-lib/bot.js'):
    return
  ExecuteCommand(CHECKOUT_SELENIUM_COMMAND)
  # Verify the checkout actually produced the expected file.
  if not os.path.exists('selenium-atoms-lib/bot.js'):
    logging.error('Could not download the selenium library.')
    raise ClosureError('Could not find the selenium library.')
def main():
  """Parse flags, fetch dependencies, compile scripts and stage resources."""
  parser = optparse.OptionParser('usage: %prog [options]')
  parser.add_option('--clean', dest='build_clean',
                    action='store_true', default=False,
                    help='Clean the build directories.')
  options, _ = parser.parse_args()
  if options.build_clean:
    Clean()
    exit()
  # Set up the directories that will be built into.
  for directory in ('build', 'build/options', 'build_gen'):
    if not os.path.exists(directory):
      os.mkdir(directory)
  # Get external resources.
  SetupClosure()
  SetupSelenium()
  SetupAce()
  # Compile the soy templates into build_gen.
  for soy_filename in ('consoles.soy',
                       'rpfconsole.soy',
                       'rpf_dialogs.soy',
                       'locatorsupdater.soy',
                       'newbug_console.soy',
                       'newbug_type_selector.soy',
                       'popup.soy'):
    BuildSoyJs(os.path.join('src', soy_filename))
  # Compile each closure entry point into its compiled _script.js bundle.
  js_targets = {'background.js': 'background_script.js',
                'content.js': 'content_script.js',
                'getactioninfo.js': 'getactioninfo_script.js',
                'console.js': 'console_script.js',
                'elementhelper.js': 'elementhelper_script.js',
                'popup.js': 'popup_script.js',
                'options/page.js': 'options_script.js'}
  for source_name, target_name in js_targets.items():
    BuildClosureScript(os.path.join('src', source_name),
                       os.path.join('build', target_name))
  # Copy over the static resources (replace any stale copies).
  for tree_src, tree_dst in (('src/styles', 'build/styles'),
                             ('src/imgs', 'build/imgs')):
    if os.path.exists(tree_dst):
      shutil.rmtree(tree_dst)
    shutil.copytree(tree_src, tree_dst)
  for static_file in ('src/background.html',
                      'src/console.html',
                      'src/options/options.html',
                      'src/popup.html',
                      'manifest.json'):
    shutil.copy(static_file, 'build')
  # Copy the required ACE files.
  if os.path.exists('build/ace'):
    shutil.rmtree('build/ace')
  shutil.copytree('ace/build/src', 'build/ace')
| [
[
8,
0,
0.065,
0.0036,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0722,
0.0036,
0,
0.66,
0.0385,
777,
1,
0,
0,
0,
0,
3,
0
],
[
1,
0,
0.0794,
0.0036,
0,
0.66,
... | [
"\"\"\"Build the BITE Extension.\"\"\"",
"__author__ = 'ralphj@google.com (Julie Ralph)'",
"import logging",
"import optparse",
"import os",
"import shutil",
"import subprocess",
"import urllib",
"import zipfile",
"CHECKOUT_ACE_COMMAND = ('git clone git://github.com/ajaxorg/ace.git')",
"CHECKOUT... |
'''
This script parses character and city attributes by downloading the data from
the url given in URL, and stores the results in JSON format.

Requires python 2.7.
'''
import urllib2
import re
import json
from __builtin__ import file  # NOTE(review): appears unused -- confirm before removing
URL = r'http://www.gamefaqs.com/sms/588168-where-in-the-world-is-carmen-sandiego/faqs/58370'
# Local cache of the downloaded page; delete it to force a re-download.
RAW_DATA_FILENAME = 'website.txt'
def dict_parse(data):
    """Parse dot-separated record blocks into a list of dicts.

    Each record is a line of dots followed by "key: value" lines; one
    {key: value} dict is returned per record.
    """
    # Collapse runs of spaces and blank lines so the record regex can assume
    # single separators.
    data = re.sub(r' +', ' ', data, flags=re.MULTILINE)
    data = re.sub(r'\n+', '\n', data, flags=re.MULTILINE)
    items = re.findall(r"""
        \.+ \n
        (
          (?:
            \w+ \: [ ]+ [\w\ \?,]+ \n
          )+
        )
        """, data, re.MULTILINE | re.DOTALL | re.X)
    records = []
    for item in items:
        record = {}
        for field in item.split('\n'):
            if not field:
                continue
            key, _, value = field.partition(':')
            record[key] = value.strip()
        records.append(record)
    return records
def loot_parse(data):
    """Parse "- item (Country)" bullet lines into an {item: country} dict.

    Lines that don't start with '-' are ignored. Bullet lines that don't
    match the expected "item (Country)" shape are now skipped instead of
    crashing with AttributeError on the failed (None) match.
    """
    item_re = re.compile(r"""
        \-[ ]+
        (
        [^\(]+ # item
        )
        \ \((
        [\w ]+ # country
        )\)
        """, flags=re.X)
    data_dict = {}
    for line in data.split('\n'):
        if not line.startswith('-'):
            continue
        match = item_re.match(line)
        if match:
            data_dict[match.group(1)] = match.group(2)
    return data_dict
# Section name -> parser function. Names appear lowercased in the document's
# index headings (e.g. "iv. cities"); parse_sections dispatches through this.
SECTIONS = {
    'cities': dict_parse,
    'dossiers': dict_parse,
    'loot': loot_parse,
}
def download_data(URL):
    """Download the page at URL, cache it in RAW_DATA_FILENAME, return it."""
    print('descargando de %s...' % URL)
    # Some servers reject urllib2's default user agent, so fake a browser.
    request = urllib2.Request(URL, headers={'User-Agent' : "Some browser"})
    data = urllib2.urlopen(request).read()
    print('OK')
    with open(RAW_DATA_FILENAME, 'w') as cache:
        cache.write(data)
    return data
def get_website_data():
    """Return the parsed <pre> payload, using the local cache when available."""
    try:
        with open(RAW_DATA_FILENAME, 'r') as cache:
            raw = cache.read()
    except (IOError, OSError):
        # No usable cache yet: fetch the page (this also writes the cache).
        raw = download_data(URL)
    return parse_website_data(raw)
def parse_website_data(data):
    """Extract the first <pre>...</pre> body, lowercased and CR-stripped."""
    body = re.search(r'\<pre\>(.*?)\<\/pre\>', data,
                     re.DOTALL | re.MULTILINE).group(1)
    return body.lower().replace('\r', '')
def parse_sections(raw):
    """Split the FAQ text into sections and run each known section's parser.

    Bug fix: the body previously read the module-level global `data` instead
    of its `raw` parameter, silently ignoring the argument (and raising
    NameError whenever the global was absent).
    """
    sections = re.split(r'\*+\r?\n', raw, flags=re.DOTALL | re.MULTILINE)
    selection = {}
    # Alternation over the known section names, e.g. "(?:cities)|(?:loot)".
    indexes = '|'.join(['(?:%s)' % s for s in SECTIONS])
    for section in sections:
        # Headings look like "iv. cities" (roman numeral, dot, name).
        match = re.match(r'[vix]*\. +(%s)\s*(.*)' % indexes, section,
                         flags=re.DOTALL | re.MULTILINE)
        if match:
            name, content = match.group(1), match.group(2)
            selection[name] = SECTIONS[name](content)
    return selection
# Entry point: fetch (or load the cached) page, parse each known section,
# and write one JSON file per section.
if __name__ == '__main__':
    data = get_website_data()
    print 'parseando...'
    sections = parse_sections(data)
    print 'OK'
    for k, v in sections.items():
        filename = '%s.json' % k
        print 'creando archivo %s' % filename
        with open(filename, 'w') as fp:
            json.dump(v, fp, indent=4)
        print 'OK'
| [
[
8,
0,
0.0276,
0.0472,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0709,
0.0079,
0,
0.66,
0.0714,
345,
0,
1,
0,
0,
345,
0,
0
],
[
1,
0,
0.0787,
0.0079,
0,
0.66... | [
"'''\neste script parsea los atributos de personajes y ciudades descargando los datos de\nla url especificada en URL, y los guarda en formato JSON.\n\nrequire python 2.7\n'''",
"import urllib2",
"import re",
"import json",
"from __builtin__ import file",
"URL = r'http://www.gamefaqs.com/sms/588168-where-i... |
#!/usr/bin/python
# Copyright 2011 Google, Inc. All Rights Reserved.
# simple script to walk source tree looking for third-party licenses
# dumps resulting html page to stdout
import os, re, mimetypes, sys
# read source directories to scan from command line
SOURCE = sys.argv[1:]
# regex to find /* */ style comment blocks
COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL)
# regexes used to detect if a comment block is a license notice
COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE)
COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE)
# MIME types whose files are never scanned for comment blocks
EXCLUDE_TYPES = [
    "application/xml",
    "image/png",
]
# list of known licenses; keys are derived by stripping all whitespace and
# forcing to lowercase to help combine multiple files that have same license.
# Maps normalized license key -> License instance.
KNOWN_LICENSES = {}
class License:
  """One unique license text plus the files it applies to."""

  def __init__(self, license_text):
    self.license_text = license_text  # raw license text as found
    self.filenames = []               # files covered by this license

  def add_file(self, filename):
    """Record filename as covered by this license, skipping duplicates."""
    if filename in self.filenames:
      return
    self.filenames.append(filename)
# Strips every non-word character; used to normalize license text into a key.
LICENSE_KEY = re.compile(r"[^\w]")
def find_license(license_text):
  """Return the canonical License for license_text, creating it if new.

  The lookup key is the text with all non-word characters stripped and
  lowercased, so trivially-reformatted copies of the same license collapse
  into a single entry.
  """
  # TODO(alice): a lot these licenses are almost identical Apache licenses.
  # Most of them differ in origin/modifications. Consider combining similar
  # licenses.
  key = LICENSE_KEY.sub("", license_text).lower()
  if key not in KNOWN_LICENSES:
    KNOWN_LICENSES[key] = License(license_text)
  return KNOWN_LICENSES[key]
def discover_license(exact_path, filename):
  """Scan one file for license text and register it in KNOWN_LICENSES.

  A file named *LICENSE applies to the sibling file it is named after;
  otherwise the file's own /* ... */ comment blocks are searched for text
  mentioning both "license" and "copyright".
  """
  # when filename ends with LICENSE, assume applies to filename prefixed
  if filename.endswith("LICENSE"):
    with open(exact_path) as fh:
      license_text = fh.read()
    target_filename = filename[:-len("LICENSE")]
    if target_filename.endswith("."):
      target_filename = target_filename[:-1]
    find_license(license_text).add_file(target_filename)
    return None
  # guess_type() returns a (type, encoding) tuple; the previous code compared
  # the whole tuple against EXCLUDE_TYPES (a list of strings), so the
  # exclusion list never matched and excluded types were scanned anyway.
  mimetype, _ = mimetypes.guess_type(filename)
  if mimetype in EXCLUDE_TYPES: return None
  with open(exact_path) as fh:
    raw_file = fh.read()
  # include comments that have both "license" and "copyright" in the text
  for comment in COMMENT_BLOCK.finditer(raw_file):
    comment = comment.group(1)
    if COMMENT_LICENSE.search(comment) is None: continue
    if COMMENT_COPYRIGHT.search(comment) is None: continue
    find_license(comment).add_file(filename)
# Walk every requested source tree and collect license info for each file.
for source in SOURCE:
    for root, dirs, files in os.walk(source):
        for name in files:
            discover_license(os.path.join(root, name), name)
# Dump every collected license, grouped by identical text, as an HTML page
# on stdout.
print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>"
for license in KNOWN_LICENSES.values():
    print "<h3>Notices for files:</h3><ul>"
    filenames = license.filenames
    filenames.sort()
    for filename in filenames:
        print "<li>%s</li>" % (filename)
    print "</ul>"
    print "<pre>%s</pre>" % license.license_text
print "</body></html>"
| [
[
1,
0,
0.0816,
0.0102,
0,
0.66,
0,
688,
0,
4,
0,
0,
688,
0,
0
],
[
14,
0,
0.1224,
0.0102,
0,
0.66,
0.0714,
792,
6,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.1531,
0.0102,
0,
... | [
"import os, re, mimetypes, sys",
"SOURCE = sys.argv[1:]",
"COMMENT_BLOCK = re.compile(r\"(/\\*.+?\\*/)\", re.MULTILINE | re.DOTALL)",
"COMMENT_LICENSE = re.compile(r\"(license)\", re.IGNORECASE)",
"COMMENT_COPYRIGHT = re.compile(r\"(copyright)\", re.IGNORECASE)",
"EXCLUDE_TYPES = [\n \"application/xml\... |
#!/usr/bin/env python
# encoding: utf-8
"""
Created by Jaime Blasco on 2009-09-15
Uploaed to Labs on 2011-09-26 <--- First contribution :P
License:
Copyright (c) 2009 AlienVault
All rights reserved.
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 dated June, 1991.
You may not use, modify or distribute this program under any other version
of the GNU General Public License.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this package; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
MA 02110-1301 USA
On Debian GNU/Linux systems, the complete text of the GNU General
Public License can be found in `/usr/share/common-licenses/GPL-2'.
Otherwise you can read it here: http://www.gnu.org/licenses/gpl-2.0.txt
"""
import sys
import getopt
import fileinput
import commands
import re
from xmlobject import XMLFile
import libxml2
import libxslt
#Disks
#Memory
#CPU
#PCI devices
#Network cards
#pf_ring
#Packages and versions
#Kernel parameters
class disk():
    """One physical disk plus a measured sequential read speed."""

    def __init__(self, id, size):
        self.id = id        # device label, e.g. 'sda'
        self.size = size    # capacity in bytes (string from fdisk output)
        self.rVel = self.timeRead()

    def timeRead(self):
        """Benchmark the device via `hdparm -t`; return MB/sec or None."""
        # Single-argument parenthesized print is identical under Python 2.
        print("Timing %s ..." % self.id)
        output = commands.getstatusoutput('hdparm -t /dev/%s' % self.id)[1]
        # Example: " Timing buffered disk reads: 376 MB in 3.00 seconds = 125.18 MB/sec"
        speed_re = re.compile(r".*= (?P<vel>[\d\.]+).*")
        for line in output.split('\n'):
            match = speed_re.match(line)
            if match:
                return float(match.group(1))
        return None
class diskInfo():
    """Collects a disk object for every device reported by `fdisk -l`."""

    def __init__(self):
        self.disks = []
        self.checkInfo()

    def checkInfo(self):
        """Parse `fdisk -l` output into self.disks."""
        output = commands.getstatusoutput('fdisk -l')[1]
        # Example: "Disk /dev/sda: 73.4 GB, 73407488000 bytes"
        disk_re = re.compile(r"Disk /dev/(?P<label>\S+):.*, (?P<cap>\d+)\sbytes.*")
        for line in output.split('\n'):
            match = disk_re.match(line)
            if match:
                self.disks.append(disk(match.group(1), match.group(2)))
class memInfo():
    """Memory/swap totals parsed from /proc/meminfo (values are kB strings).

    Bug fix: __init__ previously called checkInfo() *before* assigning the
    zero defaults, so the freshly parsed values were immediately clobbered
    with 0. Defaults are now assigned first, then checkInfo() overwrites
    them with the parsed values.
    """

    def __init__(self):
        self.memTotal = 0
        self.memFree = 0
        self.swapTotal = 0
        self.swapFree = 0
        self.checkInfo()

    def checkInfo(self):
        """Parse /proc/meminfo into the mem*/swap* attributes."""
        data = commands.getstatusoutput('cat /proc/meminfo')[1]
        data = data.split('\n')
        p1 = re.compile(r"MemTotal:\s+(?P<mem>\d+)\skB.*")
        p2 = re.compile(r"MemFree:\s+(?P<mem>\d+)\skB.*")
        p3 = re.compile(r"SwapTotal:\s+(?P<mem>\d+)\skB.*")
        p4 = re.compile(r"SwapFree:\s+(?P<mem>\d+)\skB.*")
        for d in data:
            m = p1.match(d)
            if (m):
                self.memTotal = m.group(1)
            m = p2.match(d)
            if (m):
                self.memFree = m.group(1)
            m = p3.match(d)
            if (m):
                self.swapTotal = m.group(1)
            m = p4.match(d)
            if (m):
                self.swapFree = m.group(1)
class cpu():
    """Description of one logical CPU stanza from /proc/cpuinfo."""

    def __init__(self, id, vendor_id, cpu_family, model_id, model_name):
        self.id = id
        self.vendor_id = vendor_id
        self.cpu_family = cpu_family
        self.model_id = model_id
        self.model_name = model_name

    def __repr__(self):
        fields = (self.id, self.vendor_id, self.cpu_family,
                  self.model_id, self.model_name)
        return "%s,%s,%s,%s,%s" % fields

# Dead code kept for reference: an unfinished minidom serializer for cpu.
#    def xml(self, dom):
#        cp = dom.createElement("cpu")
#        cp.id = self.id
#        cp.vendor_id = vendor_id
#        cp.cpu_family = cpu_family
#        cp.model_id = model_id
#        cp.model_name = model_name
#        return cp
class cpuInfo():
    # Parses /proc/cpuinfo into a list of cpu objects, one per processor
    # stanza (stanzas are separated by blank lines).
    def __init__(self):
        self.cpus = []
        self.checkInfo()
    def checkInfo(self):
        # Each CPU's stanza is separated by a blank line in /proc/cpuinfo.
        data = commands.getstatusoutput('cat /proc/cpuinfo')[1]
        data = data.split('\n\n')
        for d in data:
            self.parseCpuData(d)
    def parseCpuData(self, data):
        # Positional parse: assumes the first five lines of each stanza are
        # processor, vendor_id, cpu family, model and model name, in that
        # order. NOTE(review): true for typical x86 /proc/cpuinfo but fragile
        # on other layouts -- confirm before reuse.
        data = data.split('\n')
        p = re.compile(r"([\w\s+]+)\t: (?P<val>.*)")
        m = p.match(data[0])
        id = m.group(2)
        p = re.compile(r"([\w\s+]+)\t: (?P<val>.*)")
        m = p.match(data[1])
        vendor_id = m.group(2)
        p = re.compile(r"([\w\s+]+)\t: (?P<val>.*)")
        m = p.match(data[2])
        cpu_family = m.group(2)
        p = re.compile(r"([\w\s+]+)\t: (?P<val>.*)")
        m = p.match(data[3])
        model_id = m.group(2)
        p = re.compile(r"([\w\s+]+)\t: (?P<val>.*)")
        m = p.match(data[4])
        model_name = m.group(2)
        cp = cpu(id, vendor_id, cpu_family, model_id, model_name)
        #print cp
        self.cpus.append(cp)
class pciDev():
    """One PCI device row as reported by `lspci -mm`."""

    def __init__(self, device, dClass, vendor, deviceName, sVendor, sDevice):
        self.device = device          # bus address, e.g. '07:00.0'
        self.dClass = dClass          # device class, e.g. 'Ethernet controller'
        self.vendor = vendor
        self.deviceName = deviceName
        self.sVendor = sVendor        # subsystem vendor
        self.sDevice = sDevice        # subsystem device

    def __repr__(self):
        fields = (self.device, self.dClass, self.vendor,
                  self.deviceName, self.sVendor, self.sDevice)
        return "%s,%s,%s,%s,%s,%s" % fields
class pciInfo():
    """Collects a pciDev for every line of `lspci -mm` output."""

    def __init__(self):
        self.devices = []
        self.checkInfo()

    def checkInfo(self):
        """Parse lspci info"""
        output = commands.getstatusoutput('lspci -mm')[1]
        # Example line:
        # 07:00.0 "Ethernet controller" "Intel Corporation" "80003ES2LAN ..." -r01 "ASUSTeK Computer Inc." "Device 8217"
        for line in output.split('\n'):
            fields = line.split(' "')
            self.devices.append(pciDev(fields[0].replace('"', ''),
                                       fields[1].replace('"', ''),
                                       fields[2].replace('"', ''),
                                       fields[3].replace('"', ''),
                                       fields[4].replace('"', ''),
                                       fields[5].replace('"', '')))
class networkCard():
    # One NIC: the negotiated speed from mii-tool plus ring buffer sizes
    # read from `ethtool -g`.
    def __init__(self, interface, neg):
        self.interface = interface
        self.neg = neg
        self.getRingParams()
    def __repr__(self):
        return "%s,%s,%s,%s,%s,%s" % (self.interface, self.neg, self.maxRX, self.maxTX, self.RX, self.TX)
    def getRingParams(self):
        # Positional parse of `ethtool -g`: lines 2/5 are the pre-set RX/TX
        # maximums, lines 7/10 the current RX/TX settings.
        # NOTE(review): assumes the exact ethtool output layout -- fragile.
        data = commands.getstatusoutput('ethtool -g %s' % self.interface)[1]
        data = data.split('\n')
        p = re.compile(r"([\w\s+]+):([^\d]+)(?P<val>\d+)")
        m = p.match(data[2])
        self.maxRX = m.group(3)
        m = p.match(data[5])
        self.maxTX = m.group(3)
        m = p.match(data[7])
        self.RX = m.group(3)
        m = p.match(data[10])
        self.TX = m.group(3)
class networkInfo():
    """Collects a networkCard for every interface mii-tool reports negotiated."""

    def __init__(self):
        self.cards = []
        self.checkInfo()

    def checkInfo(self):
        """Parse Mii-tool and ethtool info"""
        output = commands.getstatusoutput('mii-tool')[1]
        # Example: "eth0: negotiated 100baseTx-FD, link ok"
        card_re = re.compile(r"(?P<in>[\d\w]+): negotiated (?P<vel>[\d\.]+).*")
        for line in output.split('\n'):
            match = card_re.match(line)
            if match:
                self.cards.append(networkCard(match.group(1), match.group(2)))
class pkt():
    """One installed package row from `dpkg -l`."""

    def __init__(self, name, version, desc):
        self.name = name
        self.version = version
        self.desc = desc

    def __repr__(self):
        return "%s,%s,%s" % (self.name, self.version, self.desc)
class packageInfo():
    """Collects a pkt for every installed package listed by `dpkg -l`."""

    def __init__(self):
        self.packages = []
        self.checkInfo()

    def checkInfo(self):
        """Parse `dpkg -l` rows (status, name, version, description)."""
        output = commands.getstatusoutput('dpkg -l')[1]
        row_re = re.compile(r".. (?P<name>\S+)\s+(?P<ver>\S+)\s+(?P<desc>.*)")
        for line in output.split('\n'):
            match = row_re.match(line)
            if match:
                self.packages.append(
                    pkt(match.group(1), match.group(2), match.group(3)))
class pfringInfo():
    # Snapshot of /proc/net/pf_ring/info. Attributes are only assigned when
    # the file has exactly 8 lines; callers must tolerate their absence.
    def __init__(self):
        self.checkInfo()
    def checkInfo(self):
        # Positional parse: one "Label : value" per line, in the order the
        # pf_ring kernel module prints them.
        data = commands.getstatusoutput('cat /proc/net/pf_ring/info')[1]
        data = data.split('\n')
        if len(data) == 8:
            p = re.compile(r"[^:]+: (?P<val>.*)")
            m = p.match(data[0])
            self.version = m.group(1)
            m = p.match(data[1])
            self.slots = m.group(1)
            m = p.match(data[2])
            self.sVersion = m.group(1)
            m = p.match(data[3])
            self.cTX = m.group(1)
            m = p.match(data[4])
            self.defrag = m.group(1)
            m = p.match(data[5])
            self.transparentMode = m.group(1)
            m = p.match(data[6])
            self.rings = m.group(1)
            m = p.match(data[7])
            self.plugins = m.group(1)
class kParam():
    """A single sysctl kernel parameter (name/value pair)."""

    def __init__(self, name, val):
        self.name = name
        self.val = val
class kernelInfo():
    """Collects every `sysctl -a` parameter as a kParam.

    Note: the attribute is (historically) spelled 'parametes'; systemInfo
    relies on that name, so it is preserved.
    """

    def __init__(self):
        self.parametes = []
        self.version = ''
        self.checkInfo()

    def checkInfo(self):
        """Parse `sysctl -a` "name = value" lines into self.parametes."""
        output = commands.getstatusoutput('sysctl -a')[1]
        param_re = re.compile(r"(?P<name>[^\s]+)\s=\s(?P<val>.*)")
        for line in output.split('\n'):
            match = param_re.match(line)
            if match:
                self.parametes.append(kParam(match.group(1), match.group(2)))
class systemInfo():
    # Aggregates every *Info collector and serializes the result to XML via
    # the third-party xmlobject.XMLFile wrapper. Each exec() below copies the
    # public attributes of a collected object onto the matching XML node.
    def __init__(self):
        # Each constructor shells out to its system tool and parses the
        # output immediately.
        self.disk = diskInfo()
        self.mem = memInfo()
        self.cpu = cpuInfo()
        self.pci = pciInfo()
        self.network = networkInfo()
        self.packages = packageInfo()
        self.pfring = pfringInfo()
        self.kernel = kernelInfo()
    def xmlOut(self, fileName):
        # Serialize every collected section into <system>...</system> and
        # write it to fileName. Attributes starting with '_' and the named
        # helper methods are excluded from serialization.
        xmlStr = '<?xml version="1.0" encoding="UTF-8"?>' \
                 '<system></system>'
        x = XMLFile(raw=xmlStr)
        system = x.root
        # CPU
        xcp = system._addNode("cpuInfo")
        for c in self.cpu.cpus:
            xc = xcp._addNode("cpu")
            for att in dir(c):
                if att[0] != '_':
                    exec("xc.%s = c.%s" % (att, att))
            xcp._addNode(xc)
        # Memory
        xme = system._addNode("memInfo")
        for att in dir(self.mem):
            if att[0] != '_' and att != "checkInfo":
                exec("xme.%s = self.mem.%s" % (att, att))
        # Disk
        xdi = system._addNode("diskInfo")
        for d in self.disk.disks:
            xdis = xdi._addNode("disk")
            for att in dir(d):
                if att[0] != '_' and att != "timeRead":
                    exec("xdis.%s = d.%s" % (att, att))
            xdi._addNode(xdis)
        # PCI
        xpci = system._addNode("pciInfo")
        for m in self.pci.devices:
            xp = xpci._addNode("pciModule")
            for att in dir(m):
                if att[0] != '_':
                    exec("xp.%s = m.%s" % (att, att))
            xpci._addNode(xp)
        # Packages
        xpac = system._addNode("packages")
        for p in self.packages.packages:
            xpa = xpac._addNode("package")
            for att in dir(p):
                if att[0] != '_':
                    exec("xpa.%s = p.%s" % (att,att))
            xpac._addNode(xpa)
        # Kernel
        xker = system._addNode("kernel")
        for k in self.kernel.parametes:
            xke = xker._addNode("parameter")
            for att in dir(k):
                if att[0] != '_':
                    exec("xke.%s = k.%s" % (att, att))
            xker._addNode(xke)
        # PFRING
        xpfr = system._addNode("pfring")
        for att in dir(self.pfring):
            if att[0] != '_' and att != "checkInfo":
                exec("xpfr.%s = self.pfring.%s" % (att, att))
        # NETWORK
        xnet = system._addNode("network")
        for nc in self.network.cards:
            xn = xnet._addNode("card")
            for att in dir(nc):
                if att[0] != '_' and att != "getRingParams":
                    exec("xn.%s = nc.%s" % (att,att))
            xnet._addNode(xn)
        # Write Results
        f = open(fileName, 'w')
        f.write(x.toxml())
        f.close()
    def applyXSLT(self, fileName):
        # Placeholder: XSLT post-processing was never implemented.
        pass
def main(argv=None):
    """Collect the full system inventory and dump it to results.xml."""
    # argv is accepted (getopt-style convention) but currently ignored.
    s = systemInfo()
    s.xmlOut("results.xml")
if __name__ == "__main__":
    # main() returns None, so this exits with status 0 on success.
    sys.exit(main())
| [
[
8,
0,
0.0437,
0.0713,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0828,
0.0023,
0,
0.66,
0.04,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0851,
0.0023,
0,
0.66,
... | [
"\"\"\"\nCreated by Jaime Blasco on 2009-09-15\nUploaed to Labs on 2011-09-26 <--- First contribution :P\n\nLicense:\n\nCopyright (c) 2009 AlienVault\nAll rights reserved.",
"import sys",
"import getopt",
"import fileinput",
"import commands",
"import re",
"from xmlobject import XMLFile",
"import libx... |
#!/usr/bin/env python
# encoding: utf-8
"""
Created by Jaime Blasco on 2009-09-15
Uploaed to Labs on 2011-09-26 <--- First contribution :P
License:
Copyright (c) 2009 AlienVault
All rights reserved.
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 dated June, 1991.
You may not use, modify or distribute this program under any other version
of the GNU General Public License.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this package; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
MA 02110-1301 USA
On Debian GNU/Linux systems, the complete text of the GNU General
Public License can be found in `/usr/share/common-licenses/GPL-2'.
Otherwise you can read it here: http://www.gnu.org/licenses/gpl-2.0.txt
"""
import sys
import getopt
import fileinput
import commands
import re
from xmlobject import XMLFile
import libxml2
import libxslt
#Disks
#Memory
#CPU
#PCI devices
#tarjetas de red
#pfring
#Packages and versions
#Kernle parameters
class disk():
    """One physical disk: device label, capacity, and measured read speed."""
    def __init__(self, id, size):
        # id is the device label (e.g. "sda"); size is capacity in bytes
        # (kept as the string parsed by diskInfo).
        self.id = id
        self.size = size
        # Buffered read rate in MB/sec as measured by hdparm, or None when
        # no timing line could be parsed.  Runs hdparm immediately.
        self.rVel = self.timeRead()
    def timeRead(self):
        print "Timing %s ..." % self.id
        data = commands.getstatusoutput('hdparm -t /dev/%s' % self.id)[1]
        data = data.split('\n')
        # Timing buffered disk reads: 376 MB in 3.00 seconds = 125.18 MB/sec
        p = re.compile(r".*= (?P<vel>[\d\.]+).*")
        for d in data:
            m = p.match(d)
            if (m):
                # First line containing "= <number>" wins.
                return float(m.group(1))
        return None
class diskInfo():
    """All fixed disks reported by `fdisk -l`, as disk records."""
    def __init__(self):
        self.disks = []
        self.checkInfo()
    def checkInfo(self):
        # `commands` is the Python 2 stdlib module; [1] keeps stdout only.
        data = commands.getstatusoutput('fdisk -l')[1]
        data = data.split('\n')
        #Disk /dev/sda: 73.4 GB, 73407488000 bytes
        p = re.compile(r"Disk /dev/(?P<label>\S+):.*, (?P<cap>\d+)\sbytes.*")
        for d in data:
            m = p.match(d)
            if (m):
                label = m.group(1)
                size = m.group(2)
                # Constructing disk() immediately times the device via hdparm.
                di = disk(label, size)
                self.disks.append(di)
class memInfo():
    """RAM and swap totals (kB, as strings) parsed from /proc/meminfo."""
    def __init__(self):
        # BUG FIX: the zero defaults used to be assigned *after*
        # checkInfo(), which overwrote every value checkInfo() had just
        # parsed.  Defaults must be set first so checkInfo() can fill them.
        self.memTotal = 0
        self.memFree = 0
        self.swapTotal = 0
        self.swapFree = 0
        self.checkInfo()
    def checkInfo(self):
        """Parse MemTotal/MemFree/SwapTotal/SwapFree from /proc/meminfo."""
        data = commands.getstatusoutput('cat /proc/meminfo')[1]
        data = data.split('\n')
        p1 = re.compile(r"MemTotal:\s+(?P<mem>\d+)\skB.*")
        p2 = re.compile(r"MemFree:\s+(?P<mem>\d+)\skB.*")
        p3 = re.compile(r"SwapTotal:\s+(?P<mem>\d+)\skB.*")
        p4 = re.compile(r"SwapFree:\s+(?P<mem>\d+)\skB.*")
        for d in data:
            m = p1.match(d)
            if (m):
                self.memTotal = m.group(1)
            m = p2.match(d)
            if (m):
                self.memFree = m.group(1)
            m = p3.match(d)
            if (m):
                self.swapTotal = m.group(1)
            m = p4.match(d)
            if (m):
                self.swapFree = m.group(1)
class cpu():
    """Description of one logical CPU, as raw strings from /proc/cpuinfo."""
    def __init__(self, id, vendor_id, cpu_family, model_id, model_name):
        self.id = id
        self.vendor_id = vendor_id
        self.cpu_family = cpu_family
        self.model_id = model_id
        self.model_name = model_name
    def __repr__(self):
        # Comma-separated field dump in declaration order (same output as
        # the original "%s,%s,%s,%s,%s" format).
        fields = (self.id, self.vendor_id, self.cpu_family,
                  self.model_id, self.model_name)
        return ",".join("%s" % f for f in fields)
class cpuInfo():
    """Collects one `cpu` record per processor stanza in /proc/cpuinfo."""
    def __init__(self):
        self.cpus = []
        self.checkInfo()
    def checkInfo(self):
        """Split /proc/cpuinfo into per-processor stanzas and parse each."""
        data = commands.getstatusoutput('cat /proc/cpuinfo')[1]
        # Stanzas are separated by blank lines.
        data = data.split('\n\n')
        for d in data:
            self.parseCpuData(d)
    def parseCpuData(self, data):
        """Parse the first five "key\\t: value" lines of one stanza.

        Lines are expected in /proc/cpuinfo order: processor, vendor_id,
        cpu family, model, model name.  Malformed stanzas raise
        AttributeError/IndexError, exactly like the original.
        """
        lines = data.split('\n')
        # One compiled pattern replaces five identical re.compile() calls.
        p = re.compile(r"([\w\s+]+)\t: (?P<val>.*)")
        # group('val') is group 2, the value text after "\t: ".
        id, vendor_id, cpu_family, model_id, model_name = (
            p.match(lines[i]).group('val') for i in range(5))
        cp = cpu(id, vendor_id, cpu_family, model_id, model_name)
        self.cpus.append(cp)
class pciDev():
    """One PCI device row from `lspci -mm`."""
    def __init__(self, device, dClass, vendor, deviceName, sVendor, sDevice):
        # Slot id, device class, vendor, device name, then the subsystem
        # vendor and subsystem device columns.
        self.device = device
        self.dClass = dClass
        self.vendor = vendor
        self.deviceName = deviceName
        self.sVendor = sVendor
        self.sDevice = sDevice
    def __repr__(self):
        # Same comma-joined dump the original built with "%s," * 6.
        fields = (self.device, self.dClass, self.vendor,
                  self.deviceName, self.sVendor, self.sDevice)
        return ",".join("%s" % f for f in fields)
class pciInfo():
    """Inventory of PCI devices parsed from `lspci -mm` output."""
    def __init__(self):
        self.devices = []
        self.checkInfo()
    def checkInfo(self):
        """Parse lspci info"""
        data = commands.getstatusoutput('lspci -mm')[1]
        data = data.split('\n')
        #07:00.0 "Ethernet controller" "Intel Corporation" "80003ES2LAN Gigabit Ethernet Controller (Copper)" -r01 "ASUSTeK Computer Inc." "Device 8217"
        for line in data:
            fields = line.split(' "')
            # Robustness fix: a blank or malformed line used to raise
            # IndexError on fields[5] and abort the whole scan; skip any
            # line that does not carry all six columns instead.
            if len(fields) < 6:
                continue
            fields = [f.replace('"', '') for f in fields]
            pDev = pciDev(fields[0], fields[1], fields[2],
                          fields[3], fields[4], fields[5])
            self.devices.append(pDev)
class networkCard():
    """One NIC: negotiated speed plus ethtool ring-buffer sizes."""
    def __init__(self, interface, neg):
        # interface e.g. "eth0"; neg is the negotiated speed parsed by
        # networkInfo from mii-tool output.
        self.interface = interface
        self.neg = neg
        # Runs ethtool immediately to populate maxRX/maxTX/RX/TX.
        self.getRingParams()
    def __repr__(self):
        return "%s,%s,%s,%s,%s,%s" % (self.interface, self.neg, self.maxRX, self.maxTX, self.RX, self.TX)
    def getRingParams(self):
        # NOTE(review): relies on fixed line positions (2, 5, 7, 10) of
        # `ethtool -g` output; any layout change makes p.match return None
        # and .group raise AttributeError.  Fragile -- confirm tool version.
        data = commands.getstatusoutput('ethtool -g %s' % self.interface)[1]
        data = data.split('\n')
        p = re.compile(r"([\w\s+]+):([^\d]+)(?P<val>\d+)")
        m = p.match(data[2])
        self.maxRX = m.group(3)
        m = p.match(data[5])
        self.maxTX = m.group(3)
        m = p.match(data[7])
        self.RX = m.group(3)
        m = p.match(data[10])
        self.TX = m.group(3)
class networkInfo():
    """All NICs that mii-tool reports as having negotiated a link."""
    def __init__(self):
        self.cards = []
        self.checkInfo()
    def checkInfo(self):
        """Parse Mii-tool and ethtool info"""
        data = commands.getstatusoutput('mii-tool')[1]
        data = data.split('\n')
        #eth0: negotiated 100baseTx-FD, link ok
        # NOTE(review): (?P<vel>[\d\.]+) captures only the leading digits of
        # e.g. "100baseTx-FD" (-> "100"); confirm that is the intent.
        p = re.compile(r"(?P<in>[\d\w]+): negotiated (?P<vel>[\d\.]+).*")
        for d in data:
            m = p.match(d)
            if (m):
                interface = m.group(1)
                neg = m.group(2)
                # Constructing networkCard() immediately runs ethtool.
                card = networkCard(interface, neg)
                self.cards.append(card)
class pkt():
    """A single installed package: name, version and description."""
    def __init__(self, name, version, desc):
        self.name = name
        self.version = version
        self.desc = desc
    def __repr__(self):
        # Same comma-joined dump the original produced via "%s,%s,%s".
        return ",".join("%s" % v for v in (self.name, self.version, self.desc))
class packageInfo():
    """Installed Debian packages, one pkt record per `dpkg -l` row."""
    def __init__(self):
        self.packages = []
        self.checkInfo()
    def checkInfo(self):
        """Turn each `dpkg -l` row into a pkt(name, version, description)."""
        output = commands.getstatusoutput('dpkg -l')[1]
        # Rows look like "ii  <name>  <version>  <description>".
        row = re.compile(r".. (?P<name>\S+)\s+(?P<ver>\S+)\s+(?P<desc>.*)")
        for line in output.split('\n'):
            match = row.match(line)
            if match:
                self.packages.append(
                    pkt(match.group('name'), match.group('ver'),
                        match.group('desc')))
class pfringInfo():
    """Snapshot of PF_RING status parsed from /proc/net/pf_ring/info."""
    def __init__(self):
        self.checkInfo()
    def checkInfo(self):
        data = commands.getstatusoutput('cat /proc/net/pf_ring/info')[1]
        data = data.split('\n')
        # Only the expected 8-line layout is parsed; any other shape leaves
        # the instance with none of these attributes.
        if len(data) == 8:
            # Each line is "label: value"; a non-matching line would make
            # m None and .group raise AttributeError.
            p = re.compile(r"[^:]+: (?P<val>.*)")
            m = p.match(data[0])
            self.version = m.group(1)
            m = p.match(data[1])
            self.slots = m.group(1)
            m = p.match(data[2])
            self.sVersion = m.group(1)
            m = p.match(data[3])
            self.cTX = m.group(1)
            m = p.match(data[4])
            self.defrag = m.group(1)
            m = p.match(data[5])
            self.transparentMode = m.group(1)
            m = p.match(data[6])
            self.rings = m.group(1)
            m = p.match(data[7])
            self.plugins = m.group(1)
class kParam():
    """One kernel tunable: the name/value pair from a sysctl line."""
    def __init__(self, name, val):
        # Both fields kept exactly as parsed from `sysctl -a` output.
        self.name = name
        self.val = val
class kernelInfo():
    """Kernel tunables captured from `sysctl -a`."""
    def __init__(self):
        # "parametes" (sic) is read by systemInfo.xmlOut, so the name stays.
        self.parametes = []
        self.version = ''
        self.checkInfo()
    def checkInfo(self):
        """Turn every "name = value" sysctl line into a kParam record."""
        output = commands.getstatusoutput('sysctl -a')[1]
        pattern = re.compile(r"(?P<name>[^\s]+)\s=\s(?P<val>.*)")
        for line in output.split('\n'):
            match = pattern.match(line)
            if match:
                self.parametes.append(
                    kParam(match.group('name'), match.group('val')))
class systemInfo():
    """Top-level collector: gathers every subsystem inventory and writes XML."""
    def __init__(self):
        # Each sub-collector shells out to its tool and parses the output.
        self.disk = diskInfo()
        self.mem = memInfo()
        self.cpu = cpuInfo()
        self.pci = pciInfo()
        self.network = networkInfo()
        self.packages = packageInfo()
        self.pfring = pfringInfo()
        self.kernel = kernelInfo()
    def _copyAtts(self, node, obj, skip=()):
        """Mirror every public attribute of obj onto the XML node.

        Replaces the original exec("node.%s = obj.%s") string templates:
        setattr/getattr has the identical effect for these dynamically
        named attributes, but never executes constructed code.
        """
        for att in dir(obj):
            if att[0] != '_' and att not in skip:
                setattr(node, att, getattr(obj, att))
    def xmlOut(self, fileName):
        """Serialise the collected inventory into fileName as XML."""
        xmlStr = '<?xml version="1.0" encoding="UTF-8"?>' \
            '<system></system>'
        x = XMLFile(raw=xmlStr)
        system = x.root
        #CPU
        xcp = system._addNode("cpuInfo")
        for c in self.cpu.cpus:
            xc = xcp._addNode("cpu")
            self._copyAtts(xc, c)
            xcp._addNode(xc)
        #Memory
        xme = system._addNode("memInfo")
        self._copyAtts(xme, self.mem, skip=("checkInfo",))
        #Disk
        xdi = system._addNode("diskInfo")
        for d in self.disk.disks:
            xdis = xdi._addNode("disk")
            self._copyAtts(xdis, d, skip=("timeRead",))
            xdi._addNode(xdis)
        #PCI
        xpci = system._addNode("pciInfo")
        for m in self.pci.devices:
            xp = xpci._addNode("pciModule")
            self._copyAtts(xp, m)
            xpci._addNode(xp)
        #Packages
        xpac = system._addNode("packages")
        for p in self.packages.packages:
            xpa = xpac._addNode("package")
            self._copyAtts(xpa, p)
            xpac._addNode(xpa)
        #Kernel
        xker = system._addNode("kernel")
        for k in self.kernel.parametes:
            xke = xker._addNode("parameter")
            self._copyAtts(xke, k)
            xker._addNode(xke)
        #PFRING
        xpfr = system._addNode("pfring")
        self._copyAtts(xpfr, self.pfring, skip=("checkInfo",))
        #NETWORK
        xnet = system._addNode("network")
        for nc in self.network.cards:
            xn = xnet._addNode("card")
            self._copyAtts(xn, nc, skip=("getRingParams",))
            xnet._addNode(xn)
        #Write Results -- `with` closes the file even if toxml() raises
        # (the original left the handle open on error).
        with open(fileName, 'w') as f:
            f.write(x.toxml())
    def applyXSLT(self, fileName):
        # Placeholder: XSLT post-processing of the XML (never implemented).
        pass
def main(argv=None):
    """Collect the full system inventory and dump it to results.xml."""
    # argv is accepted (getopt-style convention) but currently ignored.
    s = systemInfo()
    s.xmlOut("results.xml")
if __name__ == "__main__":
    # main() returns None, so this exits with status 0 on success.
    sys.exit(main())
| [
[
8,
0,
0.0437,
0.0713,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0828,
0.0023,
0,
0.66,
0.04,
509,
0,
1,
0,
0,
509,
0,
0
],
[
1,
0,
0.0851,
0.0023,
0,
0.66,
... | [
"\"\"\"\nCreated by Jaime Blasco on 2009-09-15\nUploaed to Labs on 2011-09-26 <--- First contribution :P\n\nLicense:\n\nCopyright (c) 2009 AlienVault\nAll rights reserved.",
"import sys",
"import getopt",
"import fileinput",
"import commands",
"import re",
"from xmlobject import XMLFile",
"import libx... |
'''GTK User Interface code for ClearCutter'''
__author__ = "CP Constantine"
__email__ = "conrad@alienvault.com"
__copyright__ = 'Copyright:Alienvault 2012'
__credits__ = ["Conrad Constantine"]
__version__ = "0.1"
__license__ = "BSD"
__status__ = "Prototype"
__maintainer__ = "CP Constantine"
import gtk, gtk.glade, pygtk
class ClearCutterUI:
    """ClearCutter GTK frontend"""
    # Class-level placeholders; wTree is rebound per-instance in __init__.
    # NOTE(review): gladefile is never used -- the glade path is hard-coded
    # below.  Consider removing or wiring it up.
    gladefile = ""
    wTree = ""
    def __init__(self):
        # Load the widget tree from ccui.glade in the working directory.
        self.wTree = gtk.glade.XML("ccui.glade")
        #Get the Main Window, and connect the "destroy" event
        self.window = self.wTree.get_widget("MainWindow")
        if (self.window):
            # Closing the window ends the GTK main loop.
            self.window.connect("destroy", gtk.main_quit)
if __name__ == "__main__":
    # Build the UI and hand control to the GTK main loop.
    hwg = ClearCutterUI()
    gtk.main()
[
8,
0,
0.0312,
0.0312,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.0938,
0.0312,
0,
0.66,
0.0909,
777,
1,
0,
0,
0,
0,
3,
0
],
[
14,
0,
0.125,
0.0312,
0,
0.66,... | [
"'''GTK User Interface code for ClearCutter'''",
"__author__ = \"CP Constantine\"",
"__email__ = \"conrad@alienvault.com\"",
"__copyright__ = 'Copyright:Alienvault 2012'",
"__credits__ = [\"Conrad Constantine\"]",
"__version__ = \"0.1\"",
"__license__ = \"BSD\"",
"__status__ = \"Prototype\"",
"__mai... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.