code
stringlengths
1
1.49M
vector
listlengths
0
7.38k
snippet
listlengths
0
7.38k
import cv2.cv as cv import tesseract import sys api = tesseract.TessBaseAPI() api.Init(".","eng",tesseract.OEM_DEFAULT) api.SetPageSegMode(tesseract.PSM_AUTO) image=cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE) tesseract.SetCvImage(image,api) text=api.GetUTF8Text() conf=api.MeanTextConf() print "scanned text: "+text
[ [ 1, 0, 0.0769, 0.0769, 0, 0.66, 0, 369, 0, 1, 0, 0, 369, 0, 0 ], [ 1, 0, 0.1538, 0.0769, 0, 0.66, 0.1, 332, 0, 1, 0, 0, 332, 0, 0 ], [ 1, 0, 0.2308, 0.0769, 0, 0.6...
[ "import cv2.cv as cv", "import tesseract", "import sys", "api = tesseract.TessBaseAPI()", "api.Init(\".\",\"eng\",tesseract.OEM_DEFAULT)", "api.SetPageSegMode(tesseract.PSM_AUTO)", "image=cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)", "tesseract.SetCvImage(image,api)", "text=api.GetUTF8Text...
#!/usr/bin/env python # -*- encoding:utf8 -*- # protoc-gen-erl # Google's Protocol Buffers project, ported to lua. # https://code.google.com/p/protoc-gen-lua/ # # Copyright (c) 2010 , 林卓毅 (Zhuoyi Lin) netsnail@gmail.com # All rights reserved. # # Use, modification and distribution are subject to the "New BSD License" # as listed at <url: http://www.opensource.org/licenses/bsd-license.php >. import sys import os.path as path from cStringIO import StringIO import plugin_pb2 import google.protobuf.descriptor_pb2 as descriptor_pb2 _packages = {} _files = {} _message = {} FDP = plugin_pb2.descriptor_pb2.FieldDescriptorProto if sys.platform == "win32": import msvcrt, os msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY) msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) class CppType: CPPTYPE_INT32 = 1 CPPTYPE_INT64 = 2 CPPTYPE_UINT32 = 3 CPPTYPE_UINT64 = 4 CPPTYPE_DOUBLE = 5 CPPTYPE_FLOAT = 6 CPPTYPE_BOOL = 7 CPPTYPE_ENUM = 8 CPPTYPE_STRING = 9 CPPTYPE_MESSAGE = 10 CPP_TYPE ={ FDP.TYPE_DOUBLE : CppType.CPPTYPE_DOUBLE, FDP.TYPE_FLOAT : CppType.CPPTYPE_FLOAT, FDP.TYPE_INT64 : CppType.CPPTYPE_INT64, FDP.TYPE_UINT64 : CppType.CPPTYPE_UINT64, FDP.TYPE_INT32 : CppType.CPPTYPE_INT32, FDP.TYPE_FIXED64 : CppType.CPPTYPE_UINT64, FDP.TYPE_FIXED32 : CppType.CPPTYPE_UINT32, FDP.TYPE_BOOL : CppType.CPPTYPE_BOOL, FDP.TYPE_STRING : CppType.CPPTYPE_STRING, FDP.TYPE_MESSAGE : CppType.CPPTYPE_MESSAGE, FDP.TYPE_BYTES : CppType.CPPTYPE_STRING, FDP.TYPE_UINT32 : CppType.CPPTYPE_UINT32, FDP.TYPE_ENUM : CppType.CPPTYPE_ENUM, FDP.TYPE_SFIXED32 : CppType.CPPTYPE_INT32, FDP.TYPE_SFIXED64 : CppType.CPPTYPE_INT64, FDP.TYPE_SINT32 : CppType.CPPTYPE_INT32, FDP.TYPE_SINT64 : CppType.CPPTYPE_INT64 } def printerr(*args): sys.stderr.write(" ".join(args)) sys.stderr.write("\n") sys.stderr.flush() class TreeNode(object): def __init__(self, name, parent=None, filename=None, package=None): super(TreeNode, self).__init__() self.child = [] self.parent = parent self.filename = filename self.package = package if 
parent: self.parent.add_child(self) self.name = name def add_child(self, child): self.child.append(child) def find_child(self, child_names): if child_names: for i in self.child: if i.name == child_names[0]: return i.find_child(child_names[1:]) raise StandardError else: return self def get_child(self, child_name): for i in self.child: if i.name == child_name: return i return None def get_path(self, end = None): pos = self out = [] while pos and pos != end: out.append(pos.name) pos = pos.parent out.reverse() return '.'.join(out) def get_global_name(self): return self.get_path() def get_local_name(self): pos = self while pos.parent: pos = pos.parent if self.package and pos.name == self.package[-1]: break return self.get_path(pos) def __str__(self): return self.to_string(0) def __repr__(self): return str(self) def to_string(self, indent = 0): return ' '*indent + '<TreeNode ' + self.name + '(\n' + \ ','.join([i.to_string(indent + 4) for i in self.child]) + \ ' '*indent +')>\n' class Env(object): filename = None package = None extend = None descriptor = None message = None context = None register = None def __init__(self): self.message_tree = TreeNode('') self.scope = self.message_tree def get_global_name(self): return self.scope.get_global_name() def get_local_name(self): return self.scope.get_local_name() def get_ref_name(self, type_name): try: node = self.lookup_name(type_name) except: # if the child doesn't be founded, it must be in this file return type_name[len('.'.join(self.package)) + 2:] if node.filename != self.filename: return node.filename + '_pb.' 
+ node.get_local_name() return node.get_local_name() def lookup_name(self, name): names = name.split('.') if names[0] == '': return self.message_tree.find_child(names[1:]) else: return self.scope.parent.find_child(names) def enter_package(self, package): if not package: return self.message_tree names = package.split('.') pos = self.message_tree for i, name in enumerate(names): new_pos = pos.get_child(name) if new_pos: pos = new_pos else: return self._build_nodes(pos, names[i:]) return pos def enter_file(self, filename, package): self.filename = filename self.package = package.split('.') self._init_field() self.scope = self.enter_package(package) def exit_file(self): self._init_field() self.filename = None self.package = [] self.scope = self.scope.parent def enter(self, message_name): self.scope = TreeNode(message_name, self.scope, self.filename, self.package) def exit(self): self.scope = self.scope.parent def _init_field(self): self.descriptor = [] self.context = [] self.message = [] self.register = [] def _build_nodes(self, node, names): parent = node for i in names: parent = TreeNode(i, parent, self.filename, self.package) return parent class Writer(object): def __init__(self, prefix=None): self.io = StringIO() self.__indent = '' self.__prefix = prefix def getvalue(self): return self.io.getvalue() def __enter__(self): self.__indent += ' ' return self def __exit__(self, type, value, trackback): self.__indent = self.__indent[:-4] def __call__(self, data): self.io.write(self.__indent) if self.__prefix: self.io.write(self.__prefix) self.io.write(data) DEFAULT_VALUE = { FDP.TYPE_DOUBLE : '0.0', FDP.TYPE_FLOAT : '0.0', FDP.TYPE_INT64 : '0', FDP.TYPE_UINT64 : '0', FDP.TYPE_INT32 : '0', FDP.TYPE_FIXED64 : '0', FDP.TYPE_FIXED32 : '0', FDP.TYPE_BOOL : 'false', FDP.TYPE_STRING : '""', FDP.TYPE_MESSAGE : 'nil', FDP.TYPE_BYTES : '""', FDP.TYPE_UINT32 : '0', FDP.TYPE_ENUM : '1', FDP.TYPE_SFIXED32 : '0', FDP.TYPE_SFIXED64 : '0', FDP.TYPE_SINT32 : '0', FDP.TYPE_SINT64 : '0', } 
def code_gen_enum_item(index, enum_value, env): full_name = env.get_local_name() + '.' + enum_value.name obj_name = full_name.upper().replace('.', '_') + '_ENUM' env.descriptor.append( "local %s = protobuf.EnumValueDescriptor();\n"% obj_name ) context = Writer(obj_name) context('.name = "%s"\n' % enum_value.name) context('.index = %d\n' % index) context('.number = %d\n' % enum_value.number) env.context.append(context.getvalue()) return obj_name def code_gen_enum(enum_desc, env): env.enter(enum_desc.name) full_name = env.get_local_name() obj_name = full_name.upper().replace('.', '_') env.descriptor.append( "local %s = protobuf.EnumDescriptor();\n"% obj_name ) context = Writer(obj_name) context('.name = "%s"\n' % enum_desc.name) context('.full_name = "%s"\n' % env.get_global_name()) values = [] for i, enum_value in enumerate(enum_desc.value): values.append(code_gen_enum_item(i, enum_value, env)) context('.values = {%s}\n' % ','.join(values)) env.context.append(context.getvalue()) env.exit() return obj_name def code_gen_field(index, field_desc, env): full_name = env.get_local_name() + '.' + field_desc.name obj_name = full_name.upper().replace('.', '_') + '_FIELD' env.descriptor.append( "local %s = protobuf.FieldDescriptor();\n"% obj_name ) context = Writer(obj_name) context('.name = "%s"\n' % field_desc.name) context('.full_name = "%s"\n' % ( env.get_global_name() + '.' 
+ field_desc.name)) context('.number = %d\n' % field_desc.number) context('.index = %d\n' % index) context('.label = %d\n' % field_desc.label) if field_desc.HasField("default_value"): context('.has_default_value = true\n') value = field_desc.default_value if field_desc.type == FDP.TYPE_STRING: context('.default_value = "%s"\n'%value) else: context('.default_value = %s\n'%value) else: context('.has_default_value = false\n') if field_desc.label == FDP.LABEL_REPEATED: default_value = "{}" elif field_desc.HasField('type_name'): default_value = "nil" else: default_value = DEFAULT_VALUE[field_desc.type] context('.default_value = %s\n' % default_value) if field_desc.HasField('type_name'): type_name = env.get_ref_name(field_desc.type_name).upper().replace('.', '_') if field_desc.type == FDP.TYPE_MESSAGE: context('.message_type = %s\n' % type_name) else: context('.enum_type = %s\n' % type_name) if field_desc.HasField('extendee'): type_name = env.get_ref_name(field_desc.extendee) env.register.append( "%s.RegisterExtension(%s)\n" % (type_name, obj_name) ) context('.type = %d\n' % field_desc.type) context('.cpp_type = %d\n\n' % CPP_TYPE[field_desc.type]) env.context.append(context.getvalue()) return obj_name def code_gen_message(message_descriptor, env, containing_type = None): env.enter(message_descriptor.name) full_name = env.get_local_name() obj_name = full_name.upper().replace('.', '_') env.descriptor.append( "local %s = protobuf.Descriptor();\n"% obj_name ) context = Writer(obj_name) context('.name = "%s"\n' % message_descriptor.name) context('.full_name = "%s"\n' % env.get_global_name()) nested_types = [] for msg_desc in message_descriptor.nested_type: msg_name = code_gen_message(msg_desc, env, obj_name) nested_types.append(msg_name) context('.nested_types = {%s}\n' % ', '.join(nested_types)) enums = [] for enum_desc in message_descriptor.enum_type: enums.append(code_gen_enum(enum_desc, env)) context('.enum_types = {%s}\n' % ', '.join(enums)) fields = [] for i, 
field_desc in enumerate(message_descriptor.field): fields.append(code_gen_field(i, field_desc, env)) context('.fields = {%s}\n' % ', '.join(fields)) if len(message_descriptor.extension_range) > 0: context('.is_extendable = true\n') else: context('.is_extendable = false\n') extensions = [] for i, field_desc in enumerate(message_descriptor.extension): extensions.append(code_gen_field(i, field_desc, env)) context('.extensions = {%s}\n' % ', '.join(extensions)) if containing_type: context('.containing_type = %s\n' % containing_type) env.message.append('%s = protobuf.Message(%s)\n' % (full_name, obj_name)) env.context.append(context.getvalue()) env.exit() return obj_name def write_header(writer): writer("""-- Generated By protoc-gen-lua Do not Edit """) def code_gen_file(proto_file, env, is_gen): filename = path.splitext(proto_file.name)[0] env.enter_file(filename, proto_file.package) includes = [] for f in proto_file.dependency: inc_file = path.splitext(f)[0] includes.append(inc_file) # for field_desc in proto_file.extension: # code_gen_extensions(field_desc, field_desc.name, env) for enum_desc in proto_file.enum_type: code_gen_enum(enum_desc, env) for enum_value in enum_desc.value: env.message.append('%s = %d\n' % (enum_value.name, enum_value.number)) for msg_desc in proto_file.message_type: code_gen_message(msg_desc, env) if is_gen: lua = Writer() write_header(lua) lua('local protobuf = require "protobuf"\n') for i in includes: lua('local %s_pb = require("%s_pb")\n' % (i, i)) lua("module('%s_pb')\n" % env.filename) lua('\n\n') map(lua, env.descriptor) lua('\n') map(lua, env.context) lua('\n') env.message.sort() map(lua, env.message) lua('\n') map(lua, env.register) _files[env.filename+ '_pb.lua'] = lua.getvalue() env.exit_file() def main(): plugin_require_bin = sys.stdin.read() code_gen_req = plugin_pb2.CodeGeneratorRequest() code_gen_req.ParseFromString(plugin_require_bin) env = Env() for proto_file in code_gen_req.proto_file: code_gen_file(proto_file, env, 
proto_file.name in code_gen_req.file_to_generate) code_generated = plugin_pb2.CodeGeneratorResponse() for k in _files: file_desc = code_generated.file.add() file_desc.name = k file_desc.content = _files[k] sys.stdout.write(code_generated.SerializeToString()) if __name__ == "__main__": main()
[ [ 1, 0, 0.0289, 0.0022, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.0311, 0.0022, 0, 0.66, 0.0417, 79, 0, 1, 0, 0, 79, 0, 0 ], [ 1, 0, 0.0333, 0.0022, 0, 0....
[ "import sys", "import os.path as path", "from cStringIO import StringIO", "import plugin_pb2", "import google.protobuf.descriptor_pb2 as descriptor_pb2", "_packages = {}", "_files = {}", "_message = {}", "FDP = plugin_pb2.descriptor_pb2.FieldDescriptorProto", "if sys.platform == \"win32\":\n im...
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2012 Zdenko Podobný # Author: Zdenko Podobný # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Simple python demo script of tesseract-ocr 3.02 c-api """ import os import sys import ctypes # Demo variables lang = "eng" filename = "../phototest.tif" libpath = "/usr/local/lib64/" libpath_w = "../vs2008/DLL_Release/" TESSDATA_PREFIX = os.environ.get('TESSDATA_PREFIX') if not TESSDATA_PREFIX: TESSDATA_PREFIX = "../" if sys.platform == "win32": libname = libpath_w + "libtesseract302.dll" libname_alt = "libtesseract302.dll" os.environ["PATH"] += os.pathsep + libpath_w else: libname = libpath + "libtesseract.so.3.0.2" libname_alt = "libtesseract.so.3" try: tesseract = ctypes.cdll.LoadLibrary(libname) except: try: tesseract = ctypes.cdll.LoadLibrary(libname_alt) except WindowsError, err: print("Trying to load '%s'..." % libname) print("Trying to load '%s'..." % libname_alt) print(err) exit(1) tesseract.TessVersion.restype = ctypes.c_char_p tesseract_version = tesseract.TessVersion()[:4] # We need to check library version because libtesseract.so.3 is symlink # and can point to other version than 3.02 if float(tesseract_version) < 3.02: print("Found tesseract-ocr library version %s." 
% tesseract_version) print("C-API is present only in version 3.02!") exit(2) api = tesseract.TessBaseAPICreate() rc = tesseract.TessBaseAPIInit3(api, TESSDATA_PREFIX, lang); if (rc): tesseract.TessBaseAPIDelete(api) print("Could not initialize tesseract.\n") exit(3) text_out = tesseract.TessBaseAPIProcessPages(api, filename, None , 0); result_text = ctypes.string_at(text_out) print result_text
[ [ 1, 0, 0.25, 0.25, 0, 0.66, 0, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.5, 0.25, 0, 0.66, 0.5, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.75, 0.25, 0, 0.66, 1, ...
[ "import os", "import sys", "import ctypes" ]
#!/usr/bin/python -u #-*- coding: UTF-8 -*- import subprocess import MySQLdb import os import re import sys import time import statvfs ip="114.80.213.44" ping = subprocess.Popen(["ping", "-c", "2", "-w", "500", ip], shell=False) ping.wait() if ping.returncode != 0: #print ping.returncode, "ERROR: failed to ping host. Please check." conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root') cursor=conn.cursor() conn.select_db('os') sql = "insert into down(datetime) values(%s)" param = (time.strftime('%Y-%m-%d',time.localtime(time.time()))) n = cursor.execute(sql,param) print n cursor.close() sys.exit(1) else: print "OK"
[ [ 1, 0, 0.1111, 0.037, 0, 0.66, 0, 394, 0, 1, 0, 0, 394, 0, 0 ], [ 1, 0, 0.1481, 0.037, 0, 0.66, 0.1, 838, 0, 1, 0, 0, 838, 0, 0 ], [ 1, 0, 0.1852, 0.037, 0, 0.66, ...
[ "import subprocess", "import MySQLdb", "import os", "import re", "import sys", "import time", "import statvfs", "ip=\"114.80.213.44\"", "ping = subprocess.Popen([\"ping\", \"-c\", \"2\", \"-w\", \"500\", ip], shell=False)", "ping.wait()", "if ping.returncode != 0:\n #print ping.returncode, \"...
#!/usr/bin/python -u #-*- coding: UTF-8 -*- import subprocess import MySQLdb import os import re import sys import time import statvfs ip="114.80.213.44" ping = subprocess.Popen(["ping", "-c", "2", "-w", "500", ip], shell=False) ping.wait() if ping.returncode != 0: #print ping.returncode, "ERROR: failed to ping host. Please check." conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root') cursor=conn.cursor() conn.select_db('os') sql = "insert into down(datetime) values(%s)" param = (time.strftime('%Y-%m-%d',time.localtime(time.time()))) n = cursor.execute(sql,param) print n cursor.close() sys.exit(1) else: print "OK"
[ [ 1, 0, 0.1111, 0.037, 0, 0.66, 0, 394, 0, 1, 0, 0, 394, 0, 0 ], [ 1, 0, 0.1481, 0.037, 0, 0.66, 0.1, 838, 0, 1, 0, 0, 838, 0, 0 ], [ 1, 0, 0.1852, 0.037, 0, 0.66, ...
[ "import subprocess", "import MySQLdb", "import os", "import re", "import sys", "import time", "import statvfs", "ip=\"114.80.213.44\"", "ping = subprocess.Popen([\"ping\", \"-c\", \"2\", \"-w\", \"500\", ip], shell=False)", "ping.wait()", "if ping.returncode != 0:\n #print ping.returncode, \"...
#!/usr/bin/python -u #-*- coding: UTF-8 -*- import subprocess import MySQLdb import os import re import sys import time import statvfs ip="114.80.213.44" ping = subprocess.Popen(["ping", "-c", "2", "-w", "500", ip], shell=False) ping.wait() if ping.returncode != 0: #print ping.returncode, "ERROR: failed to ping host. Please check." conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root') cursor=conn.cursor() conn.select_db('os') sql = "insert into down(datetime) values(%s)" param = (time.strftime('%Y-%m-%d',time.localtime(time.time()))) n = cursor.execute(sql,param) print n cursor.close() sys.exit(1) else: print "OK"
[ [ 1, 0, 0.1111, 0.037, 0, 0.66, 0, 394, 0, 1, 0, 0, 394, 0, 0 ], [ 1, 0, 0.1481, 0.037, 0, 0.66, 0.1, 838, 0, 1, 0, 0, 838, 0, 0 ], [ 1, 0, 0.1852, 0.037, 0, 0.66, ...
[ "import subprocess", "import MySQLdb", "import os", "import re", "import sys", "import time", "import statvfs", "ip=\"114.80.213.44\"", "ping = subprocess.Popen([\"ping\", \"-c\", \"2\", \"-w\", \"500\", ip], shell=False)", "ping.wait()", "if ping.returncode != 0:\n #print ping.returncode, \"...
#!/usr/bin/python -u #-*- coding: UTF-8 -*- import subprocess import MySQLdb import os import re import sys import time import statvfs ip="114.80.213.44" ping = subprocess.Popen(["ping", "-c", "2", "-w", "500", ip], shell=False) ping.wait() if ping.returncode != 0: #print ping.returncode, "ERROR: failed to ping host. Please check." conn = MySQLdb.connect(host='localhost',port=3306,user='root',passwd='root') cursor=conn.cursor() conn.select_db('os') sql = "insert into down(datetime) values(%s)" param = (time.strftime('%Y-%m-%d',time.localtime(time.time()))) n = cursor.execute(sql,param) print n cursor.close() sys.exit(1) else: print "OK"
[ [ 1, 0, 0.1111, 0.037, 0, 0.66, 0, 394, 0, 1, 0, 0, 394, 0, 0 ], [ 1, 0, 0.1481, 0.037, 0, 0.66, 0.1, 838, 0, 1, 0, 0, 838, 0, 0 ], [ 1, 0, 0.1852, 0.037, 0, 0.66, ...
[ "import subprocess", "import MySQLdb", "import os", "import re", "import sys", "import time", "import statvfs", "ip=\"114.80.213.44\"", "ping = subprocess.Popen([\"ping\", \"-c\", \"2\", \"-w\", \"500\", ip], shell=False)", "ping.wait()", "if ping.returncode != 0:\n #print ping.returncode, \"...
#!/usr/bin/python # Copyright 2011 Google, Inc. All Rights Reserved. # simple script to walk source tree looking for third-party licenses # dumps resulting html page to stdout import os, re, mimetypes, sys # read source directories to scan from command line SOURCE = sys.argv[1:] # regex to find /* */ style comment blocks COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL) # regex used to detect if comment block is a license COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE) COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE) EXCLUDE_TYPES = [ "application/xml", "image/png", ] # list of known licenses; keys are derived by stripping all whitespace and # forcing to lowercase to help combine multiple files that have same license. KNOWN_LICENSES = {} class License: def __init__(self, license_text): self.license_text = license_text self.filenames = [] # add filename to the list of files that have the same license text def add_file(self, filename): if filename not in self.filenames: self.filenames.append(filename) LICENSE_KEY = re.compile(r"[^\w]") def find_license(license_text): # TODO(alice): a lot these licenses are almost identical Apache licenses. # Most of them differ in origin/modifications. Consider combining similar # licenses. 
license_key = LICENSE_KEY.sub("", license_text).lower() if license_key not in KNOWN_LICENSES: KNOWN_LICENSES[license_key] = License(license_text) return KNOWN_LICENSES[license_key] def discover_license(exact_path, filename): # when filename ends with LICENSE, assume applies to filename prefixed if filename.endswith("LICENSE"): with open(exact_path) as file: license_text = file.read() target_filename = filename[:-len("LICENSE")] if target_filename.endswith("."): target_filename = target_filename[:-1] find_license(license_text).add_file(target_filename) return None # try searching for license blocks in raw file mimetype = mimetypes.guess_type(filename) if mimetype in EXCLUDE_TYPES: return None with open(exact_path) as file: raw_file = file.read() # include comments that have both "license" and "copyright" in the text for comment in COMMENT_BLOCK.finditer(raw_file): comment = comment.group(1) if COMMENT_LICENSE.search(comment) is None: continue if COMMENT_COPYRIGHT.search(comment) is None: continue find_license(comment).add_file(filename) for source in SOURCE: for root, dirs, files in os.walk(source): for name in files: discover_license(os.path.join(root, name), name) print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>" for license in KNOWN_LICENSES.values(): print "<h3>Notices for files:</h3><ul>" filenames = license.filenames filenames.sort() for filename in filenames: print "<li>%s</li>" % (filename) print "</ul>" print "<pre>%s</pre>" % license.license_text print "</body></html>"
[ [ 1, 0, 0.0816, 0.0102, 0, 0.66, 0, 688, 0, 4, 0, 0, 688, 0, 0 ], [ 14, 0, 0.1224, 0.0102, 0, 0.66, 0.0714, 792, 6, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.1531, 0.0102, 0, ...
[ "import os, re, mimetypes, sys", "SOURCE = sys.argv[1:]", "COMMENT_BLOCK = re.compile(r\"(/\\*.+?\\*/)\", re.MULTILINE | re.DOTALL)", "COMMENT_LICENSE = re.compile(r\"(license)\", re.IGNORECASE)", "COMMENT_COPYRIGHT = re.compile(r\"(copyright)\", re.IGNORECASE)", "EXCLUDE_TYPES = [\n \"application/xml\...
#!/usr/bin/python # -*- coding: utf-8 -*- # Dieses Script kontrolliert eine Lösung. Es wird ausgegeben, wie viele Farben # verwendet wurden und wie groß das Dreieck ist. # Funktioniert nur mit korrekter Eingabedatei (keine Leerzeichen zum trennen der # Farben, neue Zeilen für eine neue Ebene des n-Traumrechtecks) import copy, os from optparse import OptionParser import psyco psyco.full() parser = OptionParser() parser.add_option("-i", "--input", dest="solutionfile", default= os.getcwd() + '/Beispiele/solution-4-errors.txt', help="Textdatei mit der Loesung", metavar="FILE") parser.add_option("-s", "--short", action="store_true", dest="short", default=False, help="Die Meldungen erscheinen in Kurzform.") parser.add_option("-e", "--errors", action="store_true", dest="display_errors", default=False, help="Die Fehler werden ausgegeben.") (options, args) = parser.parse_args() solutionfile = options.solutionfile display_errors=options.display_errors # 12345m = x # 2 # 3 # 4 # 5 # n = y # Get Data###################################################################### # my_data[ebene - startet oben ][position - startet links] def check_all_left_top(rectangle, x1, y1): # Auf Fehler überprüfen: # Die aktuelle kugel (x1, y1) ist links oben error_list = [] links_oben = rectangle[x1][y1] for x2 in xrange(x1+1, m): rechts_oben = rectangle[x2][y1] for y2 in xrange(y1+1, n): links_unten = rectangle[x1][y2] rechts_unten = rectangle[x2][y2] if links_oben == links_unten == rechts_oben == rechts_unten: error_list.append([(x1,y1),(x1,y2), (x2, y1), (x2, y2)]) return error_list def get_all_errors(rectangle): errors = [] for x1, ebene in enumerate(rectangle): for y1, farbe in enumerate(ebene): errors_tmp = check_all_left_top(rectangle, x1, y1) for line in errors_tmp: errors.append(line) return errors def get_data(filename): my_data = [] f = open(filename, 'r') lines = f.readlines() for line in lines: temp = line.strip() #temp = temp.split(' ') my_data.append(temp) return my_data def 
get_colors(my_data): colors = [] for liste in my_data: for color in liste: if not color in colors: colors.append(color) return colors rectangle = get_data(solutionfile) color_list = get_colors(rectangle) n = len(rectangle) m = len(rectangle[0]) error_list = get_all_errors(rectangle) print str(n) + "x" + str(m) print "Es wurden " + str(len(color_list)) + " Farben verwendet." print "Es wurden " + str(len(error_list))+ " Fehler gefunden." if display_errors: for error in error_list: print error
[ [ 1, 0, 0.0957, 0.0106, 0, 0.66, 0, 739, 0, 2, 0, 0, 739, 0, 0 ], [ 1, 0, 0.1064, 0.0106, 0, 0.66, 0.0435, 323, 0, 1, 0, 0, 323, 0, 0 ], [ 1, 0, 0.1277, 0.0106, 0, ...
[ "import copy, os", "from optparse import OptionParser", "import psyco", "psyco.full()", "parser = OptionParser()", "parser.add_option(\"-i\", \"--input\", dest=\"solutionfile\", \n default= os.getcwd() + '/Beispiele/solution-4-errors.txt',\n help=\"Textdatei mit der Loesung\"...
#!/usr/bin/python # -*- coding: utf-8 -*- # Dieses Script kontrolliert eine Lösung. Es wird ausgegeben, wie viele Farben # verwendet wurden und wie groß das Dreieck ist. # Funktioniert nur mit korrekter Eingabedatei (keine Leerzeichen zum trennen der # Farben, neue Zeilen für eine neue Ebene des n-Traumrechtecks) import copy, os from optparse import OptionParser import psyco psyco.full() parser = OptionParser() parser.add_option("-i", "--input", dest="solutionfile", default= os.getcwd() + '/Beispiele/solution-4-errors.txt', help="Textdatei mit der Loesung", metavar="FILE") parser.add_option("-s", "--short", action="store_true", dest="short", default=False, help="Die Meldungen erscheinen in Kurzform.") parser.add_option("-e", "--errors", action="store_true", dest="display_errors", default=False, help="Die Fehler werden ausgegeben.") (options, args) = parser.parse_args() solutionfile = options.solutionfile display_errors=options.display_errors # 12345m = x # 2 # 3 # 4 # 5 # n = y # Get Data###################################################################### # my_data[ebene - startet oben ][position - startet links] def check_all_left_top(rectangle, x1, y1): # Auf Fehler überprüfen: # Die aktuelle kugel (x1, y1) ist links oben error_list = [] links_oben = rectangle[x1][y1] for x2 in xrange(x1+1, m): rechts_oben = rectangle[x2][y1] for y2 in xrange(y1+1, n): links_unten = rectangle[x1][y2] rechts_unten = rectangle[x2][y2] if links_oben == links_unten == rechts_oben == rechts_unten: error_list.append([(x1,y1),(x1,y2), (x2, y1), (x2, y2)]) return error_list def get_all_errors(rectangle): errors = [] for x1, ebene in enumerate(rectangle): for y1, farbe in enumerate(ebene): errors_tmp = check_all_left_top(rectangle, x1, y1) for line in errors_tmp: errors.append(line) return errors def get_data(filename): my_data = [] f = open(filename, 'r') lines = f.readlines() for line in lines: temp = line.strip() #temp = temp.split(' ') my_data.append(temp) return my_data def 
get_colors(my_data): colors = [] for liste in my_data: for color in liste: if not color in colors: colors.append(color) return colors rectangle = get_data(solutionfile) color_list = get_colors(rectangle) n = len(rectangle) m = len(rectangle[0]) error_list = get_all_errors(rectangle) print str(n) + "x" + str(m) print "Es wurden " + str(len(color_list)) + " Farben verwendet." print "Es wurden " + str(len(error_list))+ " Fehler gefunden." if display_errors: for error in error_list: print error
[ [ 1, 0, 0.0957, 0.0106, 0, 0.66, 0, 739, 0, 2, 0, 0, 739, 0, 0 ], [ 1, 0, 0.1064, 0.0106, 0, 0.66, 0.0435, 323, 0, 1, 0, 0, 323, 0, 0 ], [ 1, 0, 0.1277, 0.0106, 0, ...
[ "import copy, os", "from optparse import OptionParser", "import psyco", "psyco.full()", "parser = OptionParser()", "parser.add_option(\"-i\", \"--input\", dest=\"solutionfile\", \n default= os.getcwd() + '/Beispiele/solution-4-errors.txt',\n help=\"Textdatei mit der Loesung\"...
'''
Module which brings history information about files from Mercurial.

@author: Rodrigo Damazio
'''

import re
import subprocess

# Matches one line of "hg annotate -c" output: a 12-char short hash, a colon,
# then the annotated file line.
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')


def _GetOutputLines(args):
  '''
  Runs an external process and returns its output as a list of lines.

  @param args: the arguments to run
  '''
  process = subprocess.Popen(args, stdout=subprocess.PIPE, universal_newlines = True, shell = False)
  output = process.communicate()[0]
  return output.splitlines()


def FillMercurialRevisions(filename, parsed_file):
  '''
  Fills the revs attribute of all strings in the given parsed file with a
  list of revisions that touched the lines corresponding to that string.

  @param filename: the name of the file to get history for
  @param parsed_file: the parsed file to modify
  @raise ValueError: if "hg annotate" produces a line we cannot parse
  '''
  # Take output of hg annotate to get revision of each line
  output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])

  # Create a map of line -> revision (key is list index, line 0 doesn't exist)
  line_revs = ['dummy']
  for line in output_lines:
    rev_match = REVISION_REGEX.match(line)
    if not rev_match:
      # BUGFIX: raising a string literal is a TypeError on Python >= 2.6;
      # raise a real exception instead.
      raise ValueError('Unexpected line of output from hg: %s' % line)
    rev_hash = rev_match.group('hash')
    line_revs.append(rev_hash)

  # .values() instead of .itervalues() keeps this Python 2/3 compatible, and
  # string_info instead of str avoids shadowing the builtin.
  for string_info in parsed_file.values():
    # Get the lines that correspond to each string
    start_line = string_info['startLine']
    end_line = string_info['endLine']

    # Get the revisions that touched those lines
    revs = []
    for line_number in range(start_line, end_line + 1):
      revs.append(line_revs[line_number])

    # Merge with any revisions that were already there
    # (for explict revision specification)
    if 'revs' in string_info:
      revs += string_info['revs']

    # Assign the revisions to the string
    string_info['revs'] = frozenset(revs)


def DoesRevisionSuperceed(filename, rev1, rev2):
  '''
  Tells whether a revision superceeds another.
  This essentially means that the older revision is an ancestor of the newer
  one.
  This also returns True if the two revisions are the same.

  @param rev1: the revision that may be superceeding the other
  @param rev2: the revision that may be superceeded
  @return: True if rev1 superceeds rev2 or they're the same
  '''
  if rev1 == rev2:
    return True

  # Ask hg for every ancestor of rev1 (restricted to filename's history) and
  # check whether rev2 is among them.
  args = ['hg', 'log', '-r', 'ancestors(%s)' % rev1, '--template', '{node|short}\n', filename]
  output_lines = _GetOutputLines(args)
  return rev2 in output_lines


def NewestRevision(filename, rev1, rev2):
  '''
  Returns which of two revisions is closest to the head of the repository.
  If none of them is the ancestor of the other, then we return either one.

  @param rev1: the first revision
  @param rev2: the second revision
  '''
  if DoesRevisionSuperceed(filename, rev1, rev2):
    return rev1
  return rev2
[ [ 8, 0, 0.0319, 0.0532, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0745, 0.0106, 0, 0.66, 0.1429, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.0851, 0.0106, 0, 0.66...
[ "'''\nModule which brings history information about files from Mercurial.\n\n@author: Rodrigo Damazio\n'''", "import re", "import subprocess", "REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')", "def _GetOutputLines(args):\n '''\n Runs an external process and returns its output as a list of lines...
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.

Dispatches the command given on the command line ('translate' or 'validate')
over the string resource files for the requested languages.

@author: Rodrigo Damazio
'''

import mytracks.files
import mytracks.translate
import mytracks.validate
import sys


def Usage():
  '''Prints usage information and exits with a failure status.'''
  print('Usage: %s <command> [<language> ...]\n' % sys.argv[0])
  print('Commands are:')
  print('  cleanup')
  print('  translate')
  print('  validate')
  sys.exit(1)


def Translate(languages):
  '''
  Asks the user to interactively translate any missing or oudated strings
  from the files for the given languages.

  @param languages: the languages to translate
  '''
  validator = mytracks.validate.Validator(languages)
  validator.Validate()
  missing = validator.missing_in_lang()
  outdated = validator.outdated_in_lang()
  for lang in languages:
    # BUGFIX: these maps only contain entries for languages that actually have
    # problems, and their values can be frozensets -- use .get() with a default
    # and normalize to lists so the concatenation always works.
    untranslated = list(missing.get(lang, [])) + list(outdated.get(lang, []))
    if len(untranslated) == 0:
      continue
    translator = mytracks.translate.Translator(lang)
    translator.Translate(untranslated)


def Validate(languages):
  '''
  Computes and displays errors in the string files for the given languages.

  @param languages: the languages to compute for
  @return: the total number of errors found
  '''
  validator = mytracks.validate.Validator(languages)
  validator.Validate()
  error_count = 0
  if (validator.valid()):
    print('All files OK')
  else:
    for lang, missing in validator.missing_in_master().iteritems():
      print('Missing in master, present in %s: %s:' % (lang, str(missing)))
      error_count = error_count + len(missing)
    for lang, missing in validator.missing_in_lang().iteritems():
      print('Missing in %s, present in master: %s:' % (lang, str(missing)))
      error_count = error_count + len(missing)
    for lang, outdated in validator.outdated_in_lang().iteritems():
      print('Outdated in %s: %s:' % (lang, str(outdated)))
      error_count = error_count + len(outdated)
  return error_count


if __name__ == '__main__':
  argv = sys.argv
  argc = len(argv)
  if argc < 2:
    Usage()

  languages = mytracks.files.GetAllLanguageFiles()
  # BUGFIX: was "argc == 3", which silently ignored the filter when more than
  # one language was given even though Usage() advertises "[<language> ...]".
  if argc >= 3:
    langs = set(argv[2:])
    if not langs.issubset(languages):
      # BUGFIX: raising a string literal is a TypeError on Python >= 2.6.
      raise ValueError('Language(s) not found')

    # Filter just to the languages specified (always keeping the master 'en')
    languages = dict((lang, lang_file)
                     for lang, lang_file in languages.iteritems()
                     if lang in langs or lang == 'en')

  cmd = argv[1]
  # BUGFIX: initialize before dispatch so the summary below never reads an
  # unbound name (translate does not produce a count) and the validate count
  # is never clobbered afterwards.
  error_count = 0
  if cmd == 'translate':
    Translate(languages)
  elif cmd == 'validate':
    error_count = Validate(languages)
  else:
    Usage()

  print('%d errors found.' % error_count)
[ [ 8, 0, 0.0417, 0.0521, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0833, 0.0104, 0, 0.66, 0.125, 640, 0, 1, 0, 0, 640, 0, 0 ], [ 1, 0, 0.0938, 0.0104, 0, 0.66,...
[ "'''\nEntry point for My Tracks i18n tool.\n\n@author: Rodrigo Damazio\n'''", "import mytracks.files", "import mytracks.translate", "import mytracks.validate", "import sys", "def Usage():\n print('Usage: %s <command> [<language> ...]\\n' % sys.argv[0])\n print('Commands are:')\n print(' cleanup')\n p...
'''
Module which prompts the user for translations and saves them.

TODO: implement

@author: Rodrigo Damazio
'''


class Translator(object):
  '''
  Interactive translator for a single target language (stub implementation).
  '''

  def __init__(self, language):
    '''
    Constructor

    @param language: the language code this translator targets
    '''
    self._language = language

  def Translate(self, string_names):
    # Stub: just show which strings still need translating.
    # Compat fix: a parenthesized single-argument print behaves identically
    # under Python 2 and also parses under Python 3 (the bare print statement
    # does not).
    print(string_names)
[ [ 8, 0, 0.1905, 0.3333, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.7143, 0.619, 0, 0.66, 1, 229, 0, 2, 0, 0, 186, 0, 1 ], [ 8, 1, 0.5238, 0.1429, 1, 0.61, ...
[ "'''\nModule which prompts the user for translations and saves them.\n\nTODO: implement\n\n@author: Rodrigo Damazio\n'''", "class Translator(object):\n '''\n classdocs\n '''\n\n def __init__(self, language):\n '''\n Constructor", " '''\n classdocs\n '''", " def __init__(self, language):\n '''...
'''
Module which compares languague files to the master file and detects
issues.

@author: Rodrigo Damazio
'''
import os
from mytracks.parser import StringsParser
import mytracks.history


class Validator(object):
  # Validates every per-language strings.xml against the master ('en') file:
  # keys missing on either side, and keys whose master revision is newer than
  # the translation's revision.

  def __init__(self, languages):
    '''
    Builds a strings file validator.

    Params:
    @param languages: a dictionary mapping each language to its corresponding
        directory
    '''
    self._langs = {}      # language code -> parsed strings.xml
    self._master = None   # parsed master ('en') strings.xml
    self._language_paths = languages
    parser = StringsParser()
    for lang, lang_dir in languages.iteritems():
      filename = os.path.join(lang_dir, 'strings.xml')
      parsed_file = parser.Parse(filename)
      # Annotate each parsed string with the hg revisions that touched it.
      mytracks.history.FillMercurialRevisions(filename, parsed_file)
      if lang == 'en':
        self._master = parsed_file
      else:
        self._langs[lang] = parsed_file
    self._Reset()

  def Validate(self):
    '''
    Computes whether all the data in the files for the given languages is
    valid.
    '''
    self._Reset()
    self._ValidateMissingKeys()
    self._ValidateOutdatedKeys()

  def valid(self):
    # True only when no validation pass recorded any problem.
    return (len(self._missing_in_master) == 0 and
            len(self._missing_in_lang) == 0 and
            len(self._outdated_in_lang) == 0)

  def missing_in_master(self):
    return self._missing_in_master

  def missing_in_lang(self):
    return self._missing_in_lang

  def outdated_in_lang(self):
    return self._outdated_in_lang

  def _Reset(self):
    # These are maps from language to string name list
    self._missing_in_master = {}
    self._missing_in_lang = {}
    self._outdated_in_lang = {}

  def _ValidateMissingKeys(self):
    '''
    Computes whether there are missing keys on either side.
    '''
    master_keys = frozenset(self._master.iterkeys())
    for lang, file in self._langs.iteritems():
      keys = frozenset(file.iterkeys())
      missing_in_master = keys - master_keys
      missing_in_lang = master_keys - keys
      if len(missing_in_master) > 0:
        self._missing_in_master[lang] = missing_in_master
      if len(missing_in_lang) > 0:
        self._missing_in_lang[lang] = missing_in_lang

  def _ValidateOutdatedKeys(self):
    '''
    Computers whether any of the language keys are outdated with relation to
    the master keys.
    '''
    for lang, file in self._langs.iteritems():
      outdated = []
      # NOTE(review): 'str' shadows the builtin, and keys present in the
      # translation but absent from master will raise KeyError on
      # self._master[key] -- run _ValidateMissingKeys first; confirm callers
      # always do (Validate() does).
      for key, str in file.iteritems():
        # Get all revisions that touched master and language files for this
        # string.
        master_str = self._master[key]
        master_revs = master_str['revs']
        lang_revs = str['revs']
        if not master_revs or not lang_revs:
          print 'WARNING: No revision for %s in %s' % (key, lang)
          continue
        master_file = os.path.join(self._language_paths['en'], 'strings.xml')
        lang_file = os.path.join(self._language_paths[lang], 'strings.xml')
        # Assume that the repository has a single head (TODO: check that),
        # and as such there is always one revision which superceeds all others.
        master_rev = reduce(
            lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
            master_revs)
        lang_rev = reduce(
            lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
            lang_revs)
        # If the master version is newer than the lang version
        # NOTE(review): DoesRevisionSuperceed returns True for equal
        # revisions, so a string touched by the same revision in both files
        # appears outdated -- confirm whether that is intended.
        if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev):
          outdated.append(key)
      if len(outdated) > 0:
        self._outdated_in_lang[lang] = outdated
[ [ 8, 0, 0.0304, 0.0522, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0696, 0.0087, 0, 0.66, 0.25, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0783, 0.0087, 0, 0.66, ...
[ "'''\nModule which compares languague files to the master file and detects\nissues.\n\n@author: Rodrigo Damazio\n'''", "import os", "from mytracks.parser import StringsParser", "import mytracks.history", "class Validator(object):\n\n def __init__(self, languages):\n '''\n Builds a strings file valida...
'''
Module for dealing with resource files (but not their contents).

@author: Rodrigo Damazio
'''

import os.path
from glob import glob
import re

# Layout of the Android resource tree, relative to the project root.
MYTRACKS_RES_DIR = 'MyTracks/res'
ANDROID_MASTER_VALUES = 'values'
ANDROID_VALUES_MASK = 'values-*'


def GetMyTracksDir():
  '''
  Returns the directory in which the MyTracks directory is located.

  Walks upwards from the current working directory until a directory
  containing MyTracks/res is found.

  @raise RuntimeError: when no enclosing directory contains the project
  '''
  path = os.getcwd()
  while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
    parent = os.path.split(path)[0]
    # BUGFIX: raise a real exception (string exceptions are a TypeError on
    # Python >= 2.6) and detect the filesystem root portably -- on any OS the
    # root is its own parent -- instead of comparing against '/'.
    if parent == path:
      raise RuntimeError('Not in My Tracks project')
    # Go up one level
    path = parent
  return path


def GetAllLanguageFiles():
  '''
  Returns a mapping from all found languages to their respective directories.

  The master language maps from 'en' to the plain 'values' directory.

  @raise RuntimeError: when no language directories or no master directory
      can be found
  '''
  mytracks_path = GetMyTracksDir()
  res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
  language_dirs = glob(res_dir)
  master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES)
  if len(language_dirs) == 0:
    # BUGFIX: was a string raise.
    raise RuntimeError('No languages found!')
  if not os.path.isdir(master_dir):
    # BUGFIX: was a string raise.
    raise RuntimeError('Couldn\'t find master file')

  # Extract the language code from each 'values-<lang>' directory name.
  # (lang_dir instead of dir avoids shadowing the builtin.)
  language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', lang_dir)[0], lang_dir)
                     for lang_dir in language_dirs]
  language_tuples.append(('en', master_dir))
  return dict(language_tuples)
[ [ 8, 0, 0.0667, 0.1111, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1333, 0.0222, 0, 0.66, 0.125, 79, 0, 1, 0, 0, 79, 0, 0 ], [ 1, 0, 0.1556, 0.0222, 0, 0.66, ...
[ "'''\nModule for dealing with resource files (but not their contents).\n\n@author: Rodrigo Damazio\n'''", "import os.path", "from glob import glob", "import re", "MYTRACKS_RES_DIR = 'MyTracks/res'", "ANDROID_MASTER_VALUES = 'values'", "ANDROID_VALUES_MASK = 'values-*'", "def GetMyTracksDir():\n '''\n...
'''
Module which parses a string XML file.

@author: Rodrigo Damazio
'''

from xml.parsers.expat import ParserCreate
import re

#import xml.etree.ElementTree as ET

class StringsParser(object):
  '''
  Parser for string XML files.

  This object is not thread-safe and should be used for parsing a single file at
  a time, only.
  '''

  def Parse(self, file):
    '''
    Parses the given file and returns a dictionary mapping keys to an object
    with attributes for that key, such as the value, start/end line and
    explicit revisions.

    In addition to the standard XML format of the strings file, this parser
    supports an annotation inside comments, in one of these formats:
    <!-- KEEP_PARENT name="bla" -->
    <!-- KEEP_PARENT name="bla" rev="123456789012" -->
    Such an annotation indicates that we're explicitly inheriting form the
    master file (and the optional revision says that this decision is
    compatible with the master file up to that revision).

    @param file: the name of the file to parse
    '''
    self._Reset()

    # Unfortunately expat is the only parser that will give us line numbers
    self._xml_parser = ParserCreate()
    self._xml_parser.StartElementHandler = self._StartElementHandler
    self._xml_parser.EndElementHandler = self._EndElementHandler
    self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
    self._xml_parser.CommentHandler = self._CommentHandler

    # BUGFIX: open in binary mode (expat consumes raw bytes and does its own
    # decoding) and use a context manager so the handle is closed even when
    # parsing raises.
    with open(file, 'rb') as file_obj:
      self._xml_parser.ParseFile(file_obj)

    return self._all_strings

  def _Reset(self):
    # State for the <string> element currently being parsed, plus the result.
    self._currentString = None
    self._currentStringName = None
    self._currentStringValue = None
    self._all_strings = {}

  def _StartElementHandler(self, name, attrs):
    # Only named <string> elements are of interest.
    if name != 'string': return
    if 'name' not in attrs: return
    assert not self._currentString
    assert not self._currentStringName

    self._currentString = {
        'startLine' : self._xml_parser.CurrentLineNumber,
    }
    if 'rev' in attrs:
      self._currentString['revs'] = [attrs['rev']]

    self._currentStringName = attrs['name']
    self._currentStringValue = ''

  def _EndElementHandler(self, name):
    if name != 'string': return
    assert self._currentString
    assert self._currentStringName

    self._currentString['value'] = self._currentStringValue
    self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
    self._all_strings[self._currentStringName] = self._currentString

    self._currentString = None
    self._currentStringName = None
    self._currentStringValue = None

  def _CharacterDataHandler(self, data):
    # Character data may arrive in several chunks; accumulate it.
    if not self._currentString: return
    self._currentStringValue += data

  # Recognizes the KEEP_PARENT annotation described in Parse's docstring.
  _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
                                  r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
                                  r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
                                  re.MULTILINE | re.DOTALL)

  def _CommentHandler(self, data):
    keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
    if not keep_parent_match: return

    name = keep_parent_match.group('name')
    self._all_strings[name] = {
        'keepParent' : True,
        'startLine' : self._xml_parser.CurrentLineNumber,
        'endLine' : self._xml_parser.CurrentLineNumber
    }

    rev = keep_parent_match.group('rev')
    if rev:
      self._all_strings[name]['revs'] = [rev]
[ [ 8, 0, 0.0261, 0.0435, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0609, 0.0087, 0, 0.66, 0.3333, 573, 0, 1, 0, 0, 573, 0, 0 ], [ 1, 0, 0.0696, 0.0087, 0, 0.66...
[ "'''\nModule which parses a string XML file.\n\n@author: Rodrigo Damazio\n'''", "from xml.parsers.expat import ParserCreate", "import re", "class StringsParser(object):\n '''\n Parser for string XML files.\n\n This object is not thread-safe and should be used for parsing a single file at\n a time, only.\n...
'''
Created on 18 avr. 12

@author: peterd
'''


class CalculationError(Exception):
    # BUGFIX: inherit from Exception -- the old-style bare class cannot be
    # raised on Python 3, and Exception subclasses are still caught by the
    # existing `except CalculationError:` callers on Python 2.
    pass


def get_1d_cutting_plan(stock_piece_length, custom_pieces, cut_thickness):
    '''
    Computes a best-fit 1D cutting plan for cutting the requested pieces out
    of fixed-length stock pieces.

    Phase 1 enumerates candidate "packs" (combinations of consecutive pieces
    that fit on one stock piece) keyed by their waste; phase 2 greedily picks
    the lowest-waste packs, reusing leftover stock where possible.

    Compat fixes only: dict.has_key() -> `in`, sorted(d.iterkeys()) ->
    sorted(d); the algorithm itself is unchanged.

    @param stock_piece_length: length of one stock piece
    @param custom_pieces: requested piece lengths; each is assumed to already
        include one cut width (see the caller's view) -- TODO confirm
    @param cut_thickness: saw-cut width subtracted back out when reporting
    @return: dict with 'schema' (list of {'id', 'pack', 'waste'}),
        'total_stock_num' and 'total_waste'
    @raise CalculationError: when not every piece could be placed in a pack
    '''
    num_custom_pieces = len(custom_pieces)
    cutting_schema = {}  # waste -> list of packs; a pack is [(index, length), ...]
    i = 0

    while i < num_custom_pieces:
        num_elem = 1
        prev_custom_packs = []
        while num_elem <= num_custom_pieces - i:
            k = num_elem - 1
            n_elem_custom_packs = []
            custom_pack = []
            if num_elem == 1:
                # Single-piece pack starting at index i.
                custom_pack.append((i + k, custom_pieces[i + k]))
                n_elem_custom_packs.append(custom_pack)
                # TODO: move to function
                custom_pack_waste = stock_piece_length - custom_pieces[i + k]
                if custom_pack_waste in cutting_schema:
                    if custom_pack not in cutting_schema[custom_pack_waste]:
                        cutting_schema[custom_pack_waste].append(custom_pack)
                else:
                    cutting_schema[custom_pack_waste] = [custom_pack]
            else:
                # Extend each (num_elem - 1)-sized pack with further pieces.
                for prev_custom_pack in prev_custom_packs:
                    l = k
                    while l < num_custom_pieces:
                        custom_pack = list(prev_custom_pack)
                        custom_pack_used_length = 0
                        for custom_piece in custom_pack:
                            custom_pack_used_length = custom_pack_used_length + custom_piece[1]
                        n = l
                        if i + n < num_custom_pieces:
                            while len(custom_pack) < num_elem:
                                try:
                                    if custom_pack_used_length + custom_pieces[i + n] > stock_piece_length:
                                        break
                                except IndexError:
                                    pass
                                custom_pack.append((i + n, custom_pieces[i + n]))
                                custom_pack_used_length = custom_pack_used_length + custom_pieces[i + n]
                                n = n + 1
                                if i + n == num_custom_pieces:
                                    break
                            n_elem_custom_packs.append(custom_pack)
                            custom_pack_waste = stock_piece_length - custom_pack_used_length
                            # TODO: move to function
                            if custom_pack_waste in cutting_schema:
                                if custom_pack not in cutting_schema[custom_pack_waste]:
                                    cutting_schema[custom_pack_waste].append(custom_pack)
                            else:
                                cutting_schema[custom_pack_waste] = [custom_pack]
                        l = l + 1
                    k = k + 1
            num_elem = num_elem + 1
            prev_custom_packs = list(n_elem_custom_packs)
        i = i + 1

    # Greedy best-fit: walk packs from least to most waste, taking each piece
    # at most once, and reusing leftover stock from already-chosen packs.
    custom_pieces_ids = []
    bestfit_cutting_schema = {}  # waste -> list of chosen packs (lengths only)
    for waste in sorted(cutting_schema):
        for custom_pack in cutting_schema[waste]:
            already_packed = False
            for custom_piece in custom_pack:
                custom_piece_id = custom_piece[0]
                if custom_piece_id in custom_pieces_ids:
                    already_packed = True
                    break
            if not already_packed:
                custom_piece_lengths = []
                custom_pack_length = 0
                for custom_piece in custom_pack:
                    custom_piece_id = custom_piece[0]
                    custom_piece_length = custom_piece[1]
                    custom_pieces_ids.append(custom_piece_id)
                    # use remaining length from any of the previous schemas if possible
                    reused_bestfit_custom_pack = []
                    remaining_waste = 0
                    for bestfit_waste in sorted(bestfit_cutting_schema):
                        i = 0
                        for bestfit_custom_pack in bestfit_cutting_schema[bestfit_waste]:
                            if custom_piece_length <= bestfit_waste:
                                bestfit_custom_pack.append(custom_piece_length)
                                reused_bestfit_custom_pack = list(bestfit_custom_pack)
                                remaining_waste = bestfit_waste - custom_piece_length
                                waste = waste + custom_piece_length
                                break
                            i = i + 1
                        if reused_bestfit_custom_pack:
                            break
                    if not reused_bestfit_custom_pack:
                        custom_piece_lengths.append(custom_piece_length)
                        custom_pack_length = custom_pack_length + custom_piece_length
                    else:
                        # Re-file the reused pack under its reduced waste.
                        custom_packs = bestfit_cutting_schema[bestfit_waste]
                        if len(custom_packs) == 1:
                            del bestfit_cutting_schema[bestfit_waste]
                        else:
                            del bestfit_cutting_schema[bestfit_waste][i]
                        if remaining_waste in bestfit_cutting_schema:
                            bestfit_cutting_schema[remaining_waste].append(reused_bestfit_custom_pack)
                        else:
                            bestfit_cutting_schema[remaining_waste] = [reused_bestfit_custom_pack]
                if custom_piece_lengths:
                    if waste in bestfit_cutting_schema:
                        bestfit_cutting_schema[waste].append(custom_piece_lengths)
                    else:
                        bestfit_cutting_schema[waste] = [custom_piece_lengths]

    if len(custom_pieces_ids) != num_custom_pieces:
        raise CalculationError()

    # Emit the final plan, subtracting the cut width back out of each piece
    # (except pieces clamped to the full stock length).
    cutting_plan = {'schema': [] }
    i = 1
    total_waste = 0
    for bestfit_waste in sorted(bestfit_cutting_schema):
        for bestfit_custom_pack in bestfit_cutting_schema[bestfit_waste]:
            augmented_custom_pack = []
            for piece in bestfit_custom_pack:
                if piece == stock_piece_length:
                    augmented_custom_pack.append(piece)
                else:
                    augmented_custom_pack.append(piece - cut_thickness)
            cutting_plan['schema'].append({'id': i, 'pack': augmented_custom_pack, 'waste': bestfit_waste})
            total_waste = total_waste + bestfit_waste
            i = i + 1
    cutting_plan['total_stock_num'] = i - 1
    cutting_plan['total_waste'] = total_waste
    return cutting_plan
[ [ 8, 0, 0.0194, 0.0323, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.0484, 0.0129, 0, 0.66, 0.5, 246, 0, 0, 0, 0, 0, 0, 0 ], [ 2, 0, 0.5097, 0.8839, 0, 0.66, ...
[ "'''\nCreated on 18 avr. 12\n\n@author: peterd\n'''", "class CalculationError:\n\tpass", "def get_1d_cutting_plan(stock_piece_length, custom_pieces, cut_thickness):\n\tnum_custom_pieces = len(custom_pieces)\t\n\tcutting_schema = {}\t\n\ti = 0\n\t\n\twhile i < num_custom_pieces:\n\t\tnum_elem = 1\t\n\t\tprev_cus...
#!/usr/bin/env python from django.core.management import execute_manager import imp try: imp.find_module('settings') # Assumed to be in the same directory. except ImportError: import sys sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__) sys.exit(1) import settings if __name__ == "__main__": execute_manager(settings)
[ [ 1, 0, 0.1429, 0.0714, 0, 0.66, 0, 879, 0, 1, 0, 0, 879, 0, 0 ], [ 1, 0, 0.2143, 0.0714, 0, 0.66, 0.25, 201, 0, 1, 0, 0, 201, 0, 0 ], [ 7, 0, 0.4643, 0.4286, 0, 0....
[ "from django.core.management import execute_manager", "import imp", "try:\n imp.find_module('settings') # Assumed to be in the same directory.\nexcept ImportError:\n import sys\n sys.stderr.write(\"Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized thi...
from django.conf.urls.defaults import patterns, include, url # Uncomment the next two lines to enable the admin: # from django.contrib import admin # admin.autodiscover() urlpatterns = patterns('', url(r'^solver$', '1d-cutter.views.cutting_plan_solver'), url(r'^1d-cutter/solver', '1d-cutter.views.cutting_plan_solver'), url(r'^1d-cutter$', '1d-cutter.views.init'), url(r'^1d-cutter/', '1d-cutter.views.init'), )
[ [ 1, 0, 0.0833, 0.0833, 0, 0.66, 0, 341, 0, 3, 0, 0, 341, 0, 0 ], [ 14, 0, 0.7917, 0.5, 0, 0.66, 1, 990, 3, 5, 0, 0, 75, 10, 5 ] ]
[ "from django.conf.urls.defaults import patterns, include, url", "urlpatterns = patterns('',\n\turl(r'^solver$', '1d-cutter.views.cutting_plan_solver'),\n url(r'^1d-cutter/solver', '1d-cutter.views.cutting_plan_solver'), \n url(r'^1d-cutter$', '1d-cutter.views.init'),\n url(r'^1d-cutter/', '1d-cutt...
# Django settings for app project.
#
# NOTE(review): mostly the Django 1.3-era generated defaults; only
# ROOT_URLCONF, TEMPLATE_DIRS and INSTALLED_APPS were customized for the
# 1d-cutter app.

DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

# No database engine/name is configured -- the app computes plans in memory
# and persists nothing.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '',                      # Or path to database file if using sqlite3.
        'USER': '',                      # Not used with sqlite3.
        'PASSWORD': '',                  # Not used with sqlite3.
        'HOST': '',                      # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                      # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
#    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to source control -- rotate it and load
# it from the environment before deploying anywhere public.
SECRET_KEY = 'd_@#2nc9-&05aacj5r=z*$_lru)@c2ir%^8tb-t1qwpk6a&h+8'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
#     'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = '1d-cutter.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    # NOTE(review): hard-coded Windows development path; will not resolve on
    # other machines.
    "c:/job_stuff/prj/1d-cutter/templates",
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    '1d-cutter',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
[ [ 14, 0, 0.0204, 0.0068, 0, 0.66, 0, 309, 1, 0, 0, 0, 0, 4, 0 ], [ 14, 0, 0.0272, 0.0068, 0, 0.66, 0.0435, 7, 2, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.0476, 0.0204, 0, 0.6...
[ "DEBUG = True", "TEMPLATE_DEBUG = DEBUG", "ADMINS = (\n # ('Your Name', 'your_email@example.com'),\n)", "MANAGERS = ADMINS", "DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.\n 'NAME': '', ...
from django.views.generic.simple import direct_to_template
from solver import *


def init(request):
    # Renders the empty cutting-plan input form.
    return direct_to_template(request, '1d-cutter/1d_cutting_plan_form.html', {})


def cutting_plan_solver(request):
    # Reads the form fields from the query string.
    # NOTE(review): missing keys raise KeyError and non-numeric
    # stock_length/cut_thickness raise ValueError *outside* the try below, so
    # they surface as a 500 -- confirm the form always supplies them.
    order_num = request.GET['order_num']
    stock_type = request.GET['stock_type']
    stock_length = int(request.GET['stock_length'])
    cut_thickness = int(request.GET['cut_thickness'])
    custom_lengths = []
    # Piece lengths may be separated by commas and/or whitespace.
    custom_lengths_str = request.GET['custom_lengths'].replace(',', ' ')
    for custom_length in custom_lengths_str.split(' '):
        try:
            # Pad each requested piece by one saw-cut width; clamp pieces
            # longer than the stock to the stock length.
            custom_length_ext = int(custom_length) + cut_thickness
            if custom_length_ext > stock_length:
                custom_length_ext = stock_length
            custom_lengths.append(custom_length_ext)
        except ValueError:
            # Non-numeric tokens (e.g. empty strings from "1,,2") are skipped.
            pass
    try:
        cutting_plan = get_1d_cutting_plan(stock_length, custom_lengths, cut_thickness)
        return direct_to_template(request, '1d-cutter/1d_cutting_plan_result.html', {'order_num': order_num, 'stock_type': stock_type, 'cutting_plan': cutting_plan})
    except CalculationError:
        # The solver could not place every requested piece.
        return direct_to_template(request, '1d-cutter/1d_cutting_plan_error.html', {'error': 'CalculationError'})
    except Exception, e:
        # Catch-all: surface any other failure on the error page.
        return direct_to_template(request, '1d-cutter/1d_cutting_plan_error.html', {'error': e})
[ [ 1, 0, 0.0294, 0.0294, 0, 0.66, 0, 956, 0, 1, 0, 0, 956, 0, 0 ], [ 1, 0, 0.0882, 0.0294, 0, 0.66, 0.3333, 186, 0, 1, 0, 0, 186, 0, 0 ], [ 2, 0, 0.1618, 0.0588, 0, ...
[ "from django.views.generic.simple import direct_to_template", "from solver import *", "def init(request):\n\treturn direct_to_template(request, '1d-cutter/1d_cutting_plan_form.html', {})", "\treturn direct_to_template(request, '1d-cutter/1d_cutting_plan_form.html', {})", "def cutting_plan_solver(request):\n...
# Stream-list checker (Python 2 script): extracts playlist URLs from
# "radios.pas" and uses a small thread pool to probe each one, printing the
# URLs that look dead (non-200 response, an HTML error page, or any network
# failure).
import time, urllib , sys, threading

workers = []      # live Worker threads
pendingurls = []  # shared work queue of URLs still to probe

def ex(line):
    # Extracts the single-quoted URL from a Pascal source line; Pascal
    # doubles embedded quotes, so '' is collapsed back to nothing here.
    if "http://" in line: #and (".pls" in line.lower() or ".m3u" in line.lower()):
        url = line.split("'")[1].replace("''", "")
        pendingurls.append(url)

class Worker(threading.Thread):
    # Pulls URLs off the shared list until it is empty.
    # NOTE(review): the check-then-pop pair is not atomic across threads; a
    # racing pop can raise IndexError, which the bare except below absorbs --
    # confirm that is intended.
    def run(self):
        while pendingurls:
            try:
                ok = False
                url = pendingurls.pop()
                target = urllib.urlopen(url)
                if target.getcode() == 200:
                    ok = True
                    # A body containing <html usually means an error or
                    # redirect page rather than an audio stream.
                    for line in target:
                        if ("<html" in line.lower()):
                            ok = False
                            break
                if not ok:
                    print "-------------------"
                    print url
                    print "-------------------"
                #else:
                #    print self.n ,"ok"
            except:
                # Any network/parse error counts as a dead stream.
                print "-------------------"
                print url
                print "-------------------"
        workers.remove(self)

pendingurls = []

print "parsing file"
plsfile = open("radios.pas")
for line in plsfile:
    ex(line)
plsfile.close()

print "starting threads"
for i in range(10):
    worker = Worker()
    workers.append(worker)
    worker.start()

# Poll until every worker has removed itself from the list.
print "waiting threads"
while workers:
    time.sleep(1)
    print len(pendingurls),"remaining"

print "done!!!"
raw_input()
[ [ 1, 0, 0.0161, 0.0161, 0, 0.66, 0, 654, 0, 4, 0, 0, 654, 0, 0 ], [ 14, 0, 0.0484, 0.0161, 0, 0.66, 0.0667, 181, 0, 0, 0, 0, 0, 5, 0 ], [ 14, 0, 0.0645, 0.0161, 0, ...
[ "import time, urllib , sys, threading", "workers = []", "pendingurls = []", "def ex(line):\n if \"http://\" in line: #and (\".pls\" in line.lower() or \".m3u\" in line.lower()):\n url = line.split(\"'\")[1].replace(\"''\", \"\")\n pendingurls.append(url)", " if \"http://\" in line: #and ...
# Converts the Pascal array constants in "radios.pas" (genre names, channel
# names and playlist URLs) into a sorted plain-text listing in "result.txt".
# Python 2 only: print statements, xrange, raw_input and the <> operator.
import struct, string, time, os

pasw = 704  # XOR obfuscation key base (crypt() is unused in this variant)
dst = []    # output fragments, joined and written out at the end

def crypt(text):
    # Obfuscates a URL by XORing each byte with a position-dependent key.
    text = text.replace("http://", "")
    key = len(text) % 10
    result = ""
    for i in xrange(len(text)):
        result += chr( ( ord(text[i]) ^ ( (pasw * (i+1)) + key ) ) % 256)
    return result

def writeint8(num):
    # Appends one unsigned byte to the output buffer.
    data = struct.pack("B",num)
    dst.append(data)

def writestring(text):
    # Appends a length-prefixed (Pascal-style) string to the output buffer.
    l = len(text)
    data = struct.pack("B" + str(l) + "s",l,text)
    dst.append(data)

def getarraysize(line):
    # Parses the upper bound out of "array[0..N]" and returns N + 1.
    return int(line[line.find("..") + 2 : line.find("]")]) + 1

def getarraycontent(line):
    # Returns the Pascal string literal on this line, un-doubling quotes.
    return line[line.find("'") + 1 : line.rfind("'")].replace("''","'")

def error(msg):
    print 'Houston, we have a problem'
    print msg
    raw_input()

# State machine: iLevel tracks which Pascal array declaration we are inside.
bParse = False
iLevel = -2
genres = []
chn = []
pls = []
totalcount = 0
tStart = time.clock()
srcfile = open("radios.pas", "r")

# -2 genrelist array
# -1 content
# 0 chn_ array
# 1 content
# 2 pls_ array
# 3 content
for line in srcfile:
    if "// " in line: # comented line
        continue
    if "const" in line:
        bParse = True
    elif ");" in line:
        # End of an array: advance the state machine; once both the channel
        # and playlist arrays of a genre are complete, flush that genre.
        bParse = False
        if iLevel < 3:
            iLevel += 1
        else:
            iLevel = 0
            # check if both lists have same size
            if len(chn) <> len(pls):
                error("%s chn=%d pls=%d" % (genres[0], len(chn), len(pls)))
            slist = [] # a list that we will sort
            for i1, i2 in zip(chn,pls):
                slist.append((i1,i2))
            chn = []
            pls = []
            slist.sort()
            totalcount += len(slist)
            print "%s %d" % (genres[0], len(slist))
            # write to file
            dst.append('\n');
            dst.append('+' + genres.pop(0) + '\n')
            for i1, i2 in slist:
                dst.append('-' + i1 + '\n')
                dst.append('1' + i2 + '\n')
    elif bParse:
        if iLevel == -2:
            size = getarraysize(line)
            print "%d genres" % size
            iLevel += 1
        elif iLevel == -1:
            genres.append(getarraycontent(line))
        elif iLevel in (0,2):
            # NOTE(review): presumably skips the array declaration line
            # between content sections -- confirm against radios.pas layout.
            iLevel += 1
        elif iLevel == 1:
            chn.append(getarraycontent(line))
        elif iLevel == 3:
            pls.append(getarraycontent(line))

dst = "".join(dst)
srcfile.close()
dstfile = open("result.txt","w")
dstfile.writelines(dst)
dstfile.close()

print "OK, %d radios converted and saved in %fs" % (totalcount, time.clock() - tStart)
raw_input()
[ [ 1, 0, 0.0088, 0.0088, 0, 0.66, 0, 399, 0, 4, 0, 0, 399, 0, 0 ], [ 14, 0, 0.0265, 0.0088, 0, 0.66, 0.0417, 292, 1, 0, 0, 0, 0, 1, 0 ], [ 14, 0, 0.0442, 0.0088, 0, ...
[ "import struct, string, time, os", "pasw = 704", "dst = []", "def crypt(text):\n text = text.replace(\"http://\", \"\")\n key = len(text) % 10\n result = \"\"\n for i in xrange(len(text)):\n result += chr( ( ord(text[i]) ^ ( (pasw * (i+1)) + key ) ) % 256)\n return result", " text =...
import struct, string, time, os pasw = 704 dst = [] def crypt(text): text = text.replace("http://", "") return text #key = len(text) % 10 #result = "" #for i in xrange(len(text)): # result += chr( ( ord(text[i]) ^ ( (pasw * (i+1)) + key ) ) % 256) #return result def writeint8(num): data = struct.pack("B",num) dst.append(data) def writestring(text): l = len(text) data = struct.pack("B" + str(l) + "s",l,text) dst.append(data) def getarraysize(line): return int(line[line.find("..") + 2 : line.find("]")]) + 1 def getarraycontent(line): return line[line.find("'") + 1 : line.rfind("'")].replace("''","'") def error(msg): print 'Houston, we have a problem' print msg raw_input() bParse = False iLevel = -2 genres = [] chn = [] pls = [] totalcount = 0 tStart = time.clock() srcfile = open("radios.pas", "r") dstfile = open("db.dat", "wb") # -2 genrelist array # -1 content # 0 chn_ array # 1 content # 2 pls_ array # 3 content for line in srcfile: if "// " in line: # comented line continue if "const" in line: bParse = True elif ");" in line: bParse = False if iLevel < 3: iLevel += 1 else: iLevel = 0 # check if both lists have same size if len(chn) <> len(pls): error("%s chn=%d pls=%d" % (genres[0], len(chn), len(pls))) slist = [] # a list that we will sort for i1, i2 in zip(chn,pls): slist.append((i1,i2)) chn = [] pls = [] slist.sort() totalcount += len(slist) print "%s %d" % (genres[0], len(slist)) # write to file writestring(genres.pop(0)) writeint8(len(slist)) for i1, i2 in slist: writestring(i1) writestring(crypt(i2)) elif bParse: if iLevel == -2: size = getarraysize(line) print "%d genres" % size writeint8(size) iLevel += 1 elif iLevel == -1: genres.append(getarraycontent(line)) elif iLevel in (0,2): iLevel += 1 elif iLevel == 1: chn.append(getarraycontent(line)) elif iLevel == 3: pls.append(getarraycontent(line)) dst = "".join(dst) dstfile.write(dst) dstfile.close() srcfile.close() dstsize = len(dst) dstfile = open("../engine/db.inc","w") dstfile.write("const dbdata : 
array[0..%d] of Byte = (\n" % (dstsize -1 ,)) srcpos = 0 for c in dst: if srcpos > 0: dstfile.write(",") if srcpos % 12 == 0: dstfile.write("\n") dstfile.write(str(ord(c))) srcpos += 1 dstfile.write("\n);"); dstfile.close() print "OK, %d radios sorted and saved in %fs" % (totalcount, time.clock() - tStart) raw_input()
[ [ 1, 0, 0.0077, 0.0077, 0, 0.66, 0, 399, 0, 4, 0, 0, 399, 0, 0 ], [ 14, 0, 0.0231, 0.0077, 0, 0.66, 0.0323, 292, 1, 0, 0, 0, 0, 1, 0 ], [ 14, 0, 0.0385, 0.0077, 0, ...
[ "import struct, string, time, os", "pasw = 704", "dst = []", "def crypt(text):\n text = text.replace(\"http://\", \"\")\n return text", " text = text.replace(\"http://\", \"\")", " return text", "def writeint8(num):\n data = struct.pack(\"B\",num)\n dst.append(data)", " data = str...
extlist = [".bk1",".bk2",".$$$",".local",".a",".tmp",".drc",".o",".cfg",".ddp", ".stat",".pec2bac",".identcache",".dcu",".ppu",".depend",".layout",".win"] #put extensions to delete import sys, os, subprocess print "START THE CLEARING PROCESS" print "DELETING FILES WITH THE FOLLOWING EXT" print extlist i = 0 for root, dirs, files in os.walk(os.getcwd()): for file in files: #for ext in extlist: fileext = os.path.splitext(file)[1] if fileext in extlist: filepath = os.path.join(root,file) print filepath os.remove(filepath) i+=1 print "%d files found and deleted" % i print "Exiting..."
[ [ 14, 0, 0.0714, 0.0952, 0, 0.66, 0, 699, 0, 0, 0, 0, 0, 5, 0 ], [ 1, 0, 0.1905, 0.0476, 0, 0.66, 0.125, 509, 0, 3, 0, 0, 509, 0, 0 ], [ 8, 0, 0.2857, 0.0476, 0, 0....
[ "extlist = [\".bk1\",\".bk2\",\".$$$\",\".local\",\".a\",\".tmp\",\".drc\",\".o\",\".cfg\",\".ddp\",\n \".stat\",\".pec2bac\",\".identcache\",\".dcu\",\".ppu\",\".depend\",\".layout\",\".win\"] #put extensions to delete", "import sys, os, subprocess", "print(\"START THE CLEARING PROCESS\")", "print(...
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: models.py
Type: Class definition
Last modified: 2010-05-24 22:27

Description:
Classes that wrap the dictionaries returned by the json API into python
objects, so user and status information can be retrieved conveniently.
"""
from utils import parse_timestring


class T163UserBase(object):
    """
    Basic user information that can be publicly accessed.

    Mapped keys of the user dictionary:
        id / screen_name / name        -- account identifiers
        url / description / location   -- profile fields
        followers_count / friends_count / statuses_count / favourites_count
        gender                         -- 0: male, 1: female, other: unknown
        created_at                     -- registration time string

    Appearance/verification keys (profile_image_url_*, profile_*_color,
    verified, reply_type, time_zone, utc_offset, ...) are not mapped yet;
    see the disabled block at the end of __init__.
    """
    def __init__(self, userdict):
        """Long and tedious initialization from the raw json dictionary."""
        # account information
        self.id = userdict['id']
        self.screen_name = userdict['screen_name']
        self.name = userdict['name']
        # user profile
        self.url = userdict['url']
        self.description = userdict['description']
        self.location = userdict['location']
        # following information
        self.followers_count = userdict['followers_count']
        self.statuses_count = userdict['statuses_count']
        self.friends_count = userdict['friends_count']
        self.favourites_count = userdict['favourites_count']
        # Gender is made more human readable.
        if userdict['gender'] == 0:
            self.gender = 'M'
        elif userdict['gender'] == 1:
            self.gender = 'F'
        else:
            self.gender = 'U'
        # Created_at is translated into a python datetime object.
        self.created_at = parse_timestring(userdict['created_at'])
        # these are not implemented yet, so we comment'em'out.
        """
        # account information
        self.verified = userdict['verified']
        # user profile
        self.time_zone = userdict['time_zone']
        self.utc_offset = userdict['utc_offset']
        # avatar image urls.
        self.image_large = userdict['profile_image_url_large']
        self.image_medium = userdict['profile_image_url']
        self.image_small = userdict['profile_image_url_small']
        # user homepage appearance.
        self.profile_sidebar_fill_color = \
            userdict['profile_sidebar_fill_color']
        self.profile_text_color = userdict['profile_text_color']
        self.profile_background_color = userdict['profile_background_color']
        self.profile_link_color = userdict['profile_link_color']
        self.profile_background_image_url = \
            userdict['profile_background_image_url']
        self.profile_background_tile = userdict['profile_background_tile']
        self.profile_sidebar_border_color = \
            userdict['profile_sidebar_border_color']
        # unknown...
        self.reply_type = userdict['reply_type']
        """


class User(T163UserBase):
    """
    Full user record: everything in T163UserBase plus the private contact
    fields (telephone, email).
    """
    def __init__(self, userdict):
        T163UserBase.__init__(self, userdict)
        # additional user profile
        self.telephone = userdict['telephone']
        self.email = userdict['email']


class Follower(T163UserBase):
    """
    Follower record: T163UserBase plus relationship information --
    followed_by (is this user following you), status (this user's latest
    tweet, detailed), following (are you following this user).
    """
    def __init__(self, userdict):
        T163UserBase.__init__(self, userdict)
        self.followed_by = userdict['followed_by']
        self.status = userdict['status']
        self.following = userdict['following']


class T163StatusBase(object):
    """
    Basic status (tweet) information: author id, text, creation time,
    origin status id when retweeted, source client, this status id, and
    the in_reply_to_* triple. (truncated and type are not mapped yet.)
    """
    def __init__(self, status_dict):
        self.user_id = status_dict['user_id']
        self.text = status_dict['text']
        self.created_at = parse_timestring(status_dict['created_at'])
        self.retweet_status_id = status_dict['retweet_status_id']
        self.source = status_dict['source']
        self.id = status_dict['id']
        self.in_reply_to_status_id = status_dict['in_reply_to_status_id']
        self.in_reply_to_screen_name = status_dict['in_reply_to_screen_name']
        self.in_reply_to_user_id = status_dict['in_reply_to_user_id']
        # these are not implemented yet, so we comment'em'out.
        """
        self.truncated = status_dict['truncated']
        self.type = status_dict['type']
        """


class Status(T163StatusBase):
    """
    Detailed status as returned by the show() api: T163StatusBase plus
    the author wrapped as a T163UserBase object. (favorited,
    in_reply_to_status_text, favorited_at and in_reply_to_user_name are
    not mapped yet.)
    """
    def __init__(self, status_dict):
        T163StatusBase.__init__(self, status_dict)
        self.user = T163UserBase(status_dict['user'])
        # these are not implemented yet, so we comment'em'out.
        """
        self.favorited = status_dict['favorited']
        self.in_reply_to_status_text = status_dict['in_reply_to_status_text']
        self.favorited_at = status_dict['favorited_at']
        self.in_reply_to_user_name = status_dict['in_reply_to_user_name']
        """


class StatusWithIpaddr(T163StatusBase):
    """
    Status variant that also carries the poster's ip address
    (e.g. '117.84.92.50').
    """
    def __init__(self, status_dict):
        T163StatusBase.__init__(self, status_dict)
        self.ipaddr = status_dict['ipaddr']


class DirectMessage(object):
    """
    A private message: id/text/created_at plus the sender and recipient,
    each wrapped as a T163UserBase object alongside their raw id and
    screen_name, and the followed_by flag of the sender.
    """
    def __init__(self, messagedict):
        # message
        self.id = messagedict['id']
        self.text = messagedict['text']
        self.created_at = parse_timestring(messagedict['created_at'])
        # sender
        self.sender = T163UserBase(messagedict['sender'])
        self.sender_id = messagedict['sender_id']
        self.sender_screen_name = messagedict['sender_screen_name']
        self.followed = messagedict['followed_by']
        # recipient
        self.recipient = T163UserBase(messagedict['recipient'])
        self.recipient_id = messagedict['recipient_id']
        self.recipient_screen_name = messagedict['recipient_screen_name']


class SearchHit(T163StatusBase):
    """
    One status in a search result: T163StatusBase plus the author wrapped
    as a T163UserBase object. (favorited, favorited_at and
    in_reply_to_user_name are not mapped yet.)
    """
    def __init__(self, status_dict):
        T163StatusBase.__init__(self, status_dict)
        self.user = T163UserBase(status_dict['user'])
        # these are not implemented yet, so we comment'em'out.
        """
        self.favorited = status_dict['favorited']
        self.favorited_at = status_dict['favorited_at']
        self.in_reply_to_user_name = status_dict['in_reply_to_user_name']
        """


class SearchResult(object):
    """
    One page of status search results together with its paging metadata
    (totalHits, availHits, since_id/max_id, page, results_per_page,
    completed_in, refresh_url, next_page, query); the matching statuses
    are wrapped as SearchHit objects in self.result.
    """
    def __init__(self, result_dict):
        self.totalHits = result_dict['totalHits']
        self.next_page = result_dict['next_page']
        self.completed_in = result_dict['completed_in']
        self.availHits = result_dict['availHits']
        self.refresh_url = result_dict['refresh_url']
        self.since_id = result_dict['since_id']
        self.results_per_page = result_dict['results_per_page']
        self.result = []
        for item in result_dict['result']:
            self.result.append(SearchHit(item))
        self.query = result_dict['query']
        self.max_id = result_dict['max_id']
        self.page = result_dict['page']


class UserSearchResult(object):
    """
    One page of user search results (totalHits, availHits); the matching
    users are wrapped as UserSearchHit objects in self.result.
    """
    def __init__(self, result_dict):
        self.totalHits = result_dict['totalHits']
        self.availHits = result_dict['availHits']
        self.result = []
        for item in result_dict['result']:
            self.result.append(UserSearchHit(item))


class UserSearchHit(T163UserBase):
    """
    One user in a search result: T163UserBase plus the user's latest
    status (with ip address) and the following flag. telephone and email
    are always null in search results and therefore not mapped.
    """
    def __init__(self, userdict):
        T163UserBase.__init__(self, userdict)
        # additional user profile
        self.status = StatusWithIpaddr(userdict['status'])
        self.following = userdict['following']
[ [ 8, 0, 0.0241, 0.0331, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0422, 0.003, 0, 0.66, 0.0833, 970, 0, 1, 0, 0, 970, 0, 0 ], [ 3, 0, 0.1867, 0.2741, 0, 0.66,...
[ "\"\"\"\nAuthor: Xia Kai <xiaket@gmail.com>\nFilename: models.py\nType: Class definition\nLast modified: 2010-05-24 22:27\n\nDescription:\nThis file contains a class that would turn a dictionary containing user", "from utils import parse_timestring", "class T163UserBase(object):\n \"...
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: utils.py
Type: Utility
Last modified: 2010-07-18 14:06

Description:
Utility functions for this project.
"""
import locale
import os
import urllib2
from datetime import datetime, timedelta


##############
# Exceptions #
##############
class AuthenticationError(RuntimeError):
    """
    Exception caused by incorrect username/password.
    """
    pass


class UserNotFound(ValueError):
    """
    Exception caused by querying a user that does not exist.
    """
    pass


class IllegalCall(ValueError):
    """
    Exception caused by illegal call, e.g., trying to remove other
    people's status, or a message longer than 163 characters.
    """
    pass


class UnknownError(RuntimeError):
    """
    Unexpected HTTP code returned.
    """
    pass


##############
# Decorators #
##############
def require_login(func):
    """
    Log the session in before the wrapped API call if needed.

    This is a decorator inspired by a similar decorator in Django.
    (The redundant double-closure of the original was flattened to a
    single wrapper -- behavior is unchanged.)
    """
    def wrapped(kls, *args, **kwargs):
        if not kls.logged_in:
            kls._login()
        return func(kls, *args, **kwargs)
    return wrapped


def check_screen_name(func):
    """
    Validate the screen_name argument of the wrapped API call.

    The screen_name must be the first positional argument or be passed
    as a keyword. When present, it is verified with a request to the
    user's homepage, which raises UserNotFound for unknown users.
    """
    def wrapped(kls, *args, **kwargs):
        if 'screen_name' in kwargs:
            _screen_name = kwargs['screen_name']
        elif len(args):
            _screen_name = args[0]
        else:
            _screen_name = None
        if _screen_name:
            # If the screen_name is set, we shall check if it is a valid
            # screen_name. We do this by visiting the homepage of this
            # screen_name:
            _url = "/users/show.json?screen_name=%s" % _screen_name
            _message = "Specified user does not exist."
            _err_dict = {
                404: (UserNotFound, _message),
            }
            kls.request(_url, errors=_err_dict)
        return func(kls, *args, **kwargs)
    return wrapped


def check_status_id(func):
    """
    Validate the status_id argument of the wrapped API call.

    The status_id must be the first positional argument or be passed as
    a keyword. When present, it is verified with a show_status() call.

    Bugfix: the original referenced _status_id even when it was never
    assigned (no keyword and no positional argument), which raised a
    confusing NameError; we now simply skip the check in that case.
    """
    def wrapped(kls, *args, **kwargs):
        if 'status_id' in kwargs:
            _status_id = kwargs['status_id']
        elif len(args) != 0:
            _status_id = args[0]
        else:
            _status_id = None
        if _status_id is not None:
            kls.show_status(_status_id)
        return func(kls, *args, **kwargs)
    return wrapped


def parse_timestring(timestring):
    """
    Accept a time string, parse it and return a datetime object.

    >>> parse_timestring("Mon Apr 26 10:49:29 +0800 2010")
    datetime.datetime(2010, 4, 26, 2, 49, 29)
    >>> parse_timestring("Mon Apr 26 10:49:29 -0800 2010")
    datetime.datetime(2010, 4, 26, 18, 49, 29)
    >>> parse_timestring("Mon Apr 26 10:49:29 +0830 2010")
    datetime.datetime(2010, 4, 26, 2, 19, 29)
    """
    oldlocale = locale.getlocale(locale.LC_TIME)
    # On different OS platform, setlocale would have to be called differently.
    if os.name == 'nt':
        locale.setlocale(locale.LC_TIME, 'english')
    elif os.name == 'posix':
        locale.setlocale(locale.LC_TIME, 'en_US.UTF-8')
    try:
        # Drop the "+0800" timezone field before strptime (it cannot
        # parse %z in python 2) and re-apply the offset manually below.
        strf = timestring[:20] + timestring[26:]
        created_at = datetime.strptime(strf, "%a %b %d %H:%M:%S %Y")
    finally:
        # set it back even if parsing failed, so the process locale is
        # never left modified.
        locale.setlocale(locale.LC_TIME, oldlocale)
    delta = timestring[20:25]
    hour = int(delta[:3])
    minute = int(delta[3:])
    # NOTE(review): for negative offsets with minutes (e.g. "-0830") the
    # minutes are still added as positive -- confirm whether such offsets
    # can occur before relying on this.
    return created_at - timedelta(hours=hour, minutes=minute)


class RedirectHandler(urllib2.HTTPRedirectHandler):
    def http_error_302(self, req, fp, code, msg, headers):
        """
        For the moment, t.163.com would not return 404 status code
        correctly. Instead, it would return a 302 and redirect user to a
        page that will display 404 information. This would make web user
        happy, but we have to do extra to make our API elegant. Thus we
        have this handler to correctly raise 404 code.
        """
        result = urllib2.HTTPRedirectHandler.http_error_302(
            self, req, fp, code, msg, headers)
        if headers['location'] == 'http://t.163.com/notfound':
            raise urllib2.HTTPError(req.get_full_url(), 404, msg, headers, fp)
        return result


if __name__ == "__main__":
    import doctest
    doctest.testmod()
[ [ 8, 0, 0.0422, 0.0542, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0723, 0.006, 0, 0.66, 0.0714, 884, 0, 1, 0, 0, 884, 0, 0 ], [ 1, 0, 0.0783, 0.006, 0, 0.66, ...
[ "\"\"\"\nAuthor: Xia Kai <xiaket@gmail.com>\nFilename: utils.py\nType: Utility\nLast modified: 2010-07-18 14:06\n\nDescription:\nUtility functions for this project.", "import locale", "import os", "import urllib2", "from datetime import datetime, timedelta", "class AuthenticationE...
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: __init__.py
Type: Module meta information holder
Last modified: 2010-05-16 20:44

Description:
"""
__author__ = "xiaket"
__version__ = "0.2b"
[ [ 8, 0, 0.5, 0.6154, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.9231, 0.0769, 0, 0.66, 0.5, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 1, 0.0769, 0, 0.66, 1, ...
[ "\"\"\"\nAuthor: Xia Kai <xiaket@gmail.com>\nFilename: __init__.py\nType: Module meta information holder\nLast modified: 2010-05-16 20:44\n\nDescription:\n\"\"\"", "__author__ = \"xiaket\"", "__version__ = \"0.2b\"" ]
#!/usr/bin/env python
#coding=utf-8
"""
Author: Xia Kai <xiaket@gmail.com>
Filename: models.py
Type: Class definition
Last modified: 2010-05-24 22:27

Description:
Classes that wrap the dictionaries returned by the json API into python
objects, so user and status information can be retrieved conveniently.
"""
from utils import parse_timestring


class T163UserBase(object):
    """
    Basic user information that can be publicly accessed.

    Mapped keys of the user dictionary:
        id / screen_name / name        -- account identifiers
        url / description / location   -- profile fields
        followers_count / friends_count / statuses_count / favourites_count
        gender                         -- 0: male, 1: female, other: unknown
        created_at                     -- registration time string

    Appearance/verification keys (profile_image_url_*, profile_*_color,
    verified, reply_type, time_zone, utc_offset, ...) are not mapped yet;
    see the disabled block at the end of __init__.
    """
    def __init__(self, userdict):
        """Long and tedious initialization from the raw json dictionary."""
        # account information
        self.id = userdict['id']
        self.screen_name = userdict['screen_name']
        self.name = userdict['name']
        # user profile
        self.url = userdict['url']
        self.description = userdict['description']
        self.location = userdict['location']
        # following information
        self.followers_count = userdict['followers_count']
        self.statuses_count = userdict['statuses_count']
        self.friends_count = userdict['friends_count']
        self.favourites_count = userdict['favourites_count']
        # Gender is made more human readable.
        if userdict['gender'] == 0:
            self.gender = 'M'
        elif userdict['gender'] == 1:
            self.gender = 'F'
        else:
            self.gender = 'U'
        # Created_at is translated into a python datetime object.
        self.created_at = parse_timestring(userdict['created_at'])
        # these are not implemented yet, so we comment'em'out.
        """
        # account information
        self.verified = userdict['verified']
        # user profile
        self.time_zone = userdict['time_zone']
        self.utc_offset = userdict['utc_offset']
        # avatar image urls.
        self.image_large = userdict['profile_image_url_large']
        self.image_medium = userdict['profile_image_url']
        self.image_small = userdict['profile_image_url_small']
        # user homepage appearance.
        self.profile_sidebar_fill_color = \
            userdict['profile_sidebar_fill_color']
        self.profile_text_color = userdict['profile_text_color']
        self.profile_background_color = userdict['profile_background_color']
        self.profile_link_color = userdict['profile_link_color']
        self.profile_background_image_url = \
            userdict['profile_background_image_url']
        self.profile_background_tile = userdict['profile_background_tile']
        self.profile_sidebar_border_color = \
            userdict['profile_sidebar_border_color']
        # unknown...
        self.reply_type = userdict['reply_type']
        """


class User(T163UserBase):
    """
    Full user record: everything in T163UserBase plus the private contact
    fields (telephone, email).
    """
    def __init__(self, userdict):
        T163UserBase.__init__(self, userdict)
        # additional user profile
        self.telephone = userdict['telephone']
        self.email = userdict['email']


class Follower(T163UserBase):
    """
    Follower record: T163UserBase plus relationship information --
    followed_by (is this user following you), status (this user's latest
    tweet, detailed), following (are you following this user).
    """
    def __init__(self, userdict):
        T163UserBase.__init__(self, userdict)
        self.followed_by = userdict['followed_by']
        self.status = userdict['status']
        self.following = userdict['following']


class T163StatusBase(object):
    """
    Basic status (tweet) information: author id, text, creation time,
    origin status id when retweeted, source client, this status id, and
    the in_reply_to_* triple. (truncated and type are not mapped yet.)
    """
    def __init__(self, status_dict):
        self.user_id = status_dict['user_id']
        self.text = status_dict['text']
        self.created_at = parse_timestring(status_dict['created_at'])
        self.retweet_status_id = status_dict['retweet_status_id']
        self.source = status_dict['source']
        self.id = status_dict['id']
        self.in_reply_to_status_id = status_dict['in_reply_to_status_id']
        self.in_reply_to_screen_name = status_dict['in_reply_to_screen_name']
        self.in_reply_to_user_id = status_dict['in_reply_to_user_id']
        # these are not implemented yet, so we comment'em'out.
        """
        self.truncated = status_dict['truncated']
        self.type = status_dict['type']
        """


class Status(T163StatusBase):
    """
    Detailed status as returned by the show() api: T163StatusBase plus
    the author wrapped as a T163UserBase object. (favorited,
    in_reply_to_status_text, favorited_at and in_reply_to_user_name are
    not mapped yet.)
    """
    def __init__(self, status_dict):
        T163StatusBase.__init__(self, status_dict)
        self.user = T163UserBase(status_dict['user'])
        # these are not implemented yet, so we comment'em'out.
        """
        self.favorited = status_dict['favorited']
        self.in_reply_to_status_text = status_dict['in_reply_to_status_text']
        self.favorited_at = status_dict['favorited_at']
        self.in_reply_to_user_name = status_dict['in_reply_to_user_name']
        """


class StatusWithIpaddr(T163StatusBase):
    """
    Status variant that also carries the poster's ip address
    (e.g. '117.84.92.50').
    """
    def __init__(self, status_dict):
        T163StatusBase.__init__(self, status_dict)
        self.ipaddr = status_dict['ipaddr']


class DirectMessage(object):
    """
    A private message: id/text/created_at plus the sender and recipient,
    each wrapped as a T163UserBase object alongside their raw id and
    screen_name, and the followed_by flag of the sender.
    """
    def __init__(self, messagedict):
        # message
        self.id = messagedict['id']
        self.text = messagedict['text']
        self.created_at = parse_timestring(messagedict['created_at'])
        # sender
        self.sender = T163UserBase(messagedict['sender'])
        self.sender_id = messagedict['sender_id']
        self.sender_screen_name = messagedict['sender_screen_name']
        self.followed = messagedict['followed_by']
        # recipient
        self.recipient = T163UserBase(messagedict['recipient'])
        self.recipient_id = messagedict['recipient_id']
        self.recipient_screen_name = messagedict['recipient_screen_name']


class SearchHit(T163StatusBase):
    """
    One status in a search result: T163StatusBase plus the author wrapped
    as a T163UserBase object. (favorited, favorited_at and
    in_reply_to_user_name are not mapped yet.)
    """
    def __init__(self, status_dict):
        T163StatusBase.__init__(self, status_dict)
        self.user = T163UserBase(status_dict['user'])
        # these are not implemented yet, so we comment'em'out.
        """
        self.favorited = status_dict['favorited']
        self.favorited_at = status_dict['favorited_at']
        self.in_reply_to_user_name = status_dict['in_reply_to_user_name']
        """


class SearchResult(object):
    """
    One page of status search results together with its paging metadata
    (totalHits, availHits, since_id/max_id, page, results_per_page,
    completed_in, refresh_url, next_page, query); the matching statuses
    are wrapped as SearchHit objects in self.result.
    """
    def __init__(self, result_dict):
        self.totalHits = result_dict['totalHits']
        self.next_page = result_dict['next_page']
        self.completed_in = result_dict['completed_in']
        self.availHits = result_dict['availHits']
        self.refresh_url = result_dict['refresh_url']
        self.since_id = result_dict['since_id']
        self.results_per_page = result_dict['results_per_page']
        self.result = []
        for item in result_dict['result']:
            self.result.append(SearchHit(item))
        self.query = result_dict['query']
        self.max_id = result_dict['max_id']
        self.page = result_dict['page']


class UserSearchResult(object):
    """
    One page of user search results (totalHits, availHits); the matching
    users are wrapped as UserSearchHit objects in self.result.
    """
    def __init__(self, result_dict):
        self.totalHits = result_dict['totalHits']
        self.availHits = result_dict['availHits']
        self.result = []
        for item in result_dict['result']:
            self.result.append(UserSearchHit(item))


class UserSearchHit(T163UserBase):
    """
    One user in a search result: T163UserBase plus the user's latest
    status (with ip address) and the following flag. telephone and email
    are always null in search results and therefore not mapped.
    """
    def __init__(self, userdict):
        T163UserBase.__init__(self, userdict)
        # additional user profile
        self.status = StatusWithIpaddr(userdict['status'])
        self.following = userdict['following']
[ [ 8, 0, 0.0241, 0.0331, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0422, 0.003, 0, 0.66, 0.0833, 970, 0, 1, 0, 0, 970, 0, 0 ], [ 3, 0, 0.1867, 0.2741, 0, 0.66,...
[ "\"\"\"\nAuthor: Xia Kai <xiaket@gmail.com>\nFilename: models.py\nType: Class definition\nLast modified: 2010-05-24 22:27\n\nDescription:\nThis file contains a class that would turn a dictionary containing user", "from utils import parse_timestring", "class T163UserBase(object):\n \"...
#!/usr/bin/env python #coding=utf-8 """ Author: Xia Kai <xiaket@gmail.com> Filename: tests.py Type: Unit test module Last modified: 2010-05-21 16:51 Description: This file contains unit test cases for this project. """ import os import time import unittest from models import T163UserBase, User, Follower, Status, DirectMessage from models import SearchResult, SearchHit, UserSearchResult, UserSearchHit from session import T163Session as Session from utils import AuthenticationError, UserNotFound, IllegalCall # This FAKE_USER should not be available as an 163 account. FAKE_USER = "xiakai.nju@gmail.com" FAKE_PASS = "fakepass" # This should be a valid account, preferably, at your disposal. If you do not # have another account, at least find some account that is following you. and # you are not following. The following test case would assume that the # TEST_ACCOUNT is following the account that you give to __init__. # TEST_ACCOUNT_ALT should be someone who is not following you. TEST_ACCOUNT = "xiaket" TEST_ACCOUNT_ALT = "zhangjiawei" # This should be an invalid screen_name FAKE_SCREEN_NAME = "aslfkqwlalsdlfalkw" # This should be an invalid user/status id. FAKE_ID = "398066" # These are test message contents, variables with prefix FAKE are longer than # 163 characters, while variables with prefix TEST lay within the constraint. FAKE_MESSAGE_EN = u"abcdefghijklmnopqrstuvwxyz "*7 TEST_MESSAGE_EN = u"abcdefghijklmnopqrstuvwxyz "*6 FAKE_MESSAGE_CN = u"中文消息长度测试 "*19 TEST_MESSAGE_CN = u"中文消息长度测试 "*18 class SessionTests(unittest.TestCase): """ This class would test authentication related cases. In summary, we have: 1. No username provided. 2. Invalid username 3. Invalid username/password pair. 4. Invalid cookie file. 5. Invalid cookie file/username pair. 6. Valid cookie file removed after Session initilization. """ def __init__(self, testname, username=None, password=None, fulltest=False): """ Over default __init__ method to allow import of username and password. 
Setup username and passwords. Some tests are somehow dangerous, we do them with a fulltest flag. """ super(SessionTests, self).__init__(testname) self.username = username self.password = password self.fulltest = fulltest if fulltest: print "performing full test." def session_init(self): """ This function would test the __init__ method of Session. There sure be no cookie file both before and after this test case. """ # If neither username nor cookiefile is given to __init__. An # AuthenticationError would no doubt be raised. self.assertRaises(AuthenticationError, Session) # Should raise an AuthenticationError if an invalid username is # provided. self.assertRaises(AuthenticationError, Session, username=FAKE_USER) # This should not raise Exceptions, since it is a valid # username/password pair. Session(username=self.username, password=self.password) # This should not raise Exceptions, even though neither username nor # password is provided. Session(cookiefile="%s.txt" % self.username) # Since we have a valid cookie file now, anything given as username or # password would be ignored, so the following example would work. Session(cookiefile="%s.txt" % self.username, username=FAKE_USER) # Remove existing cookie file before we continue. The cookie should # have been created before. os.remove("%s.txt" % self.username) # This would cause AuthenticationError, since the cookiefile provided # does not exist. self.assertRaises( AuthenticationError, Session, cookiefile="%s.txt" % FAKE_USER, ) # This would work, since a fallback username/password scheme would # work. But this would save cookie to "%s.txt" % FAKE_USER, instead of # "%s.txt" % self.username. So we shall remove the cookiefile after # this test. Session( cookiefile="%s.txt" % FAKE_USER, username=self.username, password=self.password, ) os.remove("%s.txt" % FAKE_USER) # This should raise AuthenticationError, since it is an invalid # username/password pair. # CAUTION: This is dangerous. 
Frequent test of the following test would # lock your valid account up. if self.fulltest: self.assertRaises( AuthenticationError, Session, username=self.username, password=FAKE_PASS, ) def relation_api(self): """ This function would test the relationship related APIs. """ # Initialize a session. session = Session(username=self.username, password=self.password) ########################## # self.show_friendship # #------------------------# # /friendships/show.json # ########################## # Calling Session.show_friendship(target_screen_name=FAKE_SCREEN_NAME) # should cause an exception. self.assertRaises( UserNotFound, session.show_friendship, target_screen_name=FAKE_SCREEN_NAME, ) # This time, the target is valid, while the source_id is invalid. It # should also cause an exception. self.assertRaises( UserNotFound, session.show_friendship, source_id=FAKE_ID, target_screen_name=TEST_ACCOUNT, ) # This time, no target is provided, this should cause another # exception. self.assertRaises( IllegalCall, session.show_friendship, source_id=FAKE_ID, ) # This should work, giving the following relationship between # TEST_ACCOUNT and self.username. fo, foed = session.show_friendship( target_screen_name=TEST_ACCOUNT ) self.assertTrue( foed, "TEST_ACCOUNT:%s should follow %s" % (TEST_ACCOUNT, self.username), ) ########################################## # self.followers # #----------------------------------------# # /statuses/followers/{screen_name}.json # ########################################## # User current user's screen_name by default, so this should equal. self.assertEqual( [user.id for user in session.followers()], [user.id for user in session.followers(session.screen_name)], ) # This should never return an empty list, since at least TEST_ACCOUNT # is following self.username. Items in the list should be an instance # of Follower. 
followers = session.followers(session.screen_name) self.assertTrue(isinstance(followers[0], Follower)) # Test again, TEST_ACCOUNT should be following self.username self.assertTrue(TEST_ACCOUNT in [u.screen_name for u in followers]) # This should raise an exception, for the screen_name is invalid. self.assertRaises( UserNotFound, session.followers, screen_name=FAKE_SCREEN_NAME, ) # The above should work for both positional arguments and keyword # arguments. self.assertRaises( UserNotFound, session.followers, FAKE_SCREEN_NAME, ) ########################################## # self.friends # #----------------------------------------# # /statuses/friends/{screen_name}.json # ########################################## # This should give a list of Follower objects. friends = session.friends(TEST_ACCOUNT) self.assertTrue(isinstance(friends[0], Follower)) # Since an almost identical API is well tested(I hope!) above, I see no # point repeating it here. ############################################ # self.create_friendship # #------------------------------------------# # /friendships/create/{screen_name}.json # ############################################ # As before, this function is decorated with check_screen_name. # So we shall get a UserNotFound with an invalid screen_name. self.assertRaises( UserNotFound, session.create_friendship, FAKE_SCREEN_NAME, ) # Follow yourself would get an IllegalCall exception. self.assertRaises( IllegalCall, session.create_friendship, session.screen_name, ) # Before we continue, we shall follow # This should work. session.create_friendship(TEST_ACCOUNT) ############################################ # self.destroy_friendship # #------------------------------------------# # /friendships/destroy/{screen_name}.json # ############################################ # This should work. 
session.destroy_friendship(TEST_ACCOUNT) # It cannot be done twice: self.assertRaises( IllegalCall, session.destroy_friendship, TEST_ACCOUNT, ) # Nor can we unfollow someone who do not exist. self.assertRaises( UserNotFound, session.destroy_friendship, FAKE_SCREEN_NAME, ) def mail_api(self): """ This function would test the direct message related APIs. """ # Initialize a session. session = Session(username=self.username, password=self.password) ############################################ # self.new_direct_message # #------------------------------------------# # /direct_messages/new.json # ############################################ # This is an IllegalCall, since the receiver is invalid. self.assertRaises( IllegalCall, session.new_direct_message, FAKE_SCREEN_NAME, TEST_MESSAGE_CN, ) # This is an IllegalCall, since the message is too long. self.assertRaises( IllegalCall, session.new_direct_message, TEST_ACCOUNT, FAKE_MESSAGE_CN, ) # This is an IllegalCall, since the message is too long. self.assertRaises( IllegalCall, session.new_direct_message, TEST_ACCOUNT, FAKE_MESSAGE_EN, ) # This is an IllegalCall, since you cannot send a mail to yourself. self.assertRaises( IllegalCall, session.new_direct_message, session.screen_name, TEST_MESSAGE_EN, ) # This is an IllegalCall, since TEST_ACCOUNT_ALT is not following you. self.assertRaises( IllegalCall, session.new_direct_message, TEST_ACCOUNT_ALT, TEST_MESSAGE_EN, ) # Finally, this should work. message = session.new_direct_message(TEST_ACCOUNT, TEST_MESSAGE_EN) self.assertTrue(isinstance(message, DirectMessage)) # A direct message is sent to TEST_ACCOUNT, we shall retrieve the first # message in outbox and check if it is the same one. 
############################################ # self.sent_direct_messages # #------------------------------------------# # /direct_messages/sent.json # ############################################ messages = session.sent_direct_messages() self.assertTrue(isinstance(messages[0], DirectMessage)) self.assertEqual(messages[0].id, message.id) ############################################ # self.destroy_direct_message # #------------------------------------------# # /direct_messages/destroy/{id}.json # ############################################ # This is an illegal call, since the id specified is invalid. self.assertRaises( IllegalCall, session.destroy_direct_message, FAKE_ID, ) removed_message = session.destroy_direct_message(message.id) self.assertEqual(removed_message.id, message.id) # This is an illegal call, since you cannot remove the same direct # message twice. self.assertRaises( IllegalCall, session.destroy_direct_message, message.id, ) ############################################ # self.direct_messages # #------------------------------------------# # /direct_messages.json # ############################################ # If there are direct messages in your inbox, we shall try to make sure # that it is a DirectMessage instance. messages = session.direct_messages() if len(messages) != 0: self.assertTrue(isinstance(messages[0], DirectMessage)) def search_api(self): """ This function would test the search related APIs. """ # Initialize a session. session = Session(username=self.username, password=self.password) ############################################ # self.search # #------------------------------------------# # /search.json # ############################################ # This is an IllegalCall, since no query keyword is specified. self.assertRaises( TypeError, session.search, ) # I hope I'm not a narcissist... 
search_result = session.search(TEST_ACCOUNT) self.assertTrue(isinstance(search_result, SearchResult)) result = search_result.result[0] self.assertTrue(isinstance(result, SearchHit)) ############################################ # self.user_search # #------------------------------------------# # /1/user/search.json # ############################################ search_result = session.user_search(TEST_ACCOUNT) self.assertTrue(isinstance(search_result, UserSearchResult)) result = search_result.result[0] self.assertTrue(isinstance(result, UserSearchHit)) def favorite_api(self): """ This function would test favorite related APIs. """ # Initialize a session. session = Session(username=self.username, password=self.password) ############################################ # self.create_favorite # #------------------------------------------# # /favorites/create/{id}.json # ############################################ # This is an IllegalCall, since the id is invalid. self.assertRaises( IllegalCall, session.create_favorite, FAKE_ID, ) # We shall get a valid message id by looking for the first message in # some user's timeline. favorited_status = session.user_timeline(TEST_ACCOUNT)[0] # Now add this message as favourite. session.create_favorite(favorited_status.id) # Now we are ready to test the self.favorites API. ############################################ # self.favorites # #------------------------------------------# # /favorites/{screen_name}.json # ############################################ # We shall find the first favorite message and compare the id. favorite_status = session.favorites()[0] self.assertTrue(isinstance(favorite_status, Status)) self.assertEqual(favorite_status.id, favorited_status.id) # This is an UserNotFound, since the screen_name is invalid. 
self.assertRaises( UserNotFound, session.favorites, FAKE_SCREEN_NAME, ) ############################################ # self.destroy_favorite # #------------------------------------------# # /favorites/destroy/{id}.json # ############################################ # This is an illegal call, since the id specified is invalid. self.assertRaises( IllegalCall, session.destroy_favorite, FAKE_ID, ) # This would work session.destroy_favorite(favorited_status.id) def status_api(self): """ This function would test the status related APIs. """ # Initialize a session. session = Session(username=self.username, password=self.password) ############################################ # self.update # #------------------------------------------# # /statuses/update.json # ############################################ # This is an IllegalCall, since the message is too long. self.assertRaises( IllegalCall, session.update, FAKE_MESSAGE_CN, ) # This is an IllegalCall, since the message is too long. self.assertRaises( IllegalCall, session.update, FAKE_MESSAGE_EN, ) newstatus = session.update(TEST_MESSAGE_CN) self.assertTrue(isinstance(newstatus, Status)) # TODO, when we have fully implemented reply and retweet, we have to # add more test case here. ################################################ # self.user_timeline # #----------------------------------------------# # /statuses/user_timeline/{screen_name}.json # ################################################ # The following screen_name is invalid. self.assertRaises( UserNotFound, session.user_timeline, FAKE_SCREEN_NAME, ) # Get the time line. I do not understand why it take so long to refresh # the timeline. 20 is not enough. # The following code would sometimes mysteriously fail. 
if self.fulltest: time.sleep(30) statuses = session.user_timeline() self.assertEqual(statuses[0].id, newstatus.id) ############################################ # self.show_status # #------------------------------------------# # /statuses/show/{id}.json # ############################################ # This is an IllegalCall, since the id is invalid. self.assertRaises( IllegalCall, session.show_status, FAKE_ID, ) self.assertEqual( session.show_status(newstatus.id).text, TEST_MESSAGE_CN.strip(), ) ############################################ # self.destroy_status # #------------------------------------------# # /statuses/destroy/{id}.json # ############################################ # This is IllegalCall, since the message id is invalid. self.assertRaises( IllegalCall, session.destroy_status, FAKE_ID, ) # This should work session.destroy_status(newstatus.id) # Get the time line. I do not understand why it take so long to refresh # the timeline. 20 is not enough. 30 would work most of the time. # The following code would sometimes mysteriously fail. if self.fulltest: time.sleep(30) statuses = session.user_timeline() self.assertNotEqual(statuses[0].id, newstatus.id) ############################################ # self.home_timeline # # self.mentions # #------------------------------------------# # /statuses/home_timeline.json # # /statuses/mentions.json # ############################################ # Not much can be done for home timeline and mentions. statuses = session.home_timeline() self.assertTrue(isinstance(statuses[0], Status)) statuses = session.mentions() self.assertTrue(isinstance(statuses[0], Status))
[ [ 8, 0, 0.0119, 0.0153, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0203, 0.0017, 0, 0.66, 0.0556, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.022, 0.0017, 0, 0.66,...
[ "\"\"\"\nAuthor: Xia Kai <xiaket@gmail.com>\nFilename: tests.py\nType: Unit test module\nLast modified: 2010-05-21 16:51\n\nDescription:\nThis file contains unit test cases for this project.", "import os", "import time", "import unittest", "from models import T163UserBase, User, Fol...
#!/usr/bin/env python #coding=utf-8 """ Author: Xia Kai <xiaket@gmail.com> Filename: utils.py Type: Utility Last modified: 2010-07-18 14:06 Description: Utility functions for this project. """ import locale import os import urllib2 from datetime import datetime, timedelta ############## # Exceptions # ############## class AuthenticationError(RuntimeError): """ Exception caused by incorrect username/password. """ pass class UserNotFound(ValueError): """ Exception caused by querying a user that does not exist. """ pass class IllegalCall(ValueError): """ Exception caused by illegal call, e.g., trying to remove other people's status, or a message longer than 163 characters. """ pass class UnknownError(RuntimeError): """ Unexpected HTTP code returned. """ pass ############## # Decorators # ############## def require_login(func): """ This is a decorator inspired by a similar decorator in Django. """ def morewrapped(func): def wrapped(kls, *args, **kwargs): if not kls.logged_in: kls._login() return func(kls, *args, **kwargs) return wrapped return morewrapped(func) def check_screen_name(func): """ This decorator would check the screen_name in the parameter of the original function. It is to be noted that the screen must be the first argument if we are using a positional parameter. """ def morewrapped(func): def wrapped(kls, *args, **kwargs): if 'screen_name' in kwargs: _screen_name = kwargs['screen_name'] elif len(args): _screen_name = args[0] else: _screen_name = None if _screen_name: # If the screen_name is set, we shall check if it is a valid # screen_name. We do this by visiting the homepage of this # screen_name: _url = "/users/show.json?screen_name=%s" % _screen_name _message = "Specified user does not exist." 
_err_dict = { 404: (UserNotFound, _message), } kls.request(_url, errors=_err_dict) return func(kls, *args, **kwargs) return wrapped return morewrapped(func) def check_status_id(func): """ This decorator would check the screen_name in the parameter of the original function. It is to be noted that the screen must be the first argument if we are using a positional parameter. """ def morewrapped(func): def wrapped(kls, *args, **kwargs): if 'status_id' in kwargs: _status_id = kwargs['status_id'] elif len(args) != 0: _status_id = args[0] kls.show_status(_status_id) return func(kls, *args, **kwargs) return wrapped return morewrapped(func) def parse_timestring(timestring): """ Accept a time string, parse it and return a datetime object. >>> parse_timestring("Mon Apr 26 10:49:29 +0800 2010") datetime.datetime(2010, 4, 26, 2, 49, 29) >>> parse_timestring("Mon Apr 26 10:49:29 -0800 2010") datetime.datetime(2010, 4, 26, 18, 49, 29) >>> parse_timestring("Mon Apr 26 10:49:29 +0830 2010") datetime.datetime(2010, 4, 26, 2, 19, 29) """ oldlocale = locale.getlocale(locale.LC_TIME) # On different OS platform, setlocale would have to be called differently. if os.name =='nt': locale.setlocale(locale.LC_TIME, 'english') elif os.name =='posix': locale.setlocale(locale.LC_TIME, 'en_US.UTF-8') strf = timestring[:20] + timestring[26:] created_at = datetime.strptime(strf, "%a %b %d %H:%M:%S %Y") # set it back. locale.setlocale(locale.LC_TIME, oldlocale) delta = timestring[20:25] hour = int(delta[:3]) minute = int(delta[3:]) return created_at - timedelta(hours=hour, minutes=minute) class RedirectHandler(urllib2.HTTPRedirectHandler): def http_error_302(self, req, fp, code, msg, headers): """ For the moment, t.163.com would not return 404 status code correctly. Instead, it would return a 302 and redirect user to a page that will display 404 information. This would make web user happy, but we have to do extra to make our API elegant. Thus we have this handler to correctly raise 404 code. 
""" result = urllib2.HTTPRedirectHandler.http_error_302( self, req, fp, code, msg, headers) if headers['location'] == 'http://t.163.com/notfound': raise urllib2.HTTPError(req.get_full_url(), 404, msg, headers, fp) return result if __name__ == "__main__": import doctest doctest.testmod()
[ [ 8, 0, 0.0422, 0.0542, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0723, 0.006, 0, 0.66, 0.0714, 884, 0, 1, 0, 0, 884, 0, 0 ], [ 1, 0, 0.0783, 0.006, 0, 0.66, ...
[ "\"\"\"\nAuthor: Xia Kai <xiaket@gmail.com>\nFilename: utils.py\nType: Utility\nLast modified: 2010-07-18 14:06\n\nDescription:\nUtility functions for this project.", "import locale", "import os", "import urllib2", "from datetime import datetime, timedelta", "class AuthenticationE...
#!/usr/bin/env python #coding=utf-8 """ Author: Xia Kai <xiaket@gmail.com> Filename: tests.py Type: Unit test module Last modified: 2010-05-21 16:51 Description: This file contains unit test cases for this project. """ import os import time import unittest from models import T163UserBase, User, Follower, Status, DirectMessage from models import SearchResult, SearchHit, UserSearchResult, UserSearchHit from session import T163Session as Session from utils import AuthenticationError, UserNotFound, IllegalCall # This FAKE_USER should not be available as an 163 account. FAKE_USER = "xiakai.nju@gmail.com" FAKE_PASS = "fakepass" # This should be a valid account, preferably, at your disposal. If you do not # have another account, at least find some account that is following you. and # you are not following. The following test case would assume that the # TEST_ACCOUNT is following the account that you give to __init__. # TEST_ACCOUNT_ALT should be someone who is not following you. TEST_ACCOUNT = "xiaket" TEST_ACCOUNT_ALT = "zhangjiawei" # This should be an invalid screen_name FAKE_SCREEN_NAME = "aslfkqwlalsdlfalkw" # This should be an invalid user/status id. FAKE_ID = "398066" # These are test message contents, variables with prefix FAKE are longer than # 163 characters, while variables with prefix TEST lay within the constraint. FAKE_MESSAGE_EN = u"abcdefghijklmnopqrstuvwxyz "*7 TEST_MESSAGE_EN = u"abcdefghijklmnopqrstuvwxyz "*6 FAKE_MESSAGE_CN = u"中文消息长度测试 "*19 TEST_MESSAGE_CN = u"中文消息长度测试 "*18 class SessionTests(unittest.TestCase): """ This class would test authentication related cases. In summary, we have: 1. No username provided. 2. Invalid username 3. Invalid username/password pair. 4. Invalid cookie file. 5. Invalid cookie file/username pair. 6. Valid cookie file removed after Session initilization. """ def __init__(self, testname, username=None, password=None, fulltest=False): """ Over default __init__ method to allow import of username and password. 
Setup username and passwords. Some tests are somehow dangerous, we do them with a fulltest flag. """ super(SessionTests, self).__init__(testname) self.username = username self.password = password self.fulltest = fulltest if fulltest: print "performing full test." def session_init(self): """ This function would test the __init__ method of Session. There sure be no cookie file both before and after this test case. """ # If neither username nor cookiefile is given to __init__. An # AuthenticationError would no doubt be raised. self.assertRaises(AuthenticationError, Session) # Should raise an AuthenticationError if an invalid username is # provided. self.assertRaises(AuthenticationError, Session, username=FAKE_USER) # This should not raise Exceptions, since it is a valid # username/password pair. Session(username=self.username, password=self.password) # This should not raise Exceptions, even though neither username nor # password is provided. Session(cookiefile="%s.txt" % self.username) # Since we have a valid cookie file now, anything given as username or # password would be ignored, so the following example would work. Session(cookiefile="%s.txt" % self.username, username=FAKE_USER) # Remove existing cookie file before we continue. The cookie should # have been created before. os.remove("%s.txt" % self.username) # This would cause AuthenticationError, since the cookiefile provided # does not exist. self.assertRaises( AuthenticationError, Session, cookiefile="%s.txt" % FAKE_USER, ) # This would work, since a fallback username/password scheme would # work. But this would save cookie to "%s.txt" % FAKE_USER, instead of # "%s.txt" % self.username. So we shall remove the cookiefile after # this test. Session( cookiefile="%s.txt" % FAKE_USER, username=self.username, password=self.password, ) os.remove("%s.txt" % FAKE_USER) # This should raise AuthenticationError, since it is an invalid # username/password pair. # CAUTION: This is dangerous. 
Frequent test of the following test would # lock your valid account up. if self.fulltest: self.assertRaises( AuthenticationError, Session, username=self.username, password=FAKE_PASS, ) def relation_api(self): """ This function would test the relationship related APIs. """ # Initialize a session. session = Session(username=self.username, password=self.password) ########################## # self.show_friendship # #------------------------# # /friendships/show.json # ########################## # Calling Session.show_friendship(target_screen_name=FAKE_SCREEN_NAME) # should cause an exception. self.assertRaises( UserNotFound, session.show_friendship, target_screen_name=FAKE_SCREEN_NAME, ) # This time, the target is valid, while the source_id is invalid. It # should also cause an exception. self.assertRaises( UserNotFound, session.show_friendship, source_id=FAKE_ID, target_screen_name=TEST_ACCOUNT, ) # This time, no target is provided, this should cause another # exception. self.assertRaises( IllegalCall, session.show_friendship, source_id=FAKE_ID, ) # This should work, giving the following relationship between # TEST_ACCOUNT and self.username. fo, foed = session.show_friendship( target_screen_name=TEST_ACCOUNT ) self.assertTrue( foed, "TEST_ACCOUNT:%s should follow %s" % (TEST_ACCOUNT, self.username), ) ########################################## # self.followers # #----------------------------------------# # /statuses/followers/{screen_name}.json # ########################################## # User current user's screen_name by default, so this should equal. self.assertEqual( [user.id for user in session.followers()], [user.id for user in session.followers(session.screen_name)], ) # This should never return an empty list, since at least TEST_ACCOUNT # is following self.username. Items in the list should be an instance # of Follower. 
followers = session.followers(session.screen_name) self.assertTrue(isinstance(followers[0], Follower)) # Test again, TEST_ACCOUNT should be following self.username self.assertTrue(TEST_ACCOUNT in [u.screen_name for u in followers]) # This should raise an exception, for the screen_name is invalid. self.assertRaises( UserNotFound, session.followers, screen_name=FAKE_SCREEN_NAME, ) # The above should work for both positional arguments and keyword # arguments. self.assertRaises( UserNotFound, session.followers, FAKE_SCREEN_NAME, ) ########################################## # self.friends # #----------------------------------------# # /statuses/friends/{screen_name}.json # ########################################## # This should give a list of Follower objects. friends = session.friends(TEST_ACCOUNT) self.assertTrue(isinstance(friends[0], Follower)) # Since an almost identical API is well tested(I hope!) above, I see no # point repeating it here. ############################################ # self.create_friendship # #------------------------------------------# # /friendships/create/{screen_name}.json # ############################################ # As before, this function is decorated with check_screen_name. # So we shall get a UserNotFound with an invalid screen_name. self.assertRaises( UserNotFound, session.create_friendship, FAKE_SCREEN_NAME, ) # Follow yourself would get an IllegalCall exception. self.assertRaises( IllegalCall, session.create_friendship, session.screen_name, ) # Before we continue, we shall follow # This should work. session.create_friendship(TEST_ACCOUNT) ############################################ # self.destroy_friendship # #------------------------------------------# # /friendships/destroy/{screen_name}.json # ############################################ # This should work. 
session.destroy_friendship(TEST_ACCOUNT) # It cannot be done twice: self.assertRaises( IllegalCall, session.destroy_friendship, TEST_ACCOUNT, ) # Nor can we unfollow someone who do not exist. self.assertRaises( UserNotFound, session.destroy_friendship, FAKE_SCREEN_NAME, ) def mail_api(self): """ This function would test the direct message related APIs. """ # Initialize a session. session = Session(username=self.username, password=self.password) ############################################ # self.new_direct_message # #------------------------------------------# # /direct_messages/new.json # ############################################ # This is an IllegalCall, since the receiver is invalid. self.assertRaises( IllegalCall, session.new_direct_message, FAKE_SCREEN_NAME, TEST_MESSAGE_CN, ) # This is an IllegalCall, since the message is too long. self.assertRaises( IllegalCall, session.new_direct_message, TEST_ACCOUNT, FAKE_MESSAGE_CN, ) # This is an IllegalCall, since the message is too long. self.assertRaises( IllegalCall, session.new_direct_message, TEST_ACCOUNT, FAKE_MESSAGE_EN, ) # This is an IllegalCall, since you cannot send a mail to yourself. self.assertRaises( IllegalCall, session.new_direct_message, session.screen_name, TEST_MESSAGE_EN, ) # This is an IllegalCall, since TEST_ACCOUNT_ALT is not following you. self.assertRaises( IllegalCall, session.new_direct_message, TEST_ACCOUNT_ALT, TEST_MESSAGE_EN, ) # Finally, this should work. message = session.new_direct_message(TEST_ACCOUNT, TEST_MESSAGE_EN) self.assertTrue(isinstance(message, DirectMessage)) # A direct message is sent to TEST_ACCOUNT, we shall retrieve the first # message in outbox and check if it is the same one. 
############################################ # self.sent_direct_messages # #------------------------------------------# # /direct_messages/sent.json # ############################################ messages = session.sent_direct_messages() self.assertTrue(isinstance(messages[0], DirectMessage)) self.assertEqual(messages[0].id, message.id) ############################################ # self.destroy_direct_message # #------------------------------------------# # /direct_messages/destroy/{id}.json # ############################################ # This is an illegal call, since the id specified is invalid. self.assertRaises( IllegalCall, session.destroy_direct_message, FAKE_ID, ) removed_message = session.destroy_direct_message(message.id) self.assertEqual(removed_message.id, message.id) # This is an illegal call, since you cannot remove the same direct # message twice. self.assertRaises( IllegalCall, session.destroy_direct_message, message.id, ) ############################################ # self.direct_messages # #------------------------------------------# # /direct_messages.json # ############################################ # If there are direct messages in your inbox, we shall try to make sure # that it is a DirectMessage instance. messages = session.direct_messages() if len(messages) != 0: self.assertTrue(isinstance(messages[0], DirectMessage)) def search_api(self): """ This function would test the search related APIs. """ # Initialize a session. session = Session(username=self.username, password=self.password) ############################################ # self.search # #------------------------------------------# # /search.json # ############################################ # This is an IllegalCall, since no query keyword is specified. self.assertRaises( TypeError, session.search, ) # I hope I'm not a narcissist... 
search_result = session.search(TEST_ACCOUNT) self.assertTrue(isinstance(search_result, SearchResult)) result = search_result.result[0] self.assertTrue(isinstance(result, SearchHit)) ############################################ # self.user_search # #------------------------------------------# # /1/user/search.json # ############################################ search_result = session.user_search(TEST_ACCOUNT) self.assertTrue(isinstance(search_result, UserSearchResult)) result = search_result.result[0] self.assertTrue(isinstance(result, UserSearchHit)) def favorite_api(self): """ This function would test favorite related APIs. """ # Initialize a session. session = Session(username=self.username, password=self.password) ############################################ # self.create_favorite # #------------------------------------------# # /favorites/create/{id}.json # ############################################ # This is an IllegalCall, since the id is invalid. self.assertRaises( IllegalCall, session.create_favorite, FAKE_ID, ) # We shall get a valid message id by looking for the first message in # some user's timeline. favorited_status = session.user_timeline(TEST_ACCOUNT)[0] # Now add this message as favourite. session.create_favorite(favorited_status.id) # Now we are ready to test the self.favorites API. ############################################ # self.favorites # #------------------------------------------# # /favorites/{screen_name}.json # ############################################ # We shall find the first favorite message and compare the id. favorite_status = session.favorites()[0] self.assertTrue(isinstance(favorite_status, Status)) self.assertEqual(favorite_status.id, favorited_status.id) # This is an UserNotFound, since the screen_name is invalid. 
self.assertRaises( UserNotFound, session.favorites, FAKE_SCREEN_NAME, ) ############################################ # self.destroy_favorite # #------------------------------------------# # /favorites/destroy/{id}.json # ############################################ # This is an illegal call, since the id specified is invalid. self.assertRaises( IllegalCall, session.destroy_favorite, FAKE_ID, ) # This would work session.destroy_favorite(favorited_status.id) def status_api(self): """ This function would test the status related APIs. """ # Initialize a session. session = Session(username=self.username, password=self.password) ############################################ # self.update # #------------------------------------------# # /statuses/update.json # ############################################ # This is an IllegalCall, since the message is too long. self.assertRaises( IllegalCall, session.update, FAKE_MESSAGE_CN, ) # This is an IllegalCall, since the message is too long. self.assertRaises( IllegalCall, session.update, FAKE_MESSAGE_EN, ) newstatus = session.update(TEST_MESSAGE_CN) self.assertTrue(isinstance(newstatus, Status)) # TODO, when we have fully implemented reply and retweet, we have to # add more test case here. ################################################ # self.user_timeline # #----------------------------------------------# # /statuses/user_timeline/{screen_name}.json # ################################################ # The following screen_name is invalid. self.assertRaises( UserNotFound, session.user_timeline, FAKE_SCREEN_NAME, ) # Get the time line. I do not understand why it take so long to refresh # the timeline. 20 is not enough. # The following code would sometimes mysteriously fail. 
if self.fulltest: time.sleep(30) statuses = session.user_timeline() self.assertEqual(statuses[0].id, newstatus.id) ############################################ # self.show_status # #------------------------------------------# # /statuses/show/{id}.json # ############################################ # This is an IllegalCall, since the id is invalid. self.assertRaises( IllegalCall, session.show_status, FAKE_ID, ) self.assertEqual( session.show_status(newstatus.id).text, TEST_MESSAGE_CN.strip(), ) ############################################ # self.destroy_status # #------------------------------------------# # /statuses/destroy/{id}.json # ############################################ # This is IllegalCall, since the message id is invalid. self.assertRaises( IllegalCall, session.destroy_status, FAKE_ID, ) # This should work session.destroy_status(newstatus.id) # Get the time line. I do not understand why it take so long to refresh # the timeline. 20 is not enough. 30 would work most of the time. # The following code would sometimes mysteriously fail. if self.fulltest: time.sleep(30) statuses = session.user_timeline() self.assertNotEqual(statuses[0].id, newstatus.id) ############################################ # self.home_timeline # # self.mentions # #------------------------------------------# # /statuses/home_timeline.json # # /statuses/mentions.json # ############################################ # Not much can be done for home timeline and mentions. statuses = session.home_timeline() self.assertTrue(isinstance(statuses[0], Status)) statuses = session.mentions() self.assertTrue(isinstance(statuses[0], Status))
[ [ 8, 0, 0.0119, 0.0153, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0203, 0.0017, 0, 0.66, 0.0556, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.022, 0.0017, 0, 0.66,...
[ "\"\"\"\nAuthor: Xia Kai <xiaket@gmail.com>\nFilename: tests.py\nType: Unit test module\nLast modified: 2010-05-21 16:51\n\nDescription:\nThis file contains unit test cases for this project.", "import os", "import time", "import unittest", "from models import T163UserBase, User, Fol...
#!/usr/bin/env python #coding=utf-8 """ Author: Xia Kai <xiaket@gmail.com> Filename: __init__.py Type: Module meta information holder Last modified: 2010-05-16 20:44 Description: """ __author__ = "xiaket" __version__ = "0.2b"
[ [ 8, 0, 0.5, 0.6154, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.9231, 0.0769, 0, 0.66, 0.5, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 1, 0.0769, 0, 0.66, 1, ...
[ "\"\"\"\nAuthor: Xia Kai <xiaket@gmail.com>\nFilename: __init__.py\nType: Module meta information holder\nLast modified: 2010-05-16 20:44\n\nDescription:\n\"\"\"", "__author__ = \"xiaket\"", "__version__ = \"0.2b\"" ]
import pygtk pygtk.require('2.0') import gtk class TreeViewColumnExample(object): # close the window and quit def delete_event(self, widget, event, data=None): gtk.main_quit() return False def __init__(self): # Create a new window self.window = gtk.Window(gtk.WINDOW_TOPLEVEL) self.window.set_title("TreeViewColumn Example") self.window.connect("delete_event", self.delete_event) # create a liststore with one string column to use as the model self.liststore = gtk.ListStore(str, str, str, 'gboolean') # create the TreeView using liststore self.treeview = gtk.TreeView(self.liststore) # create the TreeViewColumns to display the data self.tvcolumn = gtk.TreeViewColumn('Pixbuf and Text') self.tvcolumn1 = gtk.TreeViewColumn('Text Only') # add a row with text and a stock item - color strings for # the background self.liststore.append(['Open', gtk.STOCK_OPEN, 'Open a File', True]) self.liststore.append(['New', gtk.STOCK_NEW, 'New File', True]) self.liststore.append(['Print', gtk.STOCK_PRINT, 'Print File', False]) # add columns to treeview self.treeview.append_column(self.tvcolumn) self.treeview.append_column(self.tvcolumn1) # create a CellRenderers to render the data self.cellpb = gtk.CellRendererPixbuf() self.cell = gtk.CellRendererText() self.cell1 = gtk.CellRendererText() # set background color property self.cellpb.set_property('cell-background', 'yellow') self.cell.set_property('cell-background', 'cyan') self.cell1.set_property('cell-background', 'pink') # add the cells to the columns - 2 in the first self.tvcolumn.pack_start(self.cellpb, False) self.tvcolumn.pack_start(self.cell, True) self.tvcolumn1.pack_start(self.cell1, True) self.tvcolumn.set_attributes(self.cellpb, stock_id=1) self.tvcolumn.set_attributes(self.cell, text=0) self.tvcolumn1.set_attributes(self.cell1, text=2, cell_background_set=3) # make treeview searchable self.treeview.set_search_column(0) # Allow sorting on the column self.tvcolumn.set_sort_column_id(0) # Allow drag and drop reordering of rows 
self.treeview.set_reorderable(True) self.window.add(self.treeview) self.window.show_all() def main(): gtk.main() if __name__ == "__main__": tvcexample = TreeViewColumnExample() main()
[ [ 1, 0, 0.013, 0.013, 0, 0.66, 0, 106, 0, 1, 0, 0, 106, 0, 0 ], [ 8, 0, 0.026, 0.013, 0, 0.66, 0.2, 66, 3, 1, 0, 0, 0, 0, 1 ], [ 1, 0, 0.039, 0.013, 0, 0.66, 0....
[ "import pygtk", "pygtk.require('2.0')", "import gtk", "class TreeViewColumnExample(object):\n\n # close the window and quit\n def delete_event(self, widget, event, data=None):\n gtk.main_quit()\n return False\n\n def __init__(self):", " def delete_event(self, widget, event, data=No...
# MySQL for Python import MySQLdb db = MySQLdb.connect ( host = 'localhost', user = 'root', passwd = '', db = 'db_1clic2learn' #bd = 'mysql' ) cursor = db.cursor() cursor.execute('SELECT * FROM db.PL_SQLINJECTION') result = cursor.fetchall() if result: for z in result: print z
[ [ 1, 0, 0.1579, 0.0526, 0, 0.66, 0, 838, 0, 1, 0, 0, 838, 0, 0 ], [ 14, 0, 0.4211, 0.3684, 0, 0.66, 0.2, 761, 3, 4, 0, 0, 242, 10, 1 ], [ 14, 0, 0.6842, 0.0526, 0, ...
[ "import MySQLdb", "db = MySQLdb.connect (\n host = 'localhost',\n user = 'root',\n passwd = '',\n db = 'db_1clic2learn'\n #bd = 'mysql'\n )", "cursor = db.cursor()", "cursor.execute('SELECT * FROM db.PL_SQLINJECTION')", "result = cursor.fetchall()", "if result:\n for z in result:\n ...
import sys import time, tkMessageBox import Controller try: import pygtk pygtk.require("2.0") except: pass try: import gtk import gtk.glade except: sys.exit(1) ######################################################################## class JanelaPrincipal: def __init__(self): # Cria uma nova janela (window) self.janela = gtk.Window(gtk.WINDOW_TOPLEVEL) self.janela.set_position(gtk.WIN_POS_CENTER) self.janela.set_title('Scanner 1Clic2Learn' ) self.janela.set_size_request(1050, 700) self.janela.set_resizable(False) self.janela.set_icon_name('1Clic2Learn') self.janela.set_icon_from_file('./1Clic2Learn-3-Icon.ico') # Adicionando os wigets a janela " self.fixed = gtk.Fixed() self.janela.add(self.fixed) self.fixed.put(gtk.Label('URL da Aplicacao: '), 50, 30) self.txtURL = gtk.Entry() self.txtURL.set_size_request(650, 30) self.fixed.put(self.txtURL, 200, 30) self.btExplorar = gtk.Button('Explorar' ) self.btExplorar.set_size_request(100, 30) self.btExplorar.connect("clicked", self.IniciarExploracao) self.fixed.put(self.btExplorar, 900, 30) self.fixed.put(gtk.Label('Vulnerabilidades: ' ), 50, 90) #criando combo box self.ListVuln = gtk.ListStore(int,str) self.ListVuln.append([1, "SQL Injection"]) self.ListVuln.append([2,"Cross-Site Scripiting"]) self.ComboVul = gtk.combo_box_new_with_model_and_entry (self.ListVuln) self.ComboVul.set_entry_text_column(1) self.ComboVul.connect("changed", self.on_name_combo_changed) self.ComboVul.set_size_request(300,30) self.ComboVul.set_active(0) self.fixed.put(self.ComboVul, 200, 90) #check box para criterio de parada self.checkParada = gtk.CheckButton('Parar ao encontrar a primeira falha') self.checkParada.set_size_request(300, 30) self.checkParada.set_active(1) self.fixed.put(self.checkParada, 600, 90) ##############################RESULTADO########################################################### # Criando a janela para receber o resultado dos ataques. 
self.scrollwinResult = gtk.ScrolledWindow() self.scrollwinResult.set_size_request(535,500) self.scrollwinResult.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) self.listResult = gtk.ListStore(str, str, str, str, str, 'gboolean') # Criando Treeview com uma lista self.treeview = gtk.TreeView(self.listResult) self.scrollwinResult.add(self.treeview) self.fixed.put(self.scrollwinResult,50,150) # Criando cabecalho das colunas self.tvcolumn = gtk.TreeViewColumn('Situacao') self.tvcolumn.set_alignment(xalign=0.5) self.tvcolumn1 = gtk.TreeViewColumn('Criticidade') self.tvcolumn1.set_alignment(xalign=0.5) self.tvcolumn2 = gtk.TreeViewColumn('URL') self.tvcolumn2.set_alignment(xalign=0.5) self.tvcolumn3 = gtk.TreeViewColumn('Componente Testado') self.tvcolumn3.set_alignment(xalign=0.5) # aadiciona colunas na treeview/tabela self.treeview.append_column(self.tvcolumn) self.treeview.append_column(self.tvcolumn1) self.treeview.append_column(self.tvcolumn2) self.treeview.append_column(self.tvcolumn3) # criando as cedulas self.cellFalha = gtk.CellRendererText() self.cellCrit = gtk.CellRendererText() self.cellCrit.set_alignment(xalign=0.5, yalign=0.5) self.cellComp = gtk.CellRendererText() self.cellTest = gtk.CellRendererText() # set cor de fundo self.cellFalha.set_property("foreground", "red") # adicionando cedulas nas colunas self.tvcolumn.pack_start(self.cellFalha, True) self.tvcolumn1.pack_start(self.cellCrit, True) self.tvcolumn2.pack_start(self.cellComp, True) self.tvcolumn3.pack_start(self.cellTest, True) self.tvcolumn.set_attributes(self.cellFalha, text=0) self.tvcolumn1.set_attributes(self.cellCrit, text=1) self.tvcolumn2.set_attributes(self.cellComp, text=2) self.tvcolumn3.set_attributes(self.cellTest, text=3) # setando opcao para pesquisa self.treeview.set_search_column(0) # Permitindo Ordenacao nas colunas self.tvcolumn.set_sort_column_id(0) self.tvcolumn1.set_sort_column_id(0) self.tvcolumn2.set_sort_column_id(0) self.tvcolumn3.set_sort_column_id(0) 
self.LabelInfo = gtk.Label() self.LabelInforServer = gtk.Label() self.LabelResultado = gtk.Label() self.LabelMitigacao = gtk.Label() self.LabelInforServer.set_markup("<b>Informacoes do Servidor:</b>") self.fixed.put(self.LabelInforServer, 600, 130) self.LabelResultado.set_markup("<b>Resultados:</b>") self.fixed.put(self.LabelResultado, 50, 130) self.LabelMitigacao.set_markup("<b>Mitigacao:</b>") self.fixed.put(self.LabelMitigacao, 600, 360) #CRIANDO CAMPO DE INFORMACOES DO SERVIDOR self.scrollwinServer = gtk.ScrolledWindow() self.scrollwinServer.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) self.InfoServer = gtk.TextBuffer() self.TextViewServer = gtk.TextView(self.InfoServer) self.TextViewServer.set_editable(False) self.TextViewServer.set_cursor_visible(False) self.TextViewServer.set_wrap_mode(gtk.WRAP_WORD) self.TextViewServer.set_size_request(400,170) self.scrollwinServer.add(self.TextViewServer) self.fixed.put(self.scrollwinServer, 600, 150) #CRIANDO CAMPO DE INFORMACOES DE MITIGACAO self.scrollwinMit = gtk.ScrolledWindow() self.scrollwinMit.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) self.TextBufferMitigacao = gtk.TextBuffer() self.TextViewMit = gtk.TextView(self.TextBufferMitigacao) self.TextViewMit.set_editable(False) self.TextViewMit.set_cursor_visible(False) self.TextViewMit.set_wrap_mode(gtk.WRAP_WORD) self.TextViewMit.set_size_request(400,270) self.scrollwinMit.add(self.TextViewMit) self.fixed.put(self.scrollwinMit,600,380) #conectando a janela aos destrutores para finalizar o programa self.janela.connect('delete_event', self.delete_event) self.janela.connect('destroy', self.destroy) self.janela.show_all() def ShowError(self, title, mensagem): dialog = gtk.MessageDialog(parent=self.janela,flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT, type=gtk.MESSAGE_INFO, buttons=gtk.BUTTONS_OK, message_format=mensagem) dialog.set_title(title) dialog.set_position(gtk.WIN_POS_CENTER_ALWAYS) # Exibe a caixa dialog.run() dialog.destroy() def 
IniciarExploracao(self, widget, data=None): Inicial= time.time() print "Start : %s" %Inicial #LIMPANDO AS VARIAVEIS DA JANELA self.TextBufferMitigacao.set_text("") self.listResult.clear() #self.progressbar.destroy() self.InfoServer.set_text("") self.selc = self.on_name_combo_changed(self.ComboVul) #VERIFICANDO SE EXISTE URL if self.txtURL.get_text() == "": # EXIBINDO UMA POPUP DE ALERTA self.ShowError('ALERTA', 'ATENCAO: Favor preencher o campo URL da Aplicacao!') #VERIFICANDO SE FOI SELECIONADA ALGUMA OPCAO elif (self.selc > 2) or (self.selc == 999): self.InfoServer.set_text("") self.listResult.clear() else: # Utilizando a classe CONTROLLER self.control = Controller.Controller(self.txtURL.get_text()) self.control.select_Plugin(self.selc,self.checkParada.state) #envia o plugin e o criterio de parada(1 - para no primeiro) self.teste_url = [] self.teste_url = self.control.check_URL() if len(self.teste_url) == 2: # se a lista tiver 2 colunas entao ocorreu um erro self.ShowError(str(self.teste_url[0]), str(self.teste_url[1])) else: #INSERIR AS INFORMACOES DO SERVIDOR self.a = [] self.a.append(self.control.get_Server_Info()) self.b = str() for n in self.a: self.b = (str(n)) self.InfoServer.set_text(self.b) # ADICIONA O RESULTADO DO ATAQUE NA LISTA self.resultado = [] self.resultado = self.control.send_Exploiter() try: if len(self.resultado[0]) == 2 and self.resultado[0] == None: self.ShowError(str(self.resultado[0]), str(self.resultado[1])) except: pass else: for res in self.resultado: self.listResult.append(res) #AQUI deve passar a lista da Controller self.treeview.set_model(self.listResult) TempoFinal = round (time.time() - Inicial, 2) print TempoFinal self.ShowError("SUCESSO!", "Exploração realizado com sucesso! \nTempo de Execução: " + str(TempoFinal) + ' segundos') #Exibindo o texto de mitigacao quando selecionar o resultado! 
self.selecao = self.treeview.get_selection() self.selecao.connect('changed', self.on_select_change) self.janela.show_all() # Retornando False (falso) nesta funcao o GTK ira emitir o sinal de "destroy". Se voce retornar True # (verdadeiro),significa que voce nao quer que a janela seja fechada def delete_event(self, widget, event, data=None): print "Programa encerrado!" return False #FUNCAO CRIADA PARA VERIFICAR O ITEM SELECIONADO NA TREEVIEW SELECIONADA def on_select_change(self, widget): m, itr = widget.get_selected() if itr: #Exibe o item da posicao 4 - mitigacao se for vulnerável if m[itr][5]: self.TextBufferMitigacao.set_text(m[itr][4]) self.scrollwinMit.show_all() else: self.TextBufferMitigacao.set_text("") self.scrollwinMit.show_all() def on_name_combo_changed(self, combo): tree_iter = combo.get_active_iter() model = combo.get_model() if tree_iter == None: self.ShowError('ATENCAO','Nenhuma vulnerabilidade foi selecionada!') return 999 else: row_id, name = model[tree_iter] return row_id # Outro retorno def destroy(self, widget, data=None): gtk.main_quit() def main(self): gtk.main() # Se o programa rodar diretamente ou for passado como um argumento para o interpretador de python # ele criara a JanelaPrincipal e o mostrara. if __name__ == "__main__": Janela = JanelaPrincipal() Janela.main()
[ [ 1, 0, 0.0038, 0.0038, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.0075, 0.0038, 0, 0.66, 0.1667, 654, 0, 2, 0, 0, 654, 0, 0 ], [ 1, 0, 0.0113, 0.0038, 0, ...
[ "import sys", "import time, tkMessageBox", "import Controller", "try:\n import pygtk\n pygtk.require(\"2.0\")\nexcept:\n pass", " import pygtk", " pygtk.require(\"2.0\")", "try:\n import gtk\n import gtk.glade\nexcept:\n sys.exit(1)", " im...
#!/usr/bin/env python import urllib,requests,urllib2,socket,mechanize from urlparse import urlsplit,urlparse,urlunparse,urljoin from Data import Data_Access ######################################################################## # CLASSE PLUGINSQL FILHO DE PLUGINS class PluginSQL (object): """ Responsavel por montar e analisar ataques de Injecao de Comandos SQL """ #---------------------------------------------------------------------- def __init__(self): """ Constructor """ self.data_Access = Data_Access(1, "PL_SQLINJECTION") self.attack_Name = "SQL Injection" self.attack_Criticity = "High" #---------------------------------------------------------------------- def get_Attack (self): """ Retorna os ataques que estao no Banco de Dados """ return self.data_Access.getAttack() #---------------------------------------------------------------------- def get_Mitigation (self): """ Retorna as mitigacoes que estao no Banco de Dados """ return self.data_Access.getMitigation() #---------------------------------------------------------------------- def get_Impact (self): """ Retorna o nome da vulnerabilidade """ return self.attack_Criticity #---------------------------------------------------------------------- def get_Response (self): """ Retorna as respostas que estao no Banco de Dados """ return self.data_Access.getResponse() #---------------------------------------------------------------------- def get_Vulnerability (self): """ Retorna o nome da vulnerabilidade """ return self.attack_Name #---------------------------------------------------------------------- def get_Attack_Monted (self, url): """ Retorna a URL concatenada com o ataque """ self.atack_monted = [] for atack in self.getAttack(): self.atack_monted.append(url + ''.join(atack)) return self.atack_monted #---------------------------------------------------------------------- def mount_URL_Attack (self, url): """ Ataque via URL GET """ self.url_attack_parse = urlparse(url) self.url_parse_base = 
self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?" self.url_atributos_ataques = [] self.atributo_ataque = [] # Pega todos os atributos da URL e os separa em uma lista self.urls_att = [] self.urls_att.append('&'.rpartition(self.url_attack_parse.query)) #---------------------------------------------------------------------- def gen_Form_Attack(self, list_forms, url): """" Realiza uma chamada ao banco de dados para obter a lista de ataques e os inclui nos formularios. LIST_FORMS: Lista contendo os nomes dos formularios presentes na pagina testada URL: Endereco URL testado """ self.list_forms = [] self.list_forms = list_forms self.list_forms_ataques = [] self.aux = [] for ataque in self.get_Attack(): # Atribui os ataques a lista de formularios enviada como parametro for formulario in self.list_forms: self.aux.append([formulario,''.join(ataque)]) self.list_forms_ataques.append([url, self.aux]) self.aux = [] # Retorna uma lista de formularios ja contendo os ataques return self.list_forms_ataques #---------------------------------------------------------------------- def analisa_Resultado (self, html, url_ataque, atributo): """ Analisa o codigo HTML da aplicacao, apos o ataque desferido pela classe Exploiter. 
HTML: Codigo-fonte resultante do ataque; URL_ATAQUE: Endereco URL da aplicacao testada; ATRIBUTO: Variavel que sofreu o teste """ self.html = str(html) self.attack_Result = [] # Verifica no codigo-fonte HTML a presenca de Strings Error Based SQL Injection for resp in self.get_Response(): # Existe Strings, entao eh vulneravel if self.html.find(''.join(resp)) > 0: # Concatena as informacoes retorno self.attack_Result.append('VULNERAVEL') self.attack_Result.append(self.get_Impact()) self.attack_Result.append(url_ataque) self.attack_Result.append(atributo) self.attack_Result.append(self.get_Mitigation()) # 1 = Ataque funcionou self.attack_Result.append(True) # Nao existe Strings, logo nao eh vulneravel a Error Based SQL Injection else: # Concatena as informacoes retorno self.attack_Result.append('NAO VULNERAVEL') self.attack_Result.append(self.get_Impact()) self.attack_Result.append(url_ataque) self.attack_Result.append(atributo) self.attack_Result.append(self.get_Mitigation()) # 0 = Ataque nao funcionou self.attack_Result.append(False) return self.attack_Result #---------------------------------------------------------------------- def gerar_Ataques (self, url): """ Recebe por parametro uma lista de URLs com atributos e monta uma lista de ataques em cima de cada atributo contido na URL. """ # Define a lista de URLs self.URL_List_Target = [] self.URL_List_Target = url # Define a lista de ataques para cada uma das URLs self.lista_ataques_gerados = [] for URL_Target in self.URL_List_Target: self.url_attack_parse = urlparse(URL_Target) self.url_parse_base = self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?" 
self.url_atributos_ataques = [] self.atributo_ataque = [] self.lista_var = [] self.lista_var = self.url_attack_parse.query.split('&') # Pega todos os atributos da URL e os separa em uma lista for atributo in self.url_attack_parse.query.split('&'): self.str_var_fix = "" for var_fix in self.lista_var: if var_fix == atributo: pass else: self.str_var_fix = self.str_var_fix + "&" + var_fix # Faz uma chamada ao banco de dados para obter a String de ataque e a concatena ao atributo for atack in self.get_Attack(): self.lista_ataques_gerados.append([self.url_parse_base + atributo + ''.join(atack) + self.str_var_fix, atributo]) # Retorna a lista de ataques gerados pelo metodo return self.lista_ataques_gerados
[ [ 1, 0, 0.0115, 0.0057, 0, 0.66, 0, 614, 0, 5, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0172, 0.0057, 0, 0.66, 0.3333, 857, 0, 4, 0, 0, 857, 0, 0 ], [ 1, 0, 0.023, 0.0057, 0, 0...
[ "import urllib,requests,urllib2,socket,mechanize", "from urlparse import urlsplit,urlparse,urlunparse,urljoin", "from Data import Data_Access", "class PluginSQL (object):\n \"\"\"\n Responsavel por montar e analisar ataques de Injecao de Comandos SQL\n \"\"\" \n #-------------------------------...
#!/usr/bin/env python import urllib,requests,urllib2,socket,mechanize from urlparse import urlsplit,urlparse,urlunparse,urljoin from Data import Data_Access ######################################################################## # CLASSE PLUGINSQL FILHO DE PLUGINS class PluginSQL (object): """ Responsavel por montar e analisar ataques de Injecao de Comandos SQL """ #---------------------------------------------------------------------- def __init__(self): """ Constructor """ self.data_Access = Data_Access(1, "PL_SQLINJECTION") self.attack_Name = "SQL Injection" self.attack_Criticity = "High" #---------------------------------------------------------------------- def get_Attack (self): """ Retorna os ataques que estao no Banco de Dados """ return self.data_Access.getAttack() #---------------------------------------------------------------------- def get_Mitigation (self): """ Retorna as mitigacoes que estao no Banco de Dados """ return self.data_Access.getMitigation() #---------------------------------------------------------------------- def get_Impact (self): """ Retorna o nome da vulnerabilidade """ return self.attack_Criticity #---------------------------------------------------------------------- def get_Response (self): """ Retorna as respostas que estao no Banco de Dados """ return self.data_Access.getResponse() #---------------------------------------------------------------------- def get_Vulnerability (self): """ Retorna o nome da vulnerabilidade """ return self.attack_Name #---------------------------------------------------------------------- def get_Attack_Monted (self, url): """ Retorna a URL concatenada com o ataque """ self.atack_monted = [] for atack in self.getAttack(): self.atack_monted.append(url + ''.join(atack)) return self.atack_monted #---------------------------------------------------------------------- def mount_URL_Attack (self, url): """ Ataque via URL GET """ self.url_attack_parse = urlparse(url) self.url_parse_base = 
self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?" self.url_atributos_ataques = [] self.atributo_ataque = [] # Pega todos os atributos da URL e os separa em uma lista self.urls_att = [] self.urls_att.append('&'.rpartition(self.url_attack_parse.query)) #---------------------------------------------------------------------- def gen_Form_Attack(self, list_forms, url): """" Realiza uma chamada ao banco de dados para obter a lista de ataques e os inclui nos formularios. LIST_FORMS: Lista contendo os nomes dos formularios presentes na pagina testada URL: Endereco URL testado """ self.list_forms = [] self.list_forms = list_forms self.list_forms_ataques = [] self.aux = [] for ataque in self.get_Attack(): # Atribui os ataques a lista de formularios enviada como parametro for formulario in self.list_forms: self.aux.append([formulario,''.join(ataque)]) self.list_forms_ataques.append([url, self.aux]) self.aux = [] # Retorna uma lista de formularios ja contendo os ataques return self.list_forms_ataques #---------------------------------------------------------------------- def analisa_Resultado (self, html, url_ataque, atributo): """ Analisa o codigo HTML da aplicacao, apos o ataque desferido pela classe Exploiter. 
HTML: Codigo-fonte resultante do ataque; URL_ATAQUE: Endereco URL da aplicacao testada; ATRIBUTO: Variavel que sofreu o teste """ self.html = str(html) self.attack_Result = [] # Verifica no codigo-fonte HTML a presenca de Strings Error Based SQL Injection for resp in self.get_Response(): # Existe Strings, entao eh vulneravel if self.html.find(''.join(resp)) > 0: # Concatena as informacoes retorno self.attack_Result.append('VULNERAVEL') self.attack_Result.append(self.get_Impact()) self.attack_Result.append(url_ataque) self.attack_Result.append(atributo) self.attack_Result.append(self.get_Mitigation()) # 1 = Ataque funcionou self.attack_Result.append(True) # Nao existe Strings, logo nao eh vulneravel a Error Based SQL Injection else: # Concatena as informacoes retorno self.attack_Result.append('NAO VULNERAVEL') self.attack_Result.append(self.get_Impact()) self.attack_Result.append(url_ataque) self.attack_Result.append(atributo) self.attack_Result.append(self.get_Mitigation()) # 0 = Ataque nao funcionou self.attack_Result.append(False) return self.attack_Result #---------------------------------------------------------------------- def gerar_Ataques (self, url): """ Recebe por parametro uma lista de URLs com atributos e monta uma lista de ataques em cima de cada atributo contido na URL. """ # Define a lista de URLs self.URL_List_Target = [] self.URL_List_Target = url # Define a lista de ataques para cada uma das URLs self.lista_ataques_gerados = [] for URL_Target in self.URL_List_Target: self.url_attack_parse = urlparse(URL_Target) self.url_parse_base = self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?" 
self.url_atributos_ataques = [] self.atributo_ataque = [] self.lista_var = [] self.lista_var = self.url_attack_parse.query.split('&') # Pega todos os atributos da URL e os separa em uma lista for atributo in self.url_attack_parse.query.split('&'): self.str_var_fix = "" for var_fix in self.lista_var: if var_fix == atributo: pass else: self.str_var_fix = self.str_var_fix + "&" + var_fix # Faz uma chamada ao banco de dados para obter a String de ataque e a concatena ao atributo for atack in self.get_Attack(): self.lista_ataques_gerados.append([self.url_parse_base + atributo + ''.join(atack) + self.str_var_fix, atributo]) # Retorna a lista de ataques gerados pelo metodo return self.lista_ataques_gerados
[ [ 1, 0, 0.0115, 0.0057, 0, 0.66, 0, 614, 0, 5, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0172, 0.0057, 0, 0.66, 0.3333, 857, 0, 4, 0, 0, 857, 0, 0 ], [ 1, 0, 0.023, 0.0057, 0, 0...
[ "import urllib,requests,urllib2,socket,mechanize", "from urlparse import urlsplit,urlparse,urlunparse,urljoin", "from Data import Data_Access", "class PluginSQL (object):\n \"\"\"\n Responsavel por montar e analisar ataques de Injecao de Comandos SQL\n \"\"\" \n #-------------------------------...
#!/usr/bin/env python import urllib,requests,urllib2,socket,mechanize from urlparse import urlsplit,urlparse,urlunparse,urljoin from Data import Data_Access from bs4 import BeautifulSoup ######################################################################## class PluginXSS (object): """ Responsavel por montar e analisar ataques de Injecao de Codigos Script """ #---------------------------------------------------------------------- def __init__(self): """ Constructor """ self.data_Access = Data_Access(2, "PL_XSS") self.attack_Name = "XSS Injection" self.attack_Criticity = "High" #---------------------------------------------------------------------- def get_Attack (self): """ Retorna os ataques que estao no Banco de Dados """ return self.data_Access.getAttack() #---------------------------------------------------------------------- def get_Mitigation (self): """ Retorna as mitigacoes que estao no Banco de Dados """ return self.data_Access.getMitigation() #---------------------------------------------------------------------- def get_Impact (self): """ Retorna o nome da vulnerabilidade """ return self.attack_Criticity #---------------------------------------------------------------------- def get_Response (self): """ Retorna as respostas que estao no Banco de Dados """ return self.data_Access.getResponse() #---------------------------------------------------------------------- def get_Vulnerability (self): """ Retorna o nome da vulnerabilidade """ return self.attack_Name #---------------------------------------------------------------------- def get_Attack_Monted (self, url): """ Retorna a URL concatenada com o ataque """ self.atack_monted = [] for atack in self.getAttack(): self.atack_monted.append(url + ''.join(atack)) return self.atack_monted #---------------------------------------------------------------------- def mount_URL_Attack (self, url): """ Ataque via URL GET """ self.url_attack_parse = urlparse(url) self.url_parse_base = self.url_attack_parse.scheme + 
"://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?" self.url_atributos_ataques = [] self.atributo_ataque = [] # Pega todos os atributos da URL e os separa em uma lista self.urls_att = [] self.urls_att.append('&'.rpartition(self.url_attack_parse.query)) #---------------------------------------------------------------------- def gen_Form_Attack(self, list_forms, url): """ Inclui no formulario o ataque """ self.list_forms = [] self.list_forms = list_forms self.list_forms_ataques = [] self.aux = [] for ataque in self.get_Attack(): for formulario in self.list_forms: self.aux.append([formulario,''.join(ataque)]) self.list_forms_ataques.append([url, self.aux]) self.aux = [] return self.list_forms_ataques #---------------------------------------------------------------------- def analisa_Resultado (self, html, url_ataque, atributo): """ Analisa o codigo da pagina HTML se existe a presenca de Injecao de codigos Script """ self.bsxss = BeautifulSoup(html, 'lxml') self.script = self.bsxss.find_all ('script') self.attack_Result = [] # Verifica se existe getResponse no codigo-fonte da pagina atacada for resp in self.script: if resp.find('<script>alert(\'__XSS__\')</script>') != -1: # Concatena as informacoes retorno self.attack_Result.append('VULNERAVEL') self.attack_Result.append(self.get_Impact()) self.attack_Result.append(url_ataque) self.attack_Result.append(atributo) self.attack_Result.append(self.get_Mitigation()) # 1 = Ataque funcionou self.attack_Result.append(True) else: # Concatena as informacoes retorno self.attack_Result.append('NAO VULNERAVEL') self.attack_Result.append(self.get_Impact()) self.attack_Result.append(url_ataque) self.attack_Result.append(atributo) self.attack_Result.append(self.get_Mitigation()) # 0 = Ataque nao funcionou self.attack_Result.append(False) return self.attack_Result #---------------------------------------------------------------------- def gerar_Ataques (self, url): """ Ataca os parametros da URL informada """ 
self.URL_List_Target = [] self.URL_List_Target = url self.lista_ataques_gerados = [] for URL_Target in self.URL_List_Target: self.url_attack_parse = urlparse(URL_Target) self.url_parse_base = self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?" self.url_atributos_ataques = [] self.atributo_ataque = [] self.lista_var = [] self.lista_var = self.url_attack_parse.query.split('&') # Pega todos os atributos da URL e os separa em uma lista for atributo in self.url_attack_parse.query.split('&'): self.str_var_fix = "" for var_fix in self.lista_var: if var_fix == atributo: pass else: self.str_var_fix = self.str_var_fix + "&" + var_fix # Pega o atributo e inclui o ataque nele for atack in self.get_Attack(): self.lista_ataques_gerados.append([self.url_parse_base + atributo + ''.join(atack) + self.str_var_fix, atributo]) return self.lista_ataques_gerados
[ [ 1, 0, 0.0118, 0.0059, 0, 0.66, 0, 614, 0, 5, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0176, 0.0059, 0, 0.66, 0.25, 857, 0, 4, 0, 0, 857, 0, 0 ], [ 1, 0, 0.0235, 0.0059, 0, 0....
[ "import urllib,requests,urllib2,socket,mechanize", "from urlparse import urlsplit,urlparse,urlunparse,urljoin", "from Data import Data_Access", "from bs4 import BeautifulSoup", "class PluginXSS (object):\n \"\"\"\n Responsavel por montar e analisar ataques de Injecao de Codigos Script\n \"\"\" \...
import urllib
from Business import PluginSQL
from bs4 import BeautifulSoup
from urlparse import urlparse

# I was thinking of having the Exploiter perform the attack and the analysis
########################################################################
class Exploiter(object):
    """Opens a connection to the target URL and drives attack plugins.

    Holds the raw HTTP response (`self.html`) and a parsed BeautifulSoup
    tree of the target page (`self.bs`).
    """

    #----------------------------------------------------------------------
    # Constructor receives the target URL (and, in an earlier design,
    # the attack-plugin object).
    #def __init__(self, url, pluginAttack):
    def __init__(self, url):
        """Open a connection to `url` and parse its HTML."""
        try:
            self.url = url
            # Create the connection to the given URL.
            self.html = urllib.urlopen(url)
            # Parse the page once.  Bug fix: the original immediately
            # overwrote this tree with an empty BeautifulSoup(), losing
            # the parse; that second assignment has been removed.
            self.bs = BeautifulSoup(self.html.read(), "lxml")
        except IOError as e:
            # Bug fix: urllib.urlopen signals network failures with
            # IOError in Python 2; `urllib.NameError` does not exist.
            print(e)

    # Show web-server information.
    def getServerInfo(self):
        """Return the response headers as (name, value) pairs."""
        # Bug fix: headers live on the HTTP response object, not on the
        # URL string (`self.url.headers` always raised AttributeError).
        return self.html.headers.items()

    # Method that sends the attack to the server.
    def sendAttack(self, attack):
        """Fetch each mounted attack URL of `attack` and print the body."""
        self.pl_plugin = attack
        for attack_url in attack.get_Attack_Monted(self.url):
            self.html_attack = urllib.urlopen(attack_url)
            # NOTE(review): the response is only printed; detecting a
            # successful injection is still a TODO in this prototype.
            print(self.html_attack.read())

    # Shape the URL.
    def generate_URL(self):
        """Parse `self.url` and keep `scheme://netloc/` as the base."""
        # Bug fix: the original parsed the (undefined inside the method)
        # global name `url` instead of the instance attribute.
        self.url_parse = urlparse(self.url)
        self.url_parse_base = self.url_parse.scheme + "://" + self.url_parse.netloc + "/"

    # Return the URL.
    def get_URL(self):
        return self.url

    # Return the parsed URL.
    def get_URL_Parse(self):
        return self.url_parse

    # Return the parsed base URL.
    def get_URL_Parse_Base(self):
        return self.url_parse_base

    # Return the URL-encoded form of `url`.
    def gen_URL_Encode(self, url):
        # Bug fix: the original printed `self.url_encode` without ever
        # assigning it; quote() yields the percent-encoded string the
        # commented-out code intended.
        self.url_encode = urllib.quote(url)
        print(self.url_encode)

    # Locate an attack response inside the page source.
    def find_response(self, bs, response):
        """Stash the soup and the expected response for later inspection."""
        self.bs_auxiliar = bs
        self.response_auxiliar = response


if __name__ == "__main__":
    # Demo driver, guarded so importing this module (e.g. from the
    # Controller) no longer fires a live request at import time.
    vv = Exploiter("http://10.37.129.4/cat.php?id=1")
    pl2 = PluginSQL.PluginSQL()
    vv.sendAttack(pl2)
    #vv.gen_URL_Encode("id=1' or '3=3")
[ [ 1, 0, 0.0083, 0.0083, 0, 0.66, 0, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0165, 0.0083, 0, 0.66, 0.1429, 559, 0, 1, 0, 0, 559, 0, 0 ], [ 1, 0, 0.0248, 0.0083, 0, ...
[ "import urllib", "from Business import PluginSQL", "from bs4 import BeautifulSoup", "from urlparse import urlparse", "class Exploiter(object):\n \"\"\"\"\"\"\n\n #----------------------------------------------------------------------\n # Construtor recebe a URL alvo e o objeto Plugin dos ataques\n ...
from urlparse import urlparse
from Business import Exploiter
from Business import PluginSQL
from Business import PluginXSS
from bs4 import BeautifulSoup

########################################################################
class Controller (object):
    """Bridges the caller and the attack machinery: selects a plugin,
    fires the generated attacks through the Exploiter and collects
    the per-attack result lists produced by analisa_Resultado()."""

    #----------------------------------------------------------------------
    def __init__(self, url):
        """Build the Exploiter for `url` and remember the normalized URL."""
        # Base of the URL that was supplied.
        self.exploiter = Exploiter.Exploiter(url)
        self.URL_Original = self.exploiter.mount_URLs()

    #----------------------------------------------------------------------
    def check_URL (self):
        """Verify that the supplied URL is well-formed and usable."""
        return self.exploiter.mount_URLs()

    #----------------------------------------------------------------------
    def get_Server_Info (self):
        """Return the web-server information."""
        self.get_server_Info = self.exploiter.get_Server_Info()
        return self.get_server_Info

    #----------------------------------------------------------------------
    def select_Plugin (self, plugin, stop_scan):
        """Select which attacks should run: 1 = SQL injection, 2 = XSS.

        stop_scan == 1 means: stop at the first vulnerable result.
        """
        self.stop_scan = stop_scan
        if plugin == 1:
            self.pl_attack = PluginSQL.PluginSQL()
        if plugin == 2:
            self.pl_attack = PluginXSS.PluginXSS()

    #----------------------------------------------------------------------
    def send_Exploiter (self):
        """Run the selected plugin over every target URL and form,
        returning the accumulated result lists."""
        # Collect the target URLs into self.targets.
        self.targets = []
        self.targets = self.exploiter.get_All_Targets()
        # Generate the attack URLs into self.url_plugin.
        self.url_plugin = []
        self.url_plugin = self.pl_attack.gerar_Ataques(self.targets)
        self.retorno_plugin = []
        for url_ataque in self.url_plugin:
            self.html = self.exploiter.send_Attack(url_ataque[0])
            # NOTE(review): analisa_Resultado is invoked twice per attack
            # (once for the list, once for self.resultado); presumably a
            # single call could serve both — verify before changing.
            self.retorno_plugin.append(self.pl_attack.analisa_Resultado(self.html, url_ataque[0], url_ataque[1]))
            self.resultado = self.pl_attack.analisa_Resultado(self.html, url_ataque[0], url_ataque[1])
            try:
                # Early exit on first vulnerable hit when stop_scan is on.
                if (self.resultado[0] == 'VULNERAVEL') and (self.stop_scan == 1) and (self.resultado[0] != None):
                    return self.retorno_plugin
            except:
                pass
        #FORM ######
        self.ex_get_Form = []
        self.ex_get_Form = self.exploiter.get_Form(self.URL_Original)
        # Receives a list with the URL, form and attack.
        self.pl_gen_Form_Attack = []
        self.pl_gen_Form_Attack = self.pl_attack.gen_Form_Attack(self.ex_get_Form, self.URL_Original)
        # Receives a list with the source of each attacked page.
        self.respostas_html = []
        try:
            for n in self.pl_gen_Form_Attack:
                self.respostas_html.append(self.exploiter.get_Form_HTML(n))
            for html in self.respostas_html:
                # Build a human-readable "FORMULARIOS: ... ATAQUE: ..."
                # description used as the result's attribute field.
                self.atributo_form = "FORMULARIOS: "
                for formu in html[0][1][1]:
                    self.atributo_form += ''.join(formu[0]) + " "
                    self.atributo_form += "ATAQUE: " + ''.join(formu[1])
                self.retorno_plugin.append(self.pl_attack.analisa_Resultado(html[0][0], html[0][1][0], self.atributo_form))
                # NOTE(review): this still tests self.resultado from the
                # URL loop above (undefined if that list was empty) —
                # confirm whether the form result was intended here.
                if (self.resultado[0] == 'VULNERAVEL') and (self.stop_scan == 1):
                    return self.retorno_plugin
        except:
            pass
        # FINAL FORM ######
        return self.retorno_plugin
[ [ 1, 0, 0.009, 0.009, 0, 0.66, 0, 857, 0, 1, 0, 0, 857, 0, 0 ], [ 1, 0, 0.018, 0.009, 0, 0.66, 0.2, 559, 0, 1, 0, 0, 559, 0, 0 ], [ 1, 0, 0.027, 0.009, 0, 0.66, ...
[ "from urlparse import urlparse", "from Business import Exploiter", "from Business import PluginSQL", "from Business import PluginXSS", "from bs4 import BeautifulSoup", "class Controller (object):\n \"\"\"\"\"\"\n #----------------------------------------------------------------------\n def __init...
#!/usr/bin/env python
import urllib,requests,urllib2,socket,mechanize
from urlparse import urlsplit,urlparse,urlunparse,urljoin
from Data import Data_Access
from bs4 import BeautifulSoup

########################################################################
class PluginXSS (object):
    """
    Responsible for building and analysing Cross-Site-Scripting (XSS)
    injection attacks.
    """

    #----------------------------------------------------------------------
    def __init__(self):
        """Load the XSS payload set (vulnerability id 2, table PL_XSS)."""
        self.data_Access = Data_Access(2, "PL_XSS")
        self.attack_Name = "XSS Injection"
        self.attack_Criticity = "High"

    #----------------------------------------------------------------------
    def get_Attack (self):
        """Return the attack payloads stored in the database."""
        return self.data_Access.getAttack()

    #----------------------------------------------------------------------
    def get_Mitigation (self):
        """Return the mitigations stored in the database."""
        return self.data_Access.getMitigation()

    #----------------------------------------------------------------------
    def get_Impact (self):
        """Return the criticity level of this vulnerability."""
        return self.attack_Criticity

    #----------------------------------------------------------------------
    def get_Response (self):
        """Return the expected attack responses stored in the database."""
        return self.data_Access.getResponse()

    #----------------------------------------------------------------------
    def get_Vulnerability (self):
        """Return the name of this vulnerability."""
        return self.attack_Name

    #----------------------------------------------------------------------
    def get_Attack_Monted (self, url):
        """Return `url` concatenated with every payload."""
        self.atack_monted = []
        # Bug fix: the original called self.getAttack(), which does not
        # exist (the accessor is get_Attack), raising AttributeError.
        for atack in self.get_Attack():
            self.atack_monted.append(url + ''.join(atack))
        return self.atack_monted

    #----------------------------------------------------------------------
    def mount_URL_Attack (self, url):
        """Attack via URL GET.

        NOTE(review): this method is an unfinished draft kept for
        reference — it builds partial state and returns nothing;
        gerar_Ataques() below is the working implementation.
        """
        self.url_attack_parse = urlparse(url)
        self.url_parse_base = self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?"
        self.url_atributos_ataques = []
        self.atributo_ataque = []
        # Grab all the URL attributes and split them into a list.
        self.urls_att = []
        self.urls_att.append('&'.rpartition(self.url_attack_parse.query))

    #----------------------------------------------------------------------
    def gen_Form_Attack(self, list_forms, url):
        """Pair every form with every payload.

        Returns a list of [url, [[form, payload], ...]] entries, one per
        payload.
        """
        self.list_forms = []
        self.list_forms = list_forms
        self.list_forms_ataques = []
        self.aux = []
        for ataque in self.get_Attack():
            for formulario in self.list_forms:
                self.aux.append([formulario, ''.join(ataque)])
            self.list_forms_ataques.append([url, self.aux])
            self.aux = []
        return self.list_forms_ataques

    #----------------------------------------------------------------------
    def analisa_Resultado (self, html, url_ataque, atributo):
        """Inspect the page HTML for an injected script and return one
        6-entry result list:
        [status, impact, attack URL, attribute, mitigation, success flag].
        """
        self.bsxss = BeautifulSoup(html, 'lxml')
        self.script = self.bsxss.find_all ('script')
        self.attack_Result = []
        # Bug fix: the original called Tag.find() (BeautifulSoup's tag
        # search) with the payload string and compared the result to -1
        # as if it were str.find(); Tag.find() returns None for a missing
        # tag and None != -1 is always True, so every page containing any
        # <script> was reported vulnerable — and one 6-entry batch was
        # appended per tag.  Search the tag's source text instead and
        # build exactly one result.
        vulneravel = False
        for resp in self.script:
            if '<script>alert(\'__XSS__\')</script>' in str(resp):
                vulneravel = True
                break
        if vulneravel:
            # Concatenate the returned information.
            self.attack_Result.append('VULNERAVEL')
            self.attack_Result.append(self.get_Impact())
            self.attack_Result.append(url_ataque)
            self.attack_Result.append(atributo)
            self.attack_Result.append(self.get_Mitigation())
            # True = the attack worked.
            self.attack_Result.append(True)
        else:
            # Concatenate the returned information.
            self.attack_Result.append('NAO VULNERAVEL')
            self.attack_Result.append(self.get_Impact())
            self.attack_Result.append(url_ataque)
            self.attack_Result.append(atributo)
            self.attack_Result.append(self.get_Mitigation())
            # False = the attack did not work.
            self.attack_Result.append(False)
        return self.attack_Result

    #----------------------------------------------------------------------
    def gerar_Ataques (self, url):
        """Attack each query-string parameter of every target URL.

        Returns a list of [attack URL, attacked attribute] pairs: the
        payload is appended to one parameter while the remaining
        parameters are carried over unchanged.
        """
        self.URL_List_Target = []
        self.URL_List_Target = url
        self.lista_ataques_gerados = []
        for URL_Target in self.URL_List_Target:
            self.url_attack_parse = urlparse(URL_Target)
            self.url_parse_base = self.url_attack_parse.scheme + "://" + self.url_attack_parse.netloc + self.url_attack_parse.path + "?"
            self.url_atributos_ataques = []
            self.atributo_ataque = []
            self.lista_var = []
            self.lista_var = self.url_attack_parse.query.split('&')
            # Grab all the URL attributes and split them into a list.
            for atributo in self.url_attack_parse.query.split('&'):
                # Rebuild the rest of the query string (every parameter
                # except the one currently under attack).
                self.str_var_fix = ""
                for var_fix in self.lista_var:
                    if var_fix == atributo:
                        pass
                    else:
                        self.str_var_fix = self.str_var_fix + "&" + var_fix
                # Take the attribute and append the payload to it.
                for atack in self.get_Attack():
                    self.lista_ataques_gerados.append([self.url_parse_base + atributo + ''.join(atack) + self.str_var_fix, atributo])
        return self.lista_ataques_gerados
[ [ 1, 0, 0.0118, 0.0059, 0, 0.66, 0, 614, 0, 5, 0, 0, 614, 0, 0 ], [ 1, 0, 0.0176, 0.0059, 0, 0.66, 0.25, 857, 0, 4, 0, 0, 857, 0, 0 ], [ 1, 0, 0.0235, 0.0059, 0, 0....
[ "import urllib,requests,urllib2,socket,mechanize", "from urlparse import urlsplit,urlparse,urlunparse,urljoin", "from Data import Data_Access", "from bs4 import BeautifulSoup", "class PluginXSS (object):\n \"\"\"\n Responsavel por montar e analisar ataques de Injecao de Codigos Script\n \"\"\" \...
import MySQLdb ######################################################################## class Data_Access (object): #---------------------------------------------------------------------- def __init__ (self, id_vulnerability, pl_attack): """ Construtor: Cria conexão com a Base de Dados """ try: # Ainda falta tratar erros... db = MySQLdb.connect ( host = 'localhost', user = 'root', passwd = '', db = 'DB_1CLIC2LEARN' ) self.id_vulnerability = id_vulnerability self.pl_attack = pl_attack except _mysql_exceptions.OperationalError as e: print e return [("Can't connect to local MySQL server"), (e)] #---------------------------------------------------------------------- def get_Attack (self): """ Retorna os ataques """ self.cursor = db.cursor() self.cursor.execute('SELECT ATTACK FROM ' + pl_attack) self.result = cursor.fetchall() if result: for z in result: print z return result #---------------------------------------------------------------------- def get_Mitigation (self): """ Retorna formas de mitigar a vulnerabilidade identificada nos testes """ self.cursor = db.cursor() self.cursor.execute('SELECT MITIGATION FROM ' + id_vulnerability) self.result = cursor.fetchall() if result: for z in result: print z return result #---------------------------------------------------------------------- def get_Response (self): """ Retorna as respostas que a vulnerabilidade informada apresenta """ self.cursor = db.cursor() self.cursor.execute('SELECT ATTACK_RESPONSE FROM ' + id_vulnerability) self.result = cursor.fetchall() if result: for z in result: print z return result
[ [ 1, 0, 0.0143, 0.0143, 0, 0.66, 0, 838, 0, 1, 0, 0, 838, 0, 0 ], [ 3, 0, 0.5214, 0.9429, 0, 0.66, 1, 671, 0, 4, 0, 0, 186, 0, 14 ], [ 2, 1, 0.2, 0.2429, 1, 0.78, ...
[ "import MySQLdb", "class Data_Access (object):\n #----------------------------------------------------------------------\n def __init__ (self, id_vulnerability, pl_attack):\n \"\"\"\n Construtor:\n Cria conexão com a Base de Dados\n \"\"\" \n try:\t# Ainda falta tra...
import MySQLdb ######################################################################## class Data_Access (object): """ Cria conexao com a Base de Dados """ dba = MySQLdb.connect (host = 'localhost', user = 'root', passwd = '', db = 'DB_1CLIC2LEARN') #---------------------------------------------------------------------- def __init__(self, id_vulnerability, pl_attack): """ Construtor: Configura para a vulnerabilidade selecionada """ self.id_vulnerability = id_vulnerability self.pl_attack = pl_attack #---------------------------------------------------------------------- def getAttack (self): """ RESTORNA OS ATAQUES DA VULNERABILIDADE INFORMADA """ self.cursor = self.dba.cursor() self.cursor.execute('SELECT ATTACK FROM ' + self.pl_attack) self.result = self.cursor.fetchall() return self.result #---------------------------------------------------------------------- def getMitigation (self): """ RESTORNA A FORMA DE MITIGAR A VULNERABILIDADE INFORMADA """ self.cursor = self.dba.cursor() self.cursor.execute('SELECT MITIGATION FROM MITIGATION WHERE ID_VULNERABILITY = ' + str(self.id_vulnerability)) self.result = self.cursor.fetchone() return self.result #---------------------------------------------------------------------- def getResponse (self): """ RESTORNA AS RESPOSTAS QUE A VULNERABILIDADE INFORMADA APRESENTA """ self.cursor = self.dba.cursor() self.cursor.execute('SELECT RESPONSE FROM ATTACK_RESPONSE WHERE ID_VULNERABILITY = ' + str(self.id_vulnerability)) self.result = self.cursor.fetchall() return self.result
[ [ 1, 0, 0.0189, 0.0189, 0, 0.66, 0, 838, 0, 1, 0, 0, 838, 0, 0 ], [ 3, 0, 0.5377, 0.9057, 0, 0.66, 1, 671, 0, 4, 0, 0, 186, 0, 12 ], [ 8, 1, 0.1321, 0.0566, 1, 0.92...
[ "import MySQLdb", "class Data_Access (object):\n \"\"\"\n Cria conexao com a Base de Dados\n \"\"\" \n dba = MySQLdb.connect (host = 'localhost', user = 'root', passwd = '', db = 'DB_1CLIC2LEARN') \n\n\n #----------------------------------------------------------------------", " \"\"\"\n ...
import os import urllib from google.appengine.api import users from google.appengine.ext import ndb import jinja2 import webapp2 JINJA_ENVIRONMENT = jinja2.Environment( loader=jinja2.FileSystemLoader(os.path.dirname(__file__)), extensions=['jinja2.ext.autoescape'], autoescape=True) class MainPage(webapp2.RequestHandler): def get(self): template_values = {} template = JINJA_ENVIRONMENT.get_template('index.html') self.response.write(template.render(template_values)) application = webapp2.WSGIApplication([ ('/', MainPage), ], debug=True)
[ [ 1, 0, 0.0385, 0.0385, 0, 0.66, 0, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0769, 0.0385, 0, 0.66, 0.125, 614, 0, 1, 0, 0, 614, 0, 0 ], [ 1, 0, 0.1538, 0.0385, 0, 0...
[ "import os", "import urllib", "from google.appengine.api import users", "from google.appengine.ext import ndb", "import jinja2", "import webapp2", "JINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n auto...
#! /usr/bin/env python # encoding: utf-8 # waf 1.6.10 VERSION='0.3.3' import sys APPNAME='p2t' top = '.' out = 'build' CPP_SOURCES = ['poly2tri/common/shapes.cc', 'poly2tri/sweep/cdt.cc', 'poly2tri/sweep/advancing_front.cc', 'poly2tri/sweep/sweep_context.cc', 'poly2tri/sweep/sweep.cc', 'testbed/main.cc'] from waflib.Tools.compiler_cxx import cxx_compiler cxx_compiler['win32'] = ['g++'] #Platform specific libs if sys.platform == 'win32': # MS Windows sys_libs = ['glfw', 'opengl32'] elif sys.platform == 'darwin': # Apple OSX sys_libs = ['glfw', 'OpenGL'] else: # GNU/Linux, BSD, etc sys_libs = ['glfw', 'GL'] def options(opt): print(' set_options') opt.load('compiler_cxx') def configure(conf): print(' calling the configuration') conf.load('compiler_cxx') conf.env.CXXFLAGS = ['-O3', '-ffast-math'] conf.env.DEFINES_P2T = ['P2T'] conf.env.LIB_P2T = sys_libs def build(bld): print(' building') bld.program(features = 'cxx cxxprogram', source=CPP_SOURCES, target = 'p2t', uselib = 'P2T')
[ [ 14, 0, 0.1111, 0.0222, 0, 0.66, 0, 557, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.1333, 0.0222, 0, 0.66, 0.0909, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 14, 0, 0.1556, 0.0222, 0, ...
[ "VERSION='0.3.3'", "import sys", "APPNAME='p2t'", "top = '.'", "out = 'build'", "CPP_SOURCES = ['poly2tri/common/shapes.cc',\n 'poly2tri/sweep/cdt.cc',\n 'poly2tri/sweep/advancing_front.cc',\n 'poly2tri/sweep/sweep_context.cc',\n 'poly2tri/sweep/swee...
#!/usr/bin/python
# Copyright 2011 Google, Inc. All Rights Reserved.

# simple script to walk source tree looking for third-party licenses
# dumps resulting html page to stdout

import os, re, mimetypes, sys

# read source directories to scan from command line
SOURCE = sys.argv[1:]

# regex to find /* */ style comment blocks
COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL)

# regex used to detect if comment block is a license
COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE)
COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE)

EXCLUDE_TYPES = [
    "application/xml",
    "image/png",
]

# list of known licenses; keys are derived by stripping all whitespace and
# forcing to lowercase to help combine multiple files that have same license.
KNOWN_LICENSES = {}


class License:
    """One unique license text and the files it applies to."""

    def __init__(self, license_text):
        self.license_text = license_text
        self.filenames = []

    # add filename to the list of files that have the same license text
    def add_file(self, filename):
        if filename not in self.filenames:
            self.filenames.append(filename)


# strips every non-word character when deriving the de-duplication key
LICENSE_KEY = re.compile(r"[^\w]")


def find_license(license_text):
    """Return the canonical License for this text, creating it on first
    sight; texts differing only in whitespace/punctuation/case share one
    entry."""
    # TODO(alice): a lot these licenses are almost identical Apache licenses.
    # Most of them differ in origin/modifications. Consider combining similar
    # licenses.
    license_key = LICENSE_KEY.sub("", license_text).lower()
    if license_key not in KNOWN_LICENSES:
        KNOWN_LICENSES[license_key] = License(license_text)
    return KNOWN_LICENSES[license_key]


def discover_license(exact_path, filename):
    """Record any license found in the file at `exact_path`."""
    # when filename ends with LICENSE, assume applies to filename prefixed
    if filename.endswith("LICENSE"):
        # Renamed the handle from `file` to avoid shadowing the builtin.
        with open(exact_path) as fh:
            license_text = fh.read()
        target_filename = filename[:-len("LICENSE")]
        if target_filename.endswith("."):
            target_filename = target_filename[:-1]
        find_license(license_text).add_file(target_filename)
        return None

    # try searching for license blocks in raw file
    # Bug fix: guess_type() returns a (type, encoding) tuple; the original
    # compared the whole tuple against EXCLUDE_TYPES (a list of plain type
    # strings), so the exclusion never matched anything.
    mimetype, _encoding = mimetypes.guess_type(filename)
    if mimetype in EXCLUDE_TYPES:
        return None
    with open(exact_path) as fh:
        raw_file = fh.read()

    # include comments that have both "license" and "copyright" in the text
    for comment in COMMENT_BLOCK.finditer(raw_file):
        comment = comment.group(1)
        if COMMENT_LICENSE.search(comment) is None:
            continue
        if COMMENT_COPYRIGHT.search(comment) is None:
            continue
        find_license(comment).add_file(filename)


def main():
    """Walk every SOURCE tree and dump the notices page to stdout."""
    for source in SOURCE:
        for root, dirs, files in os.walk(source):
            for name in files:
                discover_license(os.path.join(root, name), name)

    print("<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>")
    for license in KNOWN_LICENSES.values():
        print("<h3>Notices for files:</h3><ul>")
        filenames = license.filenames
        filenames.sort()
        for filename in filenames:
            print("<li>%s</li>" % (filename))
        print("</ul>")
        print("<pre>%s</pre>" % license.license_text)
    print("</body></html>")


if __name__ == "__main__":
    # Guarded so the module can be imported without walking/printing.
    main()
[ [ 1, 0, 0.0816, 0.0102, 0, 0.66, 0, 688, 0, 4, 0, 0, 688, 0, 0 ], [ 14, 0, 0.1224, 0.0102, 0, 0.66, 0.0714, 792, 6, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.1531, 0.0102, 0, ...
[ "import os, re, mimetypes, sys", "SOURCE = sys.argv[1:]", "COMMENT_BLOCK = re.compile(r\"(/\\*.+?\\*/)\", re.MULTILINE | re.DOTALL)", "COMMENT_LICENSE = re.compile(r\"(license)\", re.IGNORECASE)", "COMMENT_COPYRIGHT = re.compile(r\"(copyright)\", re.IGNORECASE)", "EXCLUDE_TYPES = [\n \"application/xml\...
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Zdenko Podobný
# Author: Zdenko Podobný
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Simple python demo script of tesseract-ocr 3.02 c-api
"""

import os
import sys
import ctypes

# Demo variables
lang = "eng"
filename = "../phototest.tif"
libpath = "/usr/local/lib64/"
libpath_w = "../vs2008/DLL_Release/"
TESSDATA_PREFIX = os.environ.get('TESSDATA_PREFIX')
if not TESSDATA_PREFIX:
    TESSDATA_PREFIX = "../"

# Pick the platform-specific library name; on Windows also extend PATH so
# the DLL's dependencies can be found.
if sys.platform == "win32":
    libname = libpath_w + "libtesseract302.dll"
    libname_alt = "libtesseract302.dll"
    os.environ["PATH"] += os.pathsep + libpath_w
else:
    libname = libpath + "libtesseract.so.3.0.2"
    libname_alt = "libtesseract.so.3"

# Try the full path first, then the bare name (system search path).
try:
    tesseract = ctypes.cdll.LoadLibrary(libname)
except OSError:
    try:
        tesseract = ctypes.cdll.LoadLibrary(libname_alt)
    except OSError as err:
        # Bug fix: the original caught WindowsError with the Python-2-only
        # "except E, err" syntax; WindowsError does not exist on
        # non-Windows platforms (NameError there).  ctypes load failures
        # are OSError everywhere (WindowsError is its Windows alias).
        print("Trying to load '%s'..." % libname)
        print("Trying to load '%s'..." % libname_alt)
        print(err)
        exit(1)

tesseract.TessVersion.restype = ctypes.c_char_p
tesseract_version = tesseract.TessVersion()[:4]
# We need to check library version because libtesseract.so.3 is symlink
# and can point to other version than 3.02
if float(tesseract_version) < 3.02:
    print("Found tesseract-ocr library version %s." % tesseract_version)
    print("C-API is present only in version 3.02!")
    exit(2)

api = tesseract.TessBaseAPICreate()
rc = tesseract.TessBaseAPIInit3(api, TESSDATA_PREFIX, lang)
if (rc):
    tesseract.TessBaseAPIDelete(api)
    print("Could not initialize tesseract.\n")
    exit(3)

text_out = tesseract.TessBaseAPIProcessPages(api, filename, None, 0)
result_text = ctypes.string_at(text_out)
print(result_text)
[ [ 1, 0, 0.25, 0.25, 0, 0.66, 0, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.5, 0.25, 0, 0.66, 0.5, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.75, 0.25, 0, 0.66, 1, ...
[ "import os", "import sys", "import ctypes" ]
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = 'Michael Liao (askxuefeng@gmail.com)'

import os
import cgi
import time
import logging
import simplejson
from datetime import date
from google.appengine.api import xmpp
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch
from google.appengine.runtime import apiproxy_errors
from google.appengine.api import memcache
from google.appengine.api import users
from Cheetah.Template import Template
from autogen import CompiledTemplate
import weather
import store

def get_city(request):
    """Return the city alias stored in the request's 'city' cookie, or None."""
    # try get city from cookie:
    if 'Cookie' in request.headers:
        all = request.headers['Cookie']
        if all:
            cookies = all.split(';')
            for cookie in cookies:
                c = cookie.strip()
                if c.startswith('city='):
                    return c[5:]
    return None

def fetch_weather_in_cache(city):
    """Return the weather JSON for `city`, memcached for one hour."""
    data = memcache.get(str(city.code))
    if data:
        return data
    data = fetch_weather(city)
    if data is None:
        return None
    memcache.set(str(city.code), data, 3600)
    return data

def fetch_weather(city):
    """Fetch the RSS for `city` and render it as a JSON string, or None."""
    data = fetch_rss(city.code)
    if data is None:
        return None
    return str(weather.Weather(city.name, data))

def fetch_rss(code):
    """Download the Yahoo forecast RSS for location `code`; None on failure."""
    url = 'http://weather.yahooapis.com/forecastrss?w=%s' % code
    logging.info('Fetch RSS: %s' % url)
    try:
        result = urlfetch.fetch(url, follow_redirects=False)
    except (urlfetch.Error, apiproxy_errors.Error):
        return None
    if result.status_code!=200:
        return None
    return result.content

class XmppHandler(webapp.RequestHandler):
    """Answers XMPP chat messages with a short two-day forecast."""

    def post(self):
        message = xmpp.Message(self.request.POST)
        logging.info('XMPP from %s: %s' % (message.sender, message.body))
        name = message.body.strip().lower()
        if name=='':
            # Empty query: ask the user for a city name.
            message.reply(u'''噢,啥都不输,怎么知道您要查询的城市啊?
http://weather-china.appspot.com/
''')
            return
        city = store.find_city(name, return_default=False)
        if city is None:
            message.reply(u''':(
噢,没有找到您要查询的城市 "%s"。
http://weather-china.appspot.com/
''' % name)
            return
        json = fetch_weather_in_cache(city)
        if json is None:
            return message.reply(u''':(
对不起,网络故障,暂时无法查询,请过几分钟再试试。
http://weather-china.appspot.com/
''')
        if isinstance(json, unicode):
            json = json.encode('utf-8')
        w = simplejson.loads(json, encoding='utf-8')
        # Reply with today's and tomorrow's forecast plus a details link.
        return message.reply(
            u'''%s:
今日:%s,%s~%s度
明日:%s,%s~%s度
更详细的预报请查看
http://weather-china.appspot.com/?city=%s
''' % (
            w[u'name'],
            w[u'forecasts'][0][u'text'],
            w[u'forecasts'][0][u'low'],
            w[u'forecasts'][0][u'high'],
            w[u'forecasts'][1][u'text'],
            w[u'forecasts'][1][u'low'],
            w[u'forecasts'][1][u'high'],
            city.first_alias(),)
        )

class HomeHandler(webapp.RequestHandler):
    """Renders the home page for the requested (or remembered) city."""

    def get(self):
        time_1 = time.time()
        # City resolution order: ?city= parameter, cookie, default.
        name = self.request.get('city', '')
        if not name:
            name = get_city(self.request)
        if not name:
            name = 'beijing'
        # City list is memcached for one hour.
        cities = memcache.get('__cities__')
        if cities is None:
            cities = store.get_cities()
            memcache.set('__cities__', cities, 3600)
        city = None
        for c in cities:
            if c.name==name or name in c.aliases:
                city = c
                break
        if city is None:
            self.response.set_status(500)
            return
        # Remember the chosen city in a cookie for three years.
        today = date.today()
        target = date(today.year+3, today.month, today.day)
        expires = target.strftime('%a, %d-%b-%Y %H:%M:%S GMT')
        self.response.headers['Set-Cookie'] = 'city=%s; expires=%s; path=/' % (city.first_alias(), expires)
        time_2 = time.time()
        t = CompiledTemplate(searchList=[{'city' : city, 'cities' : cities}])
        self.response.out.write(t)
        time_3 = time.time()
        logging.info('Performance: %f / %f of rendering / total.' % (time_3-time_2, time_3-time_1))

class AdminHandler(webapp.RequestHandler):
    """City administration UI; admin users only."""

    def get(self):
        login = self.get_login_url()
        if login:
            self.redirect(login)
            return
        action = self.request.get('action', '')
        if action=='delete_city':
            key = self.request.get('key')
            store.delete_city(key)
            self.redirect_admin()
            return
        if action=='':
            # No action: render the admin page with the city list.
            cities = store.get_cities()
            root = os.path.dirname(__file__)
            t = Template(file=os.path.join(root, 'admin.html'), searchList=[{'cities' : cities}])
            self.response.out.write(t)
            return
        self.response.set_status(400)

    def post(self):
        login = self.get_login_url()
        if login:
            self.redirect(login)
            return
        action = self.request.get('action')
        if action=='create_city':
            # Escape user-supplied fields before storing them.
            name = cgi.escape(self.request.get('name')).strip().lower()
            aliases = [cgi.escape(x).lower() for x in self.request.get_all('aliases') if x.strip()]
            code = int(self.request.get('code'))
            store.create_city(name, aliases, code)
            self.redirect_admin()
            return
        self.response.set_status(400)

    def get_login_url(self):
        """Return a login URL if the caller is not an admin, else None."""
        if not users.is_current_user_admin():
            return users.create_login_url('/admin')
        return None

    def redirect_admin(self):
        # Timestamp parameter busts any cached copy of the admin page.
        self.redirect('/admin?t=%s' % time.time())

class ApiHandler(webapp.RequestHandler):
    """JSON / JSONP weather API."""

    CACHE_TIME = 600 # 600 seconds

    def get(self):
        callback = ''
        c = ''
        extension = self.request.get('extension', '')
        if extension=='chrome':
            # detect city from cookie:
            c = get_city(self.request)
            if not c:
                c = 'beijing'
        else:
            callback = cgi.escape(self.request.get('callback', '').strip())
            c = cgi.escape(self.request.get('city', '')).lower()
            if not c:
                return self.send_error('MISSING_PARAMETER', 'Missing parameter \'city\'')
        city = store.find_city(c, return_default=False)
        if city is None:
            return self.send_error('CITY_NOT_FOUND', 'City not found')
        weather = fetch_weather_in_cache(city)
        if weather is None:
            return self.send_error('SERVICE_UNAVAILABLE', 'Service unavailable')
        if callback:
            # JSONP: wrap the payload in the requested callback.
            if isinstance(callback, unicode):
                callback = callback.encode('utf-8')
            self.write_json('%s(%s);' % (callback, weather))
        else:
            self.write_json(weather)

    def send_error(self, code, msg):
        """Emit a small JSON error object."""
        json = '{ "error" : "%s", "message" : "%s"}' % (code, msg)
        self.write_json(json)

    def write_json(self, json):
        """Write `json` (str or unicode) with the JSON content type."""
        if isinstance(json, unicode):
            json = json.encode('utf-8')
        self.response.headers['Content-Type'] = 'application/json; charset=utf-8'
        self.response.out.write(json)

application = webapp.WSGIApplication([
        ('^/$', HomeHandler),
        ('^/api$', ApiHandler),
        ('^/admin$', AdminHandler),
        ('^/_ah/xmpp/message/chat/$', XmppHandler),
        ], debug=True)

def main():
    run_wsgi_app(application)

if __name__ == "__main__":
    main()
[ [ 14, 0, 0.0175, 0.0044, 0, 0.66, 0, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.0263, 0.0044, 0, 0.66, 0.0357, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0307, 0.0044, 0, 0...
[ "__author__ = 'Michael Liao (askxuefeng@gmail.com)'", "import os", "import cgi", "import time", "import logging", "import simplejson", "from datetime import date", "from google.appengine.api import xmpp", "from google.appengine.ext import webapp", "from google.appengine.ext.webapp.util import run_...
# $Id: CheetahWrapper.py,v 1.26 2007/10/02 01:22:04 tavis_rudd Exp $ """Cheetah command-line interface. 2002-09-03 MSO: Total rewrite. 2002-09-04 MSO: Bugfix, compile command was using wrong output ext. 2002-11-08 MSO: Another rewrite. Meta-Data ================================================================================ Author: Tavis Rudd <tavis@damnsimple.com> and Mike Orr <sluggoster@gmail.com>> Version: $Revision: 1.26 $ Start Date: 2001/03/30 Last Revision Date: $Date: 2007/10/02 01:22:04 $ """ __author__ = "Tavis Rudd <tavis@damnsimple.com> and Mike Orr <sluggoster@gmail.com>" __revision__ = "$Revision: 1.26 $"[11:-2] import getopt, glob, os, pprint, re, shutil, sys import cPickle as pickle from optparse import OptionParser from Cheetah.Version import Version from Cheetah.Template import Template, DEFAULT_COMPILER_SETTINGS from Cheetah.Utils.Misc import mkdirsWithPyInitFiles optionDashesRE = re.compile( R"^-{1,2}" ) moduleNameRE = re.compile( R"^[a-zA-Z_][a-zA-Z_0-9]*$" ) def fprintfMessage(stream, format, *args): if format[-1:] == '^': format = format[:-1] else: format += '\n' if args: message = format % args else: message = format stream.write(message) class Error(Exception): pass class Bundle: """Wrap the source, destination and backup paths in one neat little class. Used by CheetahWrapper.getBundles(). """ def __init__(self, **kw): self.__dict__.update(kw) def __repr__(self): return "<Bundle %r>" % self.__dict__ ################################################## ## USAGE FUNCTION & MESSAGES def usage(usageMessage, errorMessage="", out=sys.stderr): """Write help text, an optional error message, and abort the program. 
""" out.write(WRAPPER_TOP) out.write(usageMessage) exitStatus = 0 if errorMessage: out.write('\n') out.write("*** USAGE ERROR ***: %s\n" % errorMessage) exitStatus = 1 sys.exit(exitStatus) WRAPPER_TOP = """\ __ ____________ __ \ \/ \/ / \/ * * \/ CHEETAH %(Version)s Command-Line Tool \ | / \ ==----== / by Tavis Rudd <tavis@damnsimple.com> \__________/ and Mike Orr <sluggoster@gmail.com> """ % globals() HELP_PAGE1 = """\ USAGE: ------ cheetah compile [options] [FILES ...] : Compile template definitions cheetah fill [options] [FILES ...] : Fill template definitions cheetah help : Print this help message cheetah options : Print options help message cheetah test [options] : Run Cheetah's regression tests : (same as for unittest) cheetah version : Print Cheetah version number You may abbreviate the command to the first letter; e.g., 'h' == 'help'. If FILES is a single "-", read standard input and write standard output. Run "cheetah options" for the list of valid options. """ ################################################## ## CheetahWrapper CLASS class CheetahWrapper(object): MAKE_BACKUPS = True BACKUP_SUFFIX = ".bak" _templateClass = None _compilerSettings = None def __init__(self): self.progName = None self.command = None self.opts = None self.pathArgs = None self.sourceFiles = [] self.searchList = [] self.parser = None ################################################## ## MAIN ROUTINE def main(self, argv=None): """The main program controller.""" if argv is None: argv = sys.argv # Step 1: Determine the command and arguments. 
try: self.progName = progName = os.path.basename(argv[0]) self.command = command = optionDashesRE.sub("", argv[1]) if command == 'test': self.testOpts = argv[2:] else: self.parseOpts(argv[2:]) except IndexError: usage(HELP_PAGE1, "not enough command-line arguments") # Step 2: Call the command meths = (self.compile, self.fill, self.help, self.options, self.test, self.version) for meth in meths: methName = meth.__name__ # Or meth.im_func.func_name # Or meth.func_name (Python >= 2.1 only, sometimes works on 2.0) methInitial = methName[0] if command in (methName, methInitial): sys.argv[0] += (" " + methName) # @@MO: I don't necessarily agree sys.argv[0] should be # modified. meth() return # If none of the commands matched. usage(HELP_PAGE1, "unknown command '%s'" % command) def parseOpts(self, args): C, D, W = self.chatter, self.debug, self.warn self.isCompile = isCompile = self.command[0] == 'c' defaultOext = isCompile and ".py" or ".html" self.parser = OptionParser() pao = self.parser.add_option pao("--idir", action="store", dest="idir", default='', help='Input directory (defaults to current directory)') pao("--odir", action="store", dest="odir", default="", help='Output directory (defaults to current directory)') pao("--iext", action="store", dest="iext", default=".tmpl", help='File input extension (defaults: compile: .tmpl, fill: .tmpl)') pao("--oext", action="store", dest="oext", default=defaultOext, help='File output extension (defaults: compile: .py, fill: .html)') pao("-R", action="store_true", dest="recurse", default=False, help='Recurse through subdirectories looking for input files') pao("--stdout", "-p", action="store_true", dest="stdout", default=False, help='Send output to stdout instead of writing to a file') pao("--quiet", action="store_false", dest="verbose", default=True, help='Do not print informational messages to stdout') pao("--debug", action="store_true", dest="debug", default=False, help='Print diagnostic/debug information to stderr') 
pao("--env", action="store_true", dest="env", default=False, help='Pass the environment into the search list') pao("--pickle", action="store", dest="pickle", default="", help='Unpickle FILE and pass it through in the search list') pao("--flat", action="store_true", dest="flat", default=False, help='Do not build destination subdirectories') pao("--nobackup", action="store_true", dest="nobackup", default=False, help='Do not make backup files when generating new ones') pao("--settings", action="store", dest="compilerSettingsString", default=None, help='String of compiler settings to pass through, e.g. --settings="useNameMapper=False,useFilters=False"') pao('--print-settings', action='store_true', dest='print_settings', help='Print out the list of available compiler settings') pao("--templateAPIClass", action="store", dest="templateClassName", default=None, help='Name of a subclass of Cheetah.Template.Template to use for compilation, e.g. MyTemplateClass') pao("--parallel", action="store", type="int", dest="parallel", default=1, help='Compile/fill templates in parallel, e.g. --parallel=4') pao('--shbang', dest='shbang', default='#!/usr/bin/env python', help='Specify the shbang to place at the top of compiled templates, e.g. 
--shbang="#!/usr/bin/python2.6"') opts, files = self.parser.parse_args(args) self.opts = opts if sys.platform == "win32": new_files = [] for spec in files: file_list = glob.glob(spec) if file_list: new_files.extend(file_list) else: new_files.append(spec) files = new_files self.pathArgs = files D("""\ cheetah compile %s Options are %s Files are %s""", args, pprint.pformat(vars(opts)), files) if opts.print_settings: print() print('>> Available Cheetah compiler settings:') from Cheetah.Compiler import _DEFAULT_COMPILER_SETTINGS listing = _DEFAULT_COMPILER_SETTINGS listing.sort(key=lambda l: l[0][0].lower()) for l in listing: print('\t%s (default: "%s")\t%s' % l) sys.exit(0) #cleanup trailing path separators seps = [sep for sep in [os.sep, os.altsep] if sep] for attr in ['idir', 'odir']: for sep in seps: path = getattr(opts, attr, None) if path and path.endswith(sep): path = path[:-len(sep)] setattr(opts, attr, path) break self._fixExts() if opts.env: self.searchList.insert(0, os.environ) if opts.pickle: f = open(opts.pickle, 'rb') unpickled = pickle.load(f) f.close() self.searchList.insert(0, unpickled) ################################################## ## COMMAND METHODS def compile(self): self._compileOrFill() def fill(self): from Cheetah.ImportHooks import install install() self._compileOrFill() def help(self): usage(HELP_PAGE1, "", sys.stdout) def options(self): return self.parser.print_help() def test(self): # @@MO: Ugly kludge. TEST_WRITE_FILENAME = 'cheetah_test_file_creation_ability.tmp' try: f = open(TEST_WRITE_FILENAME, 'w') except: sys.exit("""\ Cannot run the tests because you don't have write permission in the current directory. The tests need to create temporary files. Change to a directory you do have write permission to and re-run the tests.""") else: f.close() os.remove(TEST_WRITE_FILENAME) # @@MO: End ugly kludge. 
from Cheetah.Tests import Test import unittest verbosity = 1 if '-q' in self.testOpts: verbosity = 0 if '-v' in self.testOpts: verbosity = 2 runner = unittest.TextTestRunner(verbosity=verbosity) runner.run(unittest.TestSuite(Test.suites)) def version(self): print(Version) # If you add a command, also add it to the 'meths' variable in main(). ################################################## ## LOGGING METHODS def chatter(self, format, *args): """Print a verbose message to stdout. But don't if .opts.stdout is true or .opts.verbose is false. """ if self.opts.stdout or not self.opts.verbose: return fprintfMessage(sys.stdout, format, *args) def debug(self, format, *args): """Print a debugging message to stderr, but don't if .debug is false. """ if self.opts.debug: fprintfMessage(sys.stderr, format, *args) def warn(self, format, *args): """Always print a warning message to stderr. """ fprintfMessage(sys.stderr, format, *args) def error(self, format, *args): """Always print a warning message to stderr and exit with an error code. """ fprintfMessage(sys.stderr, format, *args) sys.exit(1) ################################################## ## HELPER METHODS def _fixExts(self): assert self.opts.oext, "oext is empty!" iext, oext = self.opts.iext, self.opts.oext if iext and not iext.startswith("."): self.opts.iext = "." + iext if oext and not oext.startswith("."): self.opts.oext = "." 
+ oext def _compileOrFill(self): C, D, W = self.chatter, self.debug, self.warn opts, files = self.opts, self.pathArgs if files == ["-"]: self._compileOrFillStdin() return elif not files and opts.recurse: which = opts.idir and "idir" or "current" C("Drilling down recursively from %s directory.", which) sourceFiles = [] dir = os.path.join(self.opts.idir, os.curdir) os.path.walk(dir, self._expandSourceFilesWalk, sourceFiles) elif not files: usage(HELP_PAGE1, "Neither files nor -R specified!") else: sourceFiles = self._expandSourceFiles(files, opts.recurse, True) sourceFiles = [os.path.normpath(x) for x in sourceFiles] D("All source files found: %s", sourceFiles) bundles = self._getBundles(sourceFiles) D("All bundles: %s", pprint.pformat(bundles)) if self.opts.flat: self._checkForCollisions(bundles) # In parallel mode a new process is forked for each template # compilation, out of a pool of size self.opts.parallel. This is not # really optimal in all cases (e.g. probably wasteful for small # templates), but seems to work well in real life for me. # # It also won't work for Windows users, but I'm not going to lose any # sleep over that. if self.opts.parallel > 1: bad_child_exit = 0 pid_pool = set() def child_wait(): pid, status = os.wait() pid_pool.remove(pid) return os.WEXITSTATUS(status) while bundles: b = bundles.pop() pid = os.fork() if pid: pid_pool.add(pid) else: self._compileOrFillBundle(b) sys.exit(0) if len(pid_pool) == self.opts.parallel: bad_child_exit = child_wait() if bad_child_exit: break while pid_pool: child_exit = child_wait() if not bad_child_exit: bad_child_exit = child_exit if bad_child_exit: sys.exit("Child process failed, exited with code %d" % bad_child_exit) else: for b in bundles: self._compileOrFillBundle(b) def _checkForCollisions(self, bundles): """Check for multiple source paths writing to the same destination path. 
""" C, D, W = self.chatter, self.debug, self.warn isError = False dstSources = {} for b in bundles: if b.dst in dstSources: dstSources[b.dst].append(b.src) else: dstSources[b.dst] = [b.src] keys = sorted(dstSources.keys()) for dst in keys: sources = dstSources[dst] if len(sources) > 1: isError = True sources.sort() fmt = "Collision: multiple source files %s map to one destination file %s" W(fmt, sources, dst) if isError: what = self.isCompile and "Compilation" or "Filling" sys.exit("%s aborted due to collisions" % what) def _expandSourceFilesWalk(self, arg, dir, files): """Recursion extension for .expandSourceFiles(). This method is a callback for os.path.walk(). 'arg' is a list to which successful paths will be appended. """ iext = self.opts.iext for f in files: path = os.path.join(dir, f) if path.endswith(iext) and os.path.isfile(path): arg.append(path) elif os.path.islink(path) and os.path.isdir(path): os.path.walk(path, self._expandSourceFilesWalk, arg) # If is directory, do nothing; 'walk' will eventually get it. def _expandSourceFiles(self, files, recurse, addIextIfMissing): """Calculate source paths from 'files' by applying the command-line options. """ C, D, W = self.chatter, self.debug, self.warn idir = self.opts.idir iext = self.opts.iext files = [] for f in self.pathArgs: oldFilesLen = len(files) D("Expanding %s", f) path = os.path.join(idir, f) pathWithExt = path + iext # May or may not be valid. if os.path.isdir(path): if recurse: os.path.walk(path, self._expandSourceFilesWalk, files) else: raise Error("source file '%s' is a directory" % path) elif os.path.isfile(path): files.append(path) elif (addIextIfMissing and not path.endswith(iext) and os.path.isfile(pathWithExt)): files.append(pathWithExt) # Do not recurse directories discovered by iext appending. elif os.path.exists(path): W("Skipping source file '%s', not a plain file.", path) else: W("Skipping source file '%s', not found.", path) if len(files) > oldFilesLen: D(" ... 
found %s", files[oldFilesLen:]) return files def _getBundles(self, sourceFiles): flat = self.opts.flat idir = self.opts.idir iext = self.opts.iext nobackup = self.opts.nobackup odir = self.opts.odir oext = self.opts.oext idirSlash = idir + os.sep bundles = [] for src in sourceFiles: # 'base' is the subdirectory plus basename. base = src if idir and src.startswith(idirSlash): base = src[len(idirSlash):] if iext and base.endswith(iext): base = base[:-len(iext)] basename = os.path.basename(base) if flat: dst = os.path.join(odir, basename + oext) else: dbn = basename if odir and base.startswith(os.sep): odd = odir while odd != '': idx = base.find(odd) if idx == 0: dbn = base[len(odd):] if dbn[0] == '/': dbn = dbn[1:] break odd = os.path.dirname(odd) if odd == '/': break dst = os.path.join(odir, dbn + oext) else: dst = os.path.join(odir, base + oext) bak = dst + self.BACKUP_SUFFIX b = Bundle(src=src, dst=dst, bak=bak, base=base, basename=basename) bundles.append(b) return bundles def _getTemplateClass(self): C, D, W = self.chatter, self.debug, self.warn modname = None if self._templateClass: return self._templateClass modname = self.opts.templateClassName if not modname: return Template p = modname.rfind('.') if ':' not in modname: self.error('The value of option --templateAPIClass is invalid\n' 'It must be in the form "module:class", ' 'e.g. 
"Cheetah.Template:Template"') modname, classname = modname.split(':') C('using --templateAPIClass=%s:%s'%(modname, classname)) if p >= 0: mod = getattr(__import__(modname[:p], {}, {}, [modname[p+1:]]), modname[p+1:]) else: mod = __import__(modname, {}, {}, []) klass = getattr(mod, classname, None) if klass: self._templateClass = klass return klass else: self.error('**Template class specified in option --templateAPIClass not found\n' '**Falling back on Cheetah.Template:Template') def _getCompilerSettings(self): if self._compilerSettings: return self._compilerSettings def getkws(**kws): return kws if self.opts.compilerSettingsString: try: exec('settings = getkws(%s)'%self.opts.compilerSettingsString) except: self.error("There's an error in your --settings option." "It must be valid Python syntax.\n" +" --settings='%s'\n"%self.opts.compilerSettingsString +" %s: %s"%sys.exc_info()[:2] ) validKeys = DEFAULT_COMPILER_SETTINGS.keys() if [k for k in settings.keys() if k not in validKeys]: self.error( 'The --setting "%s" is not a valid compiler setting name.'%k) self._compilerSettings = settings return settings else: return {} def _compileOrFillStdin(self): TemplateClass = self._getTemplateClass() compilerSettings = self._getCompilerSettings() if self.isCompile: pysrc = TemplateClass.compile(file=sys.stdin, compilerSettings=compilerSettings, returnAClass=False) output = pysrc else: output = str(TemplateClass(file=sys.stdin, compilerSettings=compilerSettings)) sys.stdout.write(output) def _compileOrFillBundle(self, b): C, D, W = self.chatter, self.debug, self.warn TemplateClass = self._getTemplateClass() compilerSettings = self._getCompilerSettings() src = b.src dst = b.dst base = b.base basename = b.basename dstDir = os.path.dirname(dst) what = self.isCompile and "Compiling" or "Filling" C("%s %s -> %s^", what, src, dst) # No trailing newline. if os.path.exists(dst) and not self.opts.nobackup: bak = b.bak C(" (backup %s)", bak) # On same line as previous message. 
else: bak = None C("") if self.isCompile: if not moduleNameRE.match(basename): tup = basename, src raise Error("""\ %s: base name %s contains invalid characters. It must be named according to the same rules as Python modules.""" % tup) pysrc = TemplateClass.compile(file=src, returnAClass=False, moduleName=basename, className=basename, commandlineopts=self.opts, compilerSettings=compilerSettings) output = pysrc else: #output = str(TemplateClass(file=src, searchList=self.searchList)) tclass = TemplateClass.compile(file=src, compilerSettings=compilerSettings) output = str(tclass(searchList=self.searchList)) if bak: shutil.copyfile(dst, bak) if dstDir and not os.path.exists(dstDir): if self.isCompile: mkdirsWithPyInitFiles(dstDir) else: os.makedirs(dstDir) if self.opts.stdout: sys.stdout.write(output) else: f = open(dst, 'w') f.write(output) f.close() # Called when invoked as `cheetah` def _cheetah(): CheetahWrapper().main() # Called when invoked as `cheetah-compile` def _cheetah_compile(): sys.argv.insert(1, "compile") CheetahWrapper().main() ################################################## ## if run from the command line if __name__ == '__main__': CheetahWrapper().main() # vim: shiftwidth=4 tabstop=4 expandtab
[ [ 8, 0, 0.0127, 0.0206, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.0237, 0.0016, 0, 0.66, 0.05, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.0253, 0.0016, 0, 0.66, ...
[ "\"\"\"Cheetah command-line interface.\n\n2002-09-03 MSO: Total rewrite.\n2002-09-04 MSO: Bugfix, compile command was using wrong output ext.\n2002-11-08 MSO: Another rewrite.\n\nMeta-Data\n================================================================================", "__author__ = \"Tavis Rudd <tavis@damnsim...
''' Provides an abstract Servlet baseclass for Cheetah's Template class ''' import sys import os.path isWebwareInstalled = False try: try: from ds.appserver.Servlet import Servlet as BaseServlet except: from WebKit.Servlet import Servlet as BaseServlet isWebwareInstalled = True if not issubclass(BaseServlet, object): class NewStyleBaseServlet(BaseServlet, object): pass BaseServlet = NewStyleBaseServlet except: class BaseServlet(object): _reusable = 1 _threadSafe = 0 def awake(self, transaction): pass def sleep(self, transaction): pass def shutdown(self): pass ################################################## ## CLASSES class Servlet(BaseServlet): """This class is an abstract baseclass for Cheetah.Template.Template. It wraps WebKit.Servlet and provides a few extra convenience methods that are also found in WebKit.Page. It doesn't do any of the HTTP method resolution that is done in WebKit.HTTPServlet """ transaction = None application = None request = None session = None def __init__(self, *args, **kwargs): super(Servlet, self).__init__(*args, **kwargs) # this default will be changed by the .awake() method self._CHEETAH__isControlledByWebKit = False ## methods called by Webware during the request-response def awake(self, transaction): super(Servlet, self).awake(transaction) # a hack to signify that the servlet is being run directly from WebKit self._CHEETAH__isControlledByWebKit = True self.transaction = transaction #self.application = transaction.application self.response = response = transaction.response self.request = transaction.request # Temporary hack to accomodate bug in # WebKit.Servlet.Servlet.serverSidePath: it uses # self._request even though this attribute does not exist. # This attribute WILL disappear in the future. self._request = transaction.request() self.session = transaction.session self.write = response().write #self.writeln = response.writeln def respond(self, trans=None): raise NotImplementedError("""\ couldn't find the template's main method. 
If you are using #extends without #implements, try adding '#implements respond' to your template definition.""") def sleep(self, transaction): super(Servlet, self).sleep(transaction) self.session = None self.request = None self._request = None self.response = None self.transaction = None def shutdown(self): pass def serverSidePath(self, path=None, normpath=os.path.normpath, abspath=os.path.abspath ): if self._CHEETAH__isControlledByWebKit: return super(Servlet, self).serverSidePath(path) elif path: return normpath(abspath(path.replace("\\", '/'))) elif hasattr(self, '_filePath') and self._filePath: return normpath(abspath(self._filePath)) else: return None # vim: shiftwidth=4 tabstop=4 expandtab
[ [ 8, 0, 0.0179, 0.0268, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0446, 0.0089, 0, 0.66, 0.2, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.0536, 0.0089, 0, 0.66, ...
[ "'''\nProvides an abstract Servlet baseclass for Cheetah's Template class\n'''", "import sys", "import os.path", "isWebwareInstalled = False", "try:\n try:\n from ds.appserver.Servlet import Servlet as BaseServlet\n except:\n from WebKit.Servlet import Servlet as BaseServlet\n isWebwa...
# $Id: ErrorCatchers.py,v 1.7 2005/01/03 19:59:07 tavis_rudd Exp $ """ErrorCatcher class for Cheetah Templates Meta-Data ================================================================================ Author: Tavis Rudd <tavis@damnsimple.com> Version: $Revision: 1.7 $ Start Date: 2001/08/01 Last Revision Date: $Date: 2005/01/03 19:59:07 $ """ __author__ = "Tavis Rudd <tavis@damnsimple.com>" __revision__ = "$Revision: 1.7 $"[11:-2] import time from Cheetah.NameMapper import NotFound class Error(Exception): pass class ErrorCatcher: _exceptionsToCatch = (NotFound,) def __init__(self, templateObj): pass def exceptions(self): return self._exceptionsToCatch def warn(self, exc_val, code, rawCode, lineCol): return rawCode ## make an alias Echo = ErrorCatcher class BigEcho(ErrorCatcher): def warn(self, exc_val, code, rawCode, lineCol): return "="*15 + "&lt;" + rawCode + " could not be found&gt;" + "="*15 class KeyError(ErrorCatcher): def warn(self, exc_val, code, rawCode, lineCol): raise KeyError("no '%s' in this Template Object's Search List" % rawCode) class ListErrors(ErrorCatcher): """Accumulate a list of errors.""" _timeFormat = "%c" def __init__(self, templateObj): ErrorCatcher.__init__(self, templateObj) self._errors = [] def warn(self, exc_val, code, rawCode, lineCol): dict = locals().copy() del dict['self'] dict['time'] = time.strftime(self._timeFormat, time.localtime(time.time())) self._errors.append(dict) return rawCode def listErrors(self): """Return the list of errors.""" return self._errors
[ [ 8, 0, 0.0968, 0.1452, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.1774, 0.0161, 0, 0.66, 0.1, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.1935, 0.0161, 0, 0.66, ...
[ "\"\"\"ErrorCatcher class for Cheetah Templates\n\nMeta-Data\n================================================================================\nAuthor: Tavis Rudd <tavis@damnsimple.com>\nVersion: $Revision: 1.7 $\nStart Date: 2001/08/01\nLast Revision Date: $Date: 2005/01/03 19:59:07 $", "__author__ = \"Tavis Rud...
""" Nothing, but in a friendly way. Good for filling in for objects you want to hide. If $form.f1 is a RecursiveNull object, then $form.f1.anything["you"].might("use") will resolve to the empty string. This module was contributed by Ian Bicking. """ class RecursiveNull(object): def __getattr__(self, attr): return self def __getitem__(self, item): return self def __call__(self, *args, **kwargs): return self def __str__(self): return '' def __repr__(self): return '' def __nonzero__(self): return 0 def __eq__(self, x): if x: return False return True def __ne__(self, x): return x and True or False
[ [ 8, 0, 0.1429, 0.25, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.6429, 0.6786, 0, 0.66, 1, 956, 0, 8, 0, 0, 186, 0, 0 ], [ 2, 1, 0.375, 0.0714, 1, 0.66, 0,...
[ "\"\"\"\nNothing, but in a friendly way. Good for filling in for objects you want to\nhide. If $form.f1 is a RecursiveNull object, then\n$form.f1.anything[\"you\"].might(\"use\") will resolve to the empty string.\n\nThis module was contributed by Ian Bicking.\n\"\"\"", "class RecursiveNull(object):\n def __g...
""" @@TR: This code is pretty much unsupported. MondoReport.py -- Batching module for Python and Cheetah. Version 2001-Nov-18. Doesn't do much practical yet, but the companion testMondoReport.py passes all its tests. -Mike Orr (Iron) TODO: BatchRecord.prev/next/prev_batches/next_batches/query, prev.query, next.query. How about Report: .page(), .all(), .summary()? Or PageBreaker. """ import operator try: from functools import reduce except ImportError: # If functools doesn't exist, we must be on an old # enough version that has reduce() in builtins pass try: from Cheetah.NameMapper import valueForKey as lookup_func except ImportError: def lookup_func(obj, name): if hasattr(obj, name): return getattr(obj, name) else: return obj[name] # Raises KeyError. ########## PUBLIC GENERIC FUNCTIONS ############################## class NegativeError(ValueError): pass def isNumeric(v): return isinstance(v, (int, float)) def isNonNegative(v): ret = isNumeric(v) if ret and v < 0: raise NegativeError(v) def isNotNone(v): return v is not None def Roman(n): n = int(n) # Raises TypeError. if n < 1: raise ValueError("roman numeral for zero or negative undefined: " + n) roman = '' while n >= 1000: n = n - 1000 roman = roman + 'M' while n >= 500: n = n - 500 roman = roman + 'D' while n >= 100: n = n - 100 roman = roman + 'C' while n >= 50: n = n - 50 roman = roman + 'L' while n >= 10: n = n - 10 roman = roman + 'X' while n >= 5: n = n - 5 roman = roman + 'V' while n < 5 and n >= 1: n = n - 1 roman = roman + 'I' roman = roman.replace('DCCCC', 'CM') roman = roman.replace('CCCC', 'CD') roman = roman.replace('LXXXX', 'XC') roman = roman.replace('XXXX', 'XL') roman = roman.replace('VIIII', 'IX') roman = roman.replace('IIII', 'IV') return roman def sum(lis): return reduce(operator.add, lis, 0) def mean(lis): """Always returns a floating-point number. 
""" lis_len = len(lis) if lis_len == 0: return 0.00 # Avoid ZeroDivisionError (not raised for floats anyway) total = float( sum(lis) ) return total / lis_len def median(lis): lis = sorted(lis[:]) return lis[int(len(lis)/2)] def variance(lis): raise NotImplementedError() def variance_n(lis): raise NotImplementedError() def standardDeviation(lis): raise NotImplementedError() def standardDeviation_n(lis): raise NotImplementedError() class IndexFormats: """Eight ways to display a subscript index. ("Fifty ways to leave your lover....") """ def __init__(self, index, item=None): self._index = index self._number = index + 1 self._item = item def index(self): return self._index __call__ = index def number(self): return self._number def even(self): return self._number % 2 == 0 def odd(self): return not self.even() def even_i(self): return self._index % 2 == 0 def odd_i(self): return not self.even_i() def letter(self): return self.Letter().lower() def Letter(self): n = ord('A') + self._index return chr(n) def roman(self): return self.Roman().lower() def Roman(self): return Roman(self._number) def item(self): return self._item ########## PRIVATE CLASSES ############################## class ValuesGetterMixin: def __init__(self, origList): self._origList = origList def _getValues(self, field=None, criteria=None): if field: ret = [lookup_func(elm, field) for elm in self._origList] else: ret = self._origList if criteria: ret = list(filter(criteria, ret)) return ret class RecordStats(IndexFormats, ValuesGetterMixin): """The statistics that depend on the current record. """ def __init__(self, origList, index): record = origList[index] # Raises IndexError. 
IndexFormats.__init__(self, index, record) ValuesGetterMixin.__init__(self, origList) def length(self): return len(self._origList) def first(self): return self._index == 0 def last(self): return self._index >= len(self._origList) - 1 def _firstOrLastValue(self, field, currentIndex, otherIndex): currentValue = self._origList[currentIndex] # Raises IndexError. try: otherValue = self._origList[otherIndex] except IndexError: return True if field: currentValue = lookup_func(currentValue, field) otherValue = lookup_func(otherValue, field) return currentValue != otherValue def firstValue(self, field=None): return self._firstOrLastValue(field, self._index, self._index - 1) def lastValue(self, field=None): return self._firstOrLastValue(field, self._index, self._index + 1) # firstPage and lastPage not implemented. Needed? def percentOfTotal(self, field=None, suffix='%', default='N/A', decimals=2): rec = self._origList[self._index] if field: val = lookup_func(rec, field) else: val = rec try: lis = self._getValues(field, isNumeric) except NegativeError: return default total = sum(lis) if total == 0.00: # Avoid ZeroDivisionError. return default val = float(val) try: percent = (val / total) * 100 except ZeroDivisionError: return default if decimals == 0: percent = int(percent) else: percent = round(percent, decimals) if suffix: return str(percent) + suffix # String. else: return percent # Numeric. def __call__(self): # Overrides IndexFormats.__call__ """This instance is not callable, so we override the super method. 
""" raise NotImplementedError() def prev(self): if self._index == 0: return None else: length = self.length() start = self._index - length return PrevNextPage(self._origList, length, start) def next(self): if self._index + self.length() == self.length(): return None else: length = self.length() start = self._index + length return PrevNextPage(self._origList, length, start) def prevPages(self): raise NotImplementedError() def nextPages(self): raise NotImplementedError() prev_batches = prevPages next_batches = nextPages def summary(self): raise NotImplementedError() def _prevNextHelper(self, start, end, size, orphan, sequence): """Copied from Zope's DT_InSV.py's "opt" function. """ if size < 1: if start > 0 and end > 0 and end >= start: size=end+1-start else: size=7 if start > 0: try: sequence[start-1] except: start=len(sequence) # if start > l: start=l if end > 0: if end < start: end=start else: end=start+size-1 try: sequence[end+orphan-1] except: end=len(sequence) # if l - end < orphan: end=l elif end > 0: try: sequence[end-1] except: end=len(sequence) # if end > l: end=l start=end+1-size if start - 1 < orphan: start=1 else: start=1 end=start+size-1 try: sequence[end+orphan-1] except: end=len(sequence) # if l - end < orphan: end=l return start, end, size class Summary(ValuesGetterMixin): """The summary statistics, that don't depend on the current record. """ def __init__(self, origList): ValuesGetterMixin.__init__(self, origList) def sum(self, field=None): lis = self._getValues(field, isNumeric) return sum(lis) total = sum def count(self, field=None): lis = self._getValues(field, isNotNone) return len(lis) def min(self, field=None): lis = self._getValues(field, isNotNone) return min(lis) # Python builtin function min. def max(self, field=None): lis = self._getValues(field, isNotNone) return max(lis) # Python builtin function max. def mean(self, field=None): """Always returns a floating point number. 
""" lis = self._getValues(field, isNumeric) return mean(lis) average = mean def median(self, field=None): lis = self._getValues(field, isNumeric) return median(lis) def variance(self, field=None): raiseNotImplementedError() def variance_n(self, field=None): raiseNotImplementedError() def standardDeviation(self, field=None): raiseNotImplementedError() def standardDeviation_n(self, field=None): raiseNotImplementedError() class PrevNextPage: def __init__(self, origList, size, start): end = start + size self.start = IndexFormats(start, origList[start]) self.end = IndexFormats(end, origList[end]) self.length = size ########## MAIN PUBLIC CLASS ############################## class MondoReport: _RecordStatsClass = RecordStats _SummaryClass = Summary def __init__(self, origlist): self._origList = origlist def page(self, size, start, overlap=0, orphan=0): """Returns list of ($r, $a, $b) """ if overlap != 0: raise NotImplementedError("non-zero overlap") if orphan != 0: raise NotImplementedError("non-zero orphan") origList = self._origList origList_len = len(origList) start = max(0, start) end = min( start + size, len(self._origList) ) mySlice = origList[start:end] ret = [] for rel in range(size): abs_ = start + rel r = mySlice[rel] a = self._RecordStatsClass(origList, abs_) b = self._RecordStatsClass(mySlice, rel) tup = r, a, b ret.append(tup) return ret batch = page def all(self): origList_len = len(self._origList) return self.page(origList_len, 0, 0, 0) def summary(self): return self._SummaryClass(self._origList) """ ********************************** Return a pageful of records from a sequence, with statistics. in : origlist, list or tuple. The entire set of records. This is usually a list of objects or a list of dictionaries. page, int >= 0. Which page to display. size, int >= 1. How many records per page. widow, int >=0. Not implemented. orphan, int >=0. Not implemented. base, int >=0. Number of first page (usually 0 or 1). out: list of (o, b) pairs. 
The records for the current page. 'o' is the original element from 'origlist' unchanged. 'b' is a Batch object containing meta-info about 'o'. exc: IndexError if 'page' or 'size' is < 1. If 'origlist' is empty or 'page' is too high, it returns an empty list rather than raising an error. origlist_len = len(origlist) start = (page + base) * size end = min(start + size, origlist_len) ret = [] # widow, orphan calculation: adjust 'start' and 'end' up and down, # Set 'widow', 'orphan', 'first_nonwidow', 'first_nonorphan' attributes. for i in range(start, end): o = origlist[i] b = Batch(origlist, size, i) tup = o, b ret.append(tup) return ret def prev(self): # return a PrevNextPage or None def next(self): # return a PrevNextPage or None def prev_batches(self): # return a list of SimpleBatch for the previous batches def next_batches(self): # return a list of SimpleBatch for the next batches ########## PUBLIC MIXIN CLASS FOR CHEETAH TEMPLATES ############## class MondoReportMixin: def batch(self, origList, size=None, start=0, overlap=0, orphan=0): bat = MondoReport(origList) return bat.batch(size, start, overlap, orphan) def batchstats(self, origList): bat = MondoReport(origList) return bat.stats() """ # vim: shiftwidth=4 tabstop=4 expandtab textwidth=79
[ [ 8, 0, 0.0162, 0.0302, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0323, 0.0022, 0, 0.66, 0.0455, 616, 0, 1, 0, 0, 616, 0, 0 ], [ 7, 0, 0.0399, 0.0129, 0, 0.66...
[ "\"\"\"\n@@TR: This code is pretty much unsupported.\n\nMondoReport.py -- Batching module for Python and Cheetah.\n\nVersion 2001-Nov-18. Doesn't do much practical yet, but the companion\ntestMondoReport.py passes all its tests.\n-Mike Orr (Iron)", "import operator", "try:\n from functools import reduce\nex...
"""This package contains classes, functions, objects and packages contributed by Cheetah users. They are not used by Cheetah itself. There is no guarantee that this directory will be included in Cheetah releases, that these objects will remain here forever, or that they will remain backward-compatible. """ # vim: shiftwidth=5 tabstop=5 expandtab
[ [ 8, 0, 0.4375, 0.75, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ] ]
[ "\"\"\"This package contains classes, functions, objects and packages contributed\n by Cheetah users. They are not used by Cheetah itself. There is no\n guarantee that this directory will be included in Cheetah releases, that\n these objects will remain here forever, or that they will remain\n backward-co...
# $Id: CGITemplate.py,v 1.6 2006/01/29 02:09:59 tavis_rudd Exp $ """A subclass of Cheetah.Template for use in CGI scripts. Usage in a template: #extends Cheetah.Tools.CGITemplate #implements respond $cgiHeaders#slurp Usage in a template inheriting a Python class: 1. The template #extends MyPythonClass #implements respond $cgiHeaders#slurp 2. The Python class from Cheetah.Tools import CGITemplate class MyPythonClass(CGITemplate): def cgiHeadersHook(self): return "Content-Type: text/html; charset=koi8-r\n\n" To read GET/POST variables, use the .webInput method defined in Cheetah.Utils.WebInputMixin (available in all templates without importing anything), use Python's 'cgi' module, or make your own arrangements. This class inherits from Cheetah.Template to make it usable in Cheetah's single-inheritance model. Meta-Data ================================================================================ Author: Mike Orr <iron@mso.oz.net> License: This software is released for unlimited distribution under the terms of the MIT license. See the LICENSE file. Version: $Revision: 1.6 $ Start Date: 2001/10/03 Last Revision Date: $Date: 2006/01/29 02:09:59 $ """ __author__ = "Mike Orr <iron@mso.oz.net>" __revision__ = "$Revision: 1.6 $"[11:-2] import os from Cheetah.Template import Template class CGITemplate(Template): """Methods useful in CGI scripts. Any class that inherits this mixin must also inherit Cheetah.Servlet. """ def cgiHeaders(self): """Outputs the CGI headers if this is a CGI script. Usage: $cgiHeaders#slurp Override .cgiHeadersHook() if you want to customize the headers. """ if self.isCgi(): return self.cgiHeadersHook() def cgiHeadersHook(self): """Override if you want to customize the CGI headers. """ return "Content-type: text/html\n\n" def isCgi(self): """Is this a CGI script? """ env = 'REQUEST_METHOD' in os.environ wk = self._CHEETAH__isControlledByWebKit return env and not wk # vim: shiftwidth=4 tabstop=4 expandtab
[ [ 8, 0, 0.2532, 0.4675, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.4935, 0.013, 0, 0.66, 0.2, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.5065, 0.013, 0, 0.66, ...
[ "\"\"\"A subclass of Cheetah.Template for use in CGI scripts.\n\nUsage in a template:\n #extends Cheetah.Tools.CGITemplate\n #implements respond\n $cgiHeaders#slurp\n\nUsage in a template inheriting a Python class:", "__author__ = \"Mike Orr <iron@mso.oz.net>\"", "__revision__ = \"$Revision: 1.6 $\"[11...
# $Id: SiteHierarchy.py,v 1.1 2001/10/11 03:25:54 tavis_rudd Exp $ """Create menus and crumbs from a site hierarchy. You define the site hierarchy as lists/tuples. Each location in the hierarchy is a (url, description) tuple. Each list has the base URL/text in the 0 position, and all the children coming after it. Any child can be a list, representing further depth to the hierarchy. See the end of the file for an example hierarchy. Use Hierarchy(contents, currentURL), where contents is this hierarchy, and currentURL is the position you are currently in. The menubar and crumbs methods give you the HTML output. There are methods you can override to customize the HTML output. """ ################################################## ## DEPENDENCIES import string try: from cStringIO import StringIO except ImportError: from StringIO import StringIO ################################################## ## CLASSES class Hierarchy: def __init__(self, hierarchy, currentURL, prefix='', menuCSSClass=None, crumbCSSClass=None): """ hierarchy is described above, currentURL should be somewhere in the hierarchy. prefix will be added before all of the URLs (to help mitigate the problems with absolute URLs), and if given, cssClass will be used for both links *and* nonlinks. 
""" self._contents = hierarchy self._currentURL = currentURL if menuCSSClass: self._menuCSSClass = ' class="%s"' % menuCSSClass else: self._menuCSSClass = '' if crumbCSSClass: self._crumbCSSClass = ' class="%s"' % crumbCSSClass else: self._crumbCSSClass = '' self._prefix=prefix ## Main output methods def menuList(self, menuCSSClass=None): """An indented menu list""" if menuCSSClass: self._menuCSSClass = ' class="%s"' % menuCSSClass stream = StringIO() for item in self._contents[1:]: self._menubarRecurse(item, 0, stream) return stream.getvalue() def crumbs(self, crumbCSSClass=None): """The home>where>you>are crumbs""" if crumbCSSClass: self._crumbCSSClass = ' class="%s"' % crumbCSSClass path = [] pos = self._contents while True: ## This is not the fastest algorithm, I'm afraid. ## But it probably won't be for a huge hierarchy anyway. foundAny = False path.append(pos[0]) for item in pos[1:]: if self._inContents(item): if isinstance(item, tuple): path.append(item) break else: pos = item foundAny = True break if not foundAny: break if len(path) == 1: return self.emptyCrumb() return string.join(map(lambda x, self=self: self.crumbLink(x[0], x[1]), path), self.crumbSeperator()) + \ self.crumbTerminator() ## Methods to control the Aesthetics # - override these methods for your own look def menuLink(self, url, text, indent): if url == self._currentURL or self._prefix + url == self._currentURL: return '%s<B%s>%s</B> <BR>\n' % ('&nbsp;'*2*indent, self._menuCSSClass, text) else: return '%s<A HREF="%s%s"%s>%s</A> <BR>\n' % \ ('&nbsp;'*2*indent, self._prefix, url, self._menuCSSClass, text) def crumbLink(self, url, text): if url == self._currentURL or self._prefix + url == self._currentURL: return '<B%s>%s</B>' % (text, self._crumbCSSClass) else: return '<A HREF="%s%s"%s>%s</A>' % \ (self._prefix, url, self._crumbCSSClass, text) def crumbSeperator(self): return '&nbsp;&gt;&nbsp;' def crumbTerminator(self): return '' def emptyCrumb(self): """When you are at the homepage""" return 
'' ## internal methods def _menubarRecurse(self, contents, indent, stream): if isinstance(contents, tuple): url, text = contents rest = [] else: url, text = contents[0] rest = contents[1:] stream.write(self.menuLink(url, text, indent)) if self._inContents(contents): for item in rest: self._menubarRecurse(item, indent+1, stream) def _inContents(self, contents): if isinstance(contents, tuple): return self._currentURL == contents[0] for item in contents: if self._inContents(item): return True return False ################################################## ## from the command line if __name__ == '__main__': hierarchy = [('/', 'home'), ('/about', 'About Us'), [('/services', 'Services'), [('/services/products', 'Products'), ('/services/products/widget', 'The Widget'), ('/services/products/wedge', 'The Wedge'), ('/services/products/thimble', 'The Thimble'), ], ('/services/prices', 'Prices'), ], ('/contact', 'Contact Us'), ] for url in ['/', '/services', '/services/products/widget', '/contact']: print('<p>', '='*50) print('<br> %s: <br>\n' % url) n = Hierarchy(hierarchy, url, menuCSSClass='menu', crumbCSSClass='crumb', prefix='/here') print(n.menuList()) print('<p>', '-'*50) print(n.crumbs())
[ [ 8, 0, 0.0512, 0.0843, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1145, 0.006, 0, 0.66, 0.25, 890, 0, 1, 0, 0, 890, 0, 0 ], [ 7, 0, 0.1295, 0.0241, 0, 0.66, ...
[ "\"\"\"Create menus and crumbs from a site hierarchy.\n\nYou define the site hierarchy as lists/tuples. Each location in the hierarchy\nis a (url, description) tuple. Each list has the base URL/text in the 0\nposition, and all the children coming after it. Any child can be a list,\nrepresenting further depth to ...
#
[]
[]
from turbocheetah import cheetahsupport TurboCheetah = cheetahsupport.TurboCheetah __all__ = ["TurboCheetah"]
[ [ 1, 0, 0.2, 0.2, 0, 0.66, 0, 172, 0, 1, 0, 0, 172, 0, 0 ], [ 14, 0, 0.6, 0.2, 0, 0.66, 0.5, 316, 7, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 1, 0.2, 0, 0.66, 1, 272, ...
[ "from turbocheetah import cheetahsupport", "TurboCheetah = cheetahsupport.TurboCheetah", "__all__ = [\"TurboCheetah\"]" ]
"Template support for Cheetah" import sys, os, imp from Cheetah import Compiler import pkg_resources def _recompile_template(package, basename, tfile, classname): tmpl = pkg_resources.resource_string(package, "%s.tmpl" % basename) c = Compiler.Compiler(source=tmpl, mainClassName='GenTemplate') code = str(c) mod = imp.new_module(classname) ns = dict() exec(code, ns) tempclass = ns.get("GenTemplate", ns.get('DynamicallyCompiledCheetahTemplate')) assert tempclass tempclass.__name__ = basename setattr(mod, basename, tempclass) sys.modules[classname] = mod return mod class TurboCheetah: extension = "tmpl" def __init__(self, extra_vars_func=None, options=None): if options is None: options = dict() self.get_extra_vars = extra_vars_func self.options = options self.compiledTemplates = {} self.search_path = [] def load_template(self, template=None, template_string=None, template_file=None, loadingSite=False): """Searches for a template along the Python path. Template files must end in ".tmpl" and be in legitimate packages. 
""" given = len([_f for _f in (template, template_string, template_file) if _f]) if given > 1: raise TypeError( "You may give only one of template, template_string, and " "template_file") if not given: raise TypeError( "You must give one of template, template_string, or " "template_file") if template: return self.load_template_module(template) elif template_string: return self.load_template_string(template_string) elif template_file: return self.load_template_file(template_file) def load_template_module(self, classname): ct = self.compiledTemplates divider = classname.rfind(".") if divider > -1: package = classname[0:divider] basename = classname[divider+1:] else: raise ValueError("All templates must be in a package") if not self.options.get("cheetah.precompiled", False): tfile = pkg_resources.resource_filename(package, "%s.%s" % (basename, self.extension)) if classname in ct: mtime = os.stat(tfile).st_mtime if ct[classname] != mtime: ct[classname] = mtime del sys.modules[classname] mod = _recompile_template(package, basename, tfile, classname) else: mod = __import__(classname, dict(), dict(), [basename]) else: ct[classname] = os.stat(tfile).st_mtime mod = _recompile_template(package, basename, tfile, classname) else: mod = __import__(classname, dict(), dict(), [basename]) tempclass = getattr(mod, basename) return tempclass def load_template_string(self, content): raise NotImplementedError def load_template_file(self, filename): raise NotImplementedError def render(self, info, format="html", fragment=False, template=None, template_string=None, template_file=None): tclass = self.load_template( template=template, template_string=template_string, template_file=template_file) if self.get_extra_vars: extra = self.get_extra_vars() else: extra = {} tempobj = tclass(searchList=[info, extra]) if fragment: return tempobj.fragment() else: return tempobj.respond()
[ [ 8, 0, 0.0091, 0.0091, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0273, 0.0091, 0, 0.66, 0.2, 509, 0, 3, 0, 0, 509, 0, 0 ], [ 1, 0, 0.0455, 0.0091, 0, 0.66, ...
[ "\"Template support for Cheetah\"", "import sys, os, imp", "from Cheetah import Compiler", "import pkg_resources", "def _recompile_template(package, basename, tfile, classname):\n tmpl = pkg_resources.resource_string(package, \"%s.tmpl\" % basename)\n c = Compiler.Compiler(source=tmpl, mainClassName='...
import Cheetah.Template def render(template_file, **kwargs): ''' Cheetah.Django.render() takes the template filename (the filename should be a file in your Django TEMPLATE_DIRS) Any additional keyword arguments are passed into the template are propogated into the template's searchList ''' import django.http import django.template.loader source, loader = django.template.loader.find_template_source(template_file) t = Cheetah.Template.Template(source, searchList=[kwargs]) return django.http.HttpResponse(t.__str__())
[ [ 1, 0, 0.0625, 0.0625, 0, 0.66, 0, 171, 0, 1, 0, 0, 171, 0, 0 ], [ 2, 0, 0.5938, 0.875, 0, 0.66, 1, 24, 0, 2, 1, 0, 0, 0, 4 ], [ 8, 1, 0.4688, 0.5, 1, 0.44, 0,...
[ "import Cheetah.Template", "def render(template_file, **kwargs):\n '''\n Cheetah.Django.render() takes the template filename \n (the filename should be a file in your Django \n TEMPLATE_DIRS)\n\n Any additional keyword arguments are passed into the \n template are propogated ...
''' Provides several CacheStore backends for Cheetah's caching framework. The methods provided by these classes have the same semantics as those in the python-memcached API, except for their return values: set(key, val, time=0) set the value unconditionally add(key, val, time=0) set only if the server doesn't already have this key replace(key, val, time=0) set only if the server already have this key get(key, val) returns val or raises a KeyError delete(key) deletes or raises a KeyError ''' import time from Cheetah.Utils.memcache import Client as MemcachedClient class Error(Exception): pass class AbstractCacheStore(object): def set(self, key, val, time=None): raise NotImplementedError def add(self, key, val, time=None): raise NotImplementedError def replace(self, key, val, time=None): raise NotImplementedError def delete(self, key): raise NotImplementedError def get(self, key): raise NotImplementedError class MemoryCacheStore(AbstractCacheStore): def __init__(self): self._data = {} def set(self, key, val, time=0): self._data[key] = (val, time) def add(self, key, val, time=0): if key in self._data: raise Error('a value for key %r is already in the cache'%key) self._data[key] = (val, time) def replace(self, key, val, time=0): if key in self._data: raise Error('a value for key %r is already in the cache'%key) self._data[key] = (val, time) def delete(self, key): del self._data[key] def get(self, key): (val, exptime) = self._data[key] if exptime and time.time() > exptime: del self._data[key] raise KeyError(key) else: return val def clear(self): self._data.clear() class MemcachedCacheStore(AbstractCacheStore): servers = ('127.0.0.1:11211') def __init__(self, servers=None, debug=False): if servers is None: servers = self.servers self._client = MemcachedClient(servers, debug) def set(self, key, val, time=0): self._client.set(key, val, time) def add(self, key, val, time=0): res = self._client.add(key, val, time) if not res: raise Error('a value for key %r is already in the 
cache'%key) self._data[key] = (val, time) def replace(self, key, val, time=0): res = self._client.replace(key, val, time) if not res: raise Error('a value for key %r is already in the cache'%key) self._data[key] = (val, time) def delete(self, key): res = self._client.delete(key, time=0) if not res: raise KeyError(key) def get(self, key): val = self._client.get(key) if val is None: raise KeyError(key) else: return val def clear(self): self._client.flush_all()
[ [ 8, 0, 0.0787, 0.1481, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1574, 0.0093, 0, 0.66, 0.1667, 654, 0, 1, 0, 0, 654, 0, 0 ], [ 1, 0, 0.1759, 0.0093, 0, 0.66...
[ "'''\nProvides several CacheStore backends for Cheetah's caching framework. The\nmethods provided by these classes have the same semantics as those in the\npython-memcached API, except for their return values:\n\nset(key, val, time=0)\n set the value unconditionally\nadd(key, val, time=0)", "import time", "fr...
try: from ds.sys.Unspecified import Unspecified except ImportError: class _Unspecified: def __repr__(self): return 'Unspecified' def __str__(self): return 'Unspecified' Unspecified = _Unspecified()
[ [ 7, 0, 0.5556, 1, 0, 0.66, 0, 0, 0, 1, 0, 0, 0, 0, 1 ], [ 1, 1, 0.2222, 0.1111, 1, 0.02, 0, 219, 0, 1, 0, 0, 219, 0, 0 ], [ 3, 1, 0.6667, 0.5556, 1, 0.02, 0, ...
[ "try:\n from ds.sys.Unspecified import Unspecified\nexcept ImportError:\n class _Unspecified:\n def __repr__(self):\n return 'Unspecified' \n def __str__(self):\n return 'Unspecified'", " from ds.sys.Unspecified import Unspecified", " class _Unspecified:\n ...
# $Id: CacheRegion.py,v 1.3 2006/01/28 04:19:30 tavis_rudd Exp $ ''' Cache holder classes for Cheetah: Cache regions are defined using the #cache Cheetah directive. Each cache region can be viewed as a dictionary (keyed by cacheRegionID) handling at least one cache item (the default one). It's possible to add cacheItems in a region by using the `varyBy` #cache directive parameter as in the following example:: #def getArticle this is the article content. #end def #cache varyBy=$getArticleID() $getArticle($getArticleID()) #end cache The code above will generate a CacheRegion and add new cacheItem for each value of $getArticleID(). ''' try: from hashlib import md5 except ImportError: from md5 import md5 import time import Cheetah.CacheStore class CacheItem(object): ''' A CacheItem is a container storing: - cacheID (string) - refreshTime (timestamp or None) : last time the cache was refreshed - data (string) : the content of the cache ''' def __init__(self, cacheItemID, cacheStore): self._cacheItemID = cacheItemID self._cacheStore = cacheStore self._refreshTime = None self._expiryTime = 0 def hasExpired(self): return (self._expiryTime and time.time() > self._expiryTime) def setExpiryTime(self, time): self._expiryTime = time def getExpiryTime(self): return self._expiryTime def setData(self, data): self._refreshTime = time.time() self._cacheStore.set(self._cacheItemID, data, self._expiryTime) def getRefreshTime(self): return self._refreshTime def getData(self): assert self._refreshTime return self._cacheStore.get(self._cacheItemID) def renderOutput(self): """Can be overridden to implement edge-caching""" return self.getData() or "" def clear(self): self._cacheStore.delete(self._cacheItemID) self._refreshTime = None class _CacheDataStoreWrapper(object): def __init__(self, dataStore, keyPrefix): self._dataStore = dataStore self._keyPrefix = keyPrefix def get(self, key): return self._dataStore.get(self._keyPrefix+key) def delete(self, key): 
self._dataStore.delete(self._keyPrefix+key) def set(self, key, val, time=0): self._dataStore.set(self._keyPrefix+key, val, time=time) class CacheRegion(object): ''' A `CacheRegion` stores some `CacheItem` instances. This implementation stores the data in the memory of the current process. If you need a more advanced data store, create a cacheStore class that works with Cheetah's CacheStore protocol and provide it as the cacheStore argument to __init__. For example you could use Cheetah.CacheStore.MemcachedCacheStore, a wrapper around the Python memcached API (http://www.danga.com/memcached). ''' _cacheItemClass = CacheItem def __init__(self, regionID, templateCacheIdPrefix='', cacheStore=None): self._isNew = True self._regionID = regionID self._templateCacheIdPrefix = templateCacheIdPrefix if not cacheStore: cacheStore = Cheetah.CacheStore.MemoryCacheStore() self._cacheStore = cacheStore self._wrappedCacheDataStore = _CacheDataStoreWrapper( cacheStore, keyPrefix=templateCacheIdPrefix+':'+regionID+':') self._cacheItems = {} def isNew(self): return self._isNew def clear(self): " drop all the caches stored in this cache region " for cacheItemId in self._cacheItems.keys(): cacheItem = self._cacheItems[cacheItemId] cacheItem.clear() del self._cacheItems[cacheItemId] def getCacheItem(self, cacheItemID): """ Lazy access to a cacheItem Try to find a cache in the stored caches. If it doesn't exist, it's created. Returns a `CacheItem` instance. """ cacheItemID = md5(str(cacheItemID)).hexdigest() if cacheItemID not in self._cacheItems: cacheItem = self._cacheItemClass( cacheItemID=cacheItemID, cacheStore=self._wrappedCacheDataStore) self._cacheItems[cacheItemID] = cacheItem self._isNew = False return self._cacheItems[cacheItemID]
[ [ 8, 0, 0.0809, 0.1397, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 7, 0, 0.1728, 0.0294, 0, 0.66, 0.1667, 0, 0, 1, 0, 0, 0, 0, 0 ], [ 1, 1, 0.1691, 0.0074, 1, 0.17, ...
[ "'''\nCache holder classes for Cheetah:\n\nCache regions are defined using the #cache Cheetah directive. Each\ncache region can be viewed as a dictionary (keyed by cacheRegionID)\nhandling at least one cache item (the default one). It's possible to add\ncacheItems in a region by using the `varyBy` #cache directive ...
#!/usr/bin/env python ''' Tests for the 'cheetah' command. Besides unittest usage, recognizes the following command-line options: --list CheetahWrapper.py List all scenarios that are tested. The argument is the path of this script. --nodelete Don't delete scratch directory at end. --output Show the output of each subcommand. (Normally suppressed.) ''' import os import os.path import pdb import re # Used by listTests. import shutil import sys import tempfile import unittest from optparse import OptionParser from Cheetah.CheetahWrapper import CheetahWrapper # Used by NoBackup. try: from subprocess import Popen, PIPE, STDOUT class Popen4(Popen): def __init__(self, cmd, bufsize=-1, shell=True, close_fds=True, stdin=PIPE, stdout=PIPE, stderr=STDOUT, **kwargs): super(Popen4, self).__init__(cmd, bufsize=bufsize, shell=shell, close_fds=close_fds, stdin=stdin, stdout=stdout, stderr=stderr, **kwargs) self.tochild = self.stdin self.fromchild = self.stdout self.childerr = self.stderr except ImportError: from popen2 import Popen4 DELETE = True # True to clean up after ourselves, False for debugging. OUTPUT = False # Normally False, True for debugging. BACKUP_SUFFIX = CheetahWrapper.BACKUP_SUFFIX def warn(msg): sys.stderr.write(msg + '\n') class CFBase(unittest.TestCase): """Base class for "cheetah compile" and "cheetah fill" unit tests. """ srcDir = '' # Nonblank to create source directory. subdirs = ('child', 'child/grandkid') # Delete in reverse order. srcFiles = ('a.tmpl', 'child/a.tmpl', 'child/grandkid/a.tmpl') expectError = False # Used by --list option. def inform(self, message): if self.verbose: print(message) def setUp(self): """Create the top-level directories, subdirectories and .tmpl files. """ I = self.inform # Step 1: Create the scratch directory and chdir into it. self.scratchDir = scratchDir = tempfile.mktemp() os.mkdir(scratchDir) self.origCwd = os.getcwd() os.chdir(scratchDir) if self.srcDir: os.mkdir(self.srcDir) # Step 2: Create source subdirectories. 
for dir in self.subdirs: os.mkdir(dir) # Step 3: Create the .tmpl files, each in its proper directory. for fil in self.srcFiles: f = open(fil, 'w') f.write("Hello, world!\n") f.close() def tearDown(self): os.chdir(self.origCwd) if DELETE: shutil.rmtree(self.scratchDir, True) # Ignore errors. if os.path.exists(self.scratchDir): warn("Warning: unable to delete scratch directory %s") else: warn("Warning: not deleting scratch directory %s" % self.scratchDir) def _checkDestFileHelper(self, path, expected, allowSurroundingText, errmsg): """Low-level helper to check a destination file. in : path, string, the destination path. expected, string, the expected contents. allowSurroundingtext, bool, allow the result to contain additional text around the 'expected' substring? errmsg, string, the error message. It may contain the following "%"-operator keys: path, expected, result. out: None """ path = os.path.abspath(path) exists = os.path.exists(path) msg = "destination file missing: %s" % path self.failUnless(exists, msg) f = open(path, 'r') result = f.read() f.close() if allowSurroundingText: success = result.find(expected) != -1 else: success = result == expected msg = errmsg % locals() self.failUnless(success, msg) def checkCompile(self, path): # Raw string to prevent "\n" from being converted to a newline. #expected = R"write('Hello, world!\n')" expected = "Hello, world!" # might output a u'' string errmsg = """\ destination file %(path)s doesn't contain expected substring: %(expected)r""" self._checkDestFileHelper(path, expected, True, errmsg) def checkFill(self, path): expected = "Hello, world!\n" errmsg = """\ destination file %(path)s contains wrong result. Expected %(expected)r Found %(result)r""" self._checkDestFileHelper(path, expected, False, errmsg) def checkSubdirPyInit(self, path): """Verify a destination subdirectory exists and contains an __init__.py file. 
""" exists = os.path.exists(path) msg = "destination subdirectory %s misssing" % path self.failUnless(exists, msg) initPath = os.path.join(path, "__init__.py") exists = os.path.exists(initPath) msg = "destination init file missing: %s" % initPath self.failUnless(exists, msg) def checkNoBackup(self, path): """Verify 'path' does not exist. (To check --nobackup.) """ exists = os.path.exists(path) msg = "backup file exists in spite of --nobackup: %s" % path self.failIf(exists, msg) def locate_command(self, cmd): paths = os.getenv('PATH') if not paths: return cmd parts = cmd.split(' ') paths = paths.split(':') for p in paths: p = p + os.path.sep + parts[0] if os.path.isfile(p): return ' '.join([p] + parts[1:]) return ' '.join(parts) def assertWin32Subprocess(self, cmd): _in, _out = os.popen4(cmd) _in.close() output = _out.read() rc = _out.close() if rc is None: rc = 0 return rc, output def assertPosixSubprocess(self, cmd): cmd = self.locate_command(cmd) process = Popen4(cmd, env=os.environ) process.tochild.close() output = process.fromchild.read() status = process.wait() process.fromchild.close() return status, output def assertSubprocess(self, cmd, nonzero=False): status, output = None, None if sys.platform == 'win32': status, output = self.assertWin32Subprocess(cmd) else: status, output = self.assertPosixSubprocess(cmd) if not nonzero: self.failUnlessEqual(status, 0, '''Subprocess exited with a non-zero status (%d) %s''' % (status, output)) else: self.failIfEqual(status, 0, '''Subprocess exited with a zero status (%d) %s''' % (status, output)) return output def go(self, cmd, expectedStatus=0, expectedOutputSubstring=None): """Run a "cheetah compile" or "cheetah fill" subcommand. in : cmd, string, the command to run. expectedStatus, int, subcommand's expected output status. 0 if the subcommand is expected to succeed, 1-255 otherwise. expectedOutputSubstring, string, substring which much appear in the standard output or standard error. None to skip this test. out: None. 
""" output = self.assertSubprocess(cmd) if expectedOutputSubstring is not None: msg = "substring %r not found in subcommand output: %s" % \ (expectedOutputSubstring, cmd) substringTest = output.find(expectedOutputSubstring) != -1 self.failUnless(substringTest, msg) class CFIdirBase(CFBase): """Subclass for tests with --idir. """ srcDir = 'SRC' subdirs = ('SRC/child', 'SRC/child/grandkid') # Delete in reverse order. srcFiles = ('SRC/a.tmpl', 'SRC/child/a.tmpl', 'SRC/child/grandkid/a.tmpl') ################################################## ## TEST CASE CLASSES class OneFile(CFBase): def testCompile(self): self.go("cheetah compile a.tmpl") self.checkCompile("a.py") def testFill(self): self.go("cheetah fill a.tmpl") self.checkFill("a.html") def testText(self): self.go("cheetah fill --oext txt a.tmpl") self.checkFill("a.txt") class OneFileNoExtension(CFBase): def testCompile(self): self.go("cheetah compile a") self.checkCompile("a.py") def testFill(self): self.go("cheetah fill a") self.checkFill("a.html") def testText(self): self.go("cheetah fill --oext txt a") self.checkFill("a.txt") class SplatTmpl(CFBase): def testCompile(self): self.go("cheetah compile *.tmpl") self.checkCompile("a.py") def testFill(self): self.go("cheetah fill *.tmpl") self.checkFill("a.html") def testText(self): self.go("cheetah fill --oext txt *.tmpl") self.checkFill("a.txt") class ThreeFilesWithSubdirectories(CFBase): def testCompile(self): self.go("cheetah compile a.tmpl child/a.tmpl child/grandkid/a.tmpl") self.checkCompile("a.py") self.checkCompile("child/a.py") self.checkCompile("child/grandkid/a.py") def testFill(self): self.go("cheetah fill a.tmpl child/a.tmpl child/grandkid/a.tmpl") self.checkFill("a.html") self.checkFill("child/a.html") self.checkFill("child/grandkid/a.html") def testText(self): self.go("cheetah fill --oext txt a.tmpl child/a.tmpl child/grandkid/a.tmpl") self.checkFill("a.txt") self.checkFill("child/a.txt") self.checkFill("child/grandkid/a.txt") class 
ThreeFilesWithSubdirectoriesNoExtension(CFBase): def testCompile(self): self.go("cheetah compile a child/a child/grandkid/a") self.checkCompile("a.py") self.checkCompile("child/a.py") self.checkCompile("child/grandkid/a.py") def testFill(self): self.go("cheetah fill a child/a child/grandkid/a") self.checkFill("a.html") self.checkFill("child/a.html") self.checkFill("child/grandkid/a.html") def testText(self): self.go("cheetah fill --oext txt a child/a child/grandkid/a") self.checkFill("a.txt") self.checkFill("child/a.txt") self.checkFill("child/grandkid/a.txt") class SplatTmplWithSubdirectories(CFBase): def testCompile(self): self.go("cheetah compile *.tmpl child/*.tmpl child/grandkid/*.tmpl") self.checkCompile("a.py") self.checkCompile("child/a.py") self.checkCompile("child/grandkid/a.py") def testFill(self): self.go("cheetah fill *.tmpl child/*.tmpl child/grandkid/*.tmpl") self.checkFill("a.html") self.checkFill("child/a.html") self.checkFill("child/grandkid/a.html") def testText(self): self.go("cheetah fill --oext txt *.tmpl child/*.tmpl child/grandkid/*.tmpl") self.checkFill("a.txt") self.checkFill("child/a.txt") self.checkFill("child/grandkid/a.txt") class OneFileWithOdir(CFBase): def testCompile(self): self.go("cheetah compile --odir DEST a.tmpl") self.checkSubdirPyInit("DEST") self.checkCompile("DEST/a.py") def testFill(self): self.go("cheetah fill --odir DEST a.tmpl") self.checkFill("DEST/a.html") def testText(self): self.go("cheetah fill --odir DEST --oext txt a.tmpl") self.checkFill("DEST/a.txt") class VarietyWithOdir(CFBase): def testCompile(self): self.go("cheetah compile --odir DEST a.tmpl child/a child/grandkid/*.tmpl") self.checkSubdirPyInit("DEST") self.checkSubdirPyInit("DEST/child") self.checkSubdirPyInit("DEST/child/grandkid") self.checkCompile("DEST/a.py") self.checkCompile("DEST/child/a.py") self.checkCompile("DEST/child/grandkid/a.py") def testFill(self): self.go("cheetah fill --odir DEST a.tmpl child/a child/grandkid/*.tmpl") 
self.checkFill("DEST/a.html") self.checkFill("DEST/child/a.html") self.checkFill("DEST/child/grandkid/a.html") def testText(self): self.go("cheetah fill --odir DEST --oext txt a.tmpl child/a child/grandkid/*.tmpl") self.checkFill("DEST/a.txt") self.checkFill("DEST/child/a.txt") self.checkFill("DEST/child/grandkid/a.txt") class RecurseExplicit(CFBase): def testCompile(self): self.go("cheetah compile -R child") self.checkCompile("child/a.py") self.checkCompile("child/grandkid/a.py") def testFill(self): self.go("cheetah fill -R child") self.checkFill("child/a.html") self.checkFill("child/grandkid/a.html") def testText(self): self.go("cheetah fill -R --oext txt child") self.checkFill("child/a.txt") self.checkFill("child/grandkid/a.txt") class RecurseImplicit(CFBase): def testCompile(self): self.go("cheetah compile -R") self.checkCompile("child/a.py") self.checkCompile("child/grandkid/a.py") def testFill(self): self.go("cheetah fill -R") self.checkFill("a.html") self.checkFill("child/a.html") self.checkFill("child/grandkid/a.html") def testText(self): self.go("cheetah fill -R --oext txt") self.checkFill("a.txt") self.checkFill("child/a.txt") self.checkFill("child/grandkid/a.txt") class RecurseExplicitWIthOdir(CFBase): def testCompile(self): self.go("cheetah compile -R --odir DEST child") self.checkSubdirPyInit("DEST/child") self.checkSubdirPyInit("DEST/child/grandkid") self.checkCompile("DEST/child/a.py") self.checkCompile("DEST/child/grandkid/a.py") def testFill(self): self.go("cheetah fill -R --odir DEST child") self.checkFill("DEST/child/a.html") self.checkFill("DEST/child/grandkid/a.html") def testText(self): self.go("cheetah fill -R --odir DEST --oext txt child") self.checkFill("DEST/child/a.txt") self.checkFill("DEST/child/grandkid/a.txt") class Flat(CFBase): def testCompile(self): self.go("cheetah compile --flat child/a.tmpl") self.checkCompile("a.py") def testFill(self): self.go("cheetah fill --flat child/a.tmpl") self.checkFill("a.html") def testText(self): 
self.go("cheetah fill --flat --oext txt child/a.tmpl") self.checkFill("a.txt") class FlatRecurseCollision(CFBase): expectError = True def testCompile(self): self.assertSubprocess("cheetah compile -R --flat", nonzero=True) def testFill(self): self.assertSubprocess("cheetah fill -R --flat", nonzero=True) def testText(self): self.assertSubprocess("cheetah fill -R --flat", nonzero=True) class IdirRecurse(CFIdirBase): def testCompile(self): self.go("cheetah compile -R --idir SRC child") self.checkSubdirPyInit("child") self.checkSubdirPyInit("child/grandkid") self.checkCompile("child/a.py") self.checkCompile("child/grandkid/a.py") def testFill(self): self.go("cheetah fill -R --idir SRC child") self.checkFill("child/a.html") self.checkFill("child/grandkid/a.html") def testText(self): self.go("cheetah fill -R --idir SRC --oext txt child") self.checkFill("child/a.txt") self.checkFill("child/grandkid/a.txt") class IdirOdirRecurse(CFIdirBase): def testCompile(self): self.go("cheetah compile -R --idir SRC --odir DEST child") self.checkSubdirPyInit("DEST/child") self.checkSubdirPyInit("DEST/child/grandkid") self.checkCompile("DEST/child/a.py") self.checkCompile("DEST/child/grandkid/a.py") def testFill(self): self.go("cheetah fill -R --idir SRC --odir DEST child") self.checkFill("DEST/child/a.html") self.checkFill("DEST/child/grandkid/a.html") def testText(self): self.go("cheetah fill -R --idir SRC --odir DEST --oext txt child") self.checkFill("DEST/child/a.txt") self.checkFill("DEST/child/grandkid/a.txt") class IdirFlatRecurseCollision(CFIdirBase): expectError = True def testCompile(self): self.assertSubprocess("cheetah compile -R --flat --idir SRC", nonzero=True) def testFill(self): self.assertSubprocess("cheetah fill -R --flat --idir SRC", nonzero=True) def testText(self): self.assertSubprocess("cheetah fill -R --flat --idir SRC --oext txt", nonzero=True) class NoBackup(CFBase): """Run the command twice each time and verify a backup file is *not* created. 
""" def testCompile(self): self.go("cheetah compile --nobackup a.tmpl") self.go("cheetah compile --nobackup a.tmpl") self.checkNoBackup("a.py" + BACKUP_SUFFIX) def testFill(self): self.go("cheetah fill --nobackup a.tmpl") self.go("cheetah fill --nobackup a.tmpl") self.checkNoBackup("a.html" + BACKUP_SUFFIX) def testText(self): self.go("cheetah fill --nobackup --oext txt a.tmpl") self.go("cheetah fill --nobackup --oext txt a.tmpl") self.checkNoBackup("a.txt" + BACKUP_SUFFIX) def listTests(cheetahWrapperFile): """cheetahWrapperFile, string, path of this script. XXX TODO: don't print test where expectError is true. """ rx = re.compile( R'self\.go\("(.*?)"\)' ) f = open(cheetahWrapperFile) while True: lin = f.readline() if not lin: break m = rx.search(lin) if m: print(m.group(1)) f.close() def main(): global DELETE, OUTPUT parser = OptionParser() parser.add_option("--list", action="store", dest="listTests") parser.add_option("--nodelete", action="store_true") parser.add_option("--output", action="store_true") # The following options are passed to unittest. parser.add_option("-e", "--explain", action="store_true") parser.add_option("-v", "--verbose", action="store_true") parser.add_option("-q", "--quiet", action="store_true") opts, files = parser.parse_args() if opts.nodelete: DELETE = False if opts.output: OUTPUT = True if opts.listTests: listTests(opts.listTests) else: # Eliminate script-specific command-line arguments to prevent # errors in unittest. del sys.argv[1:] for opt in ("explain", "verbose", "quiet"): if getattr(opts, opt): sys.argv.append("--" + opt) sys.argv.extend(files) unittest.main() if __name__ == '__main__': main() # vim: sw=4 ts=4 expandtab
[ [ 8, 0, 0.0131, 0.0209, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0244, 0.0017, 0, 0.66, 0.027, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0262, 0.0017, 0, 0.66,...
[ "'''\nTests for the 'cheetah' command.\n\nBesides unittest usage, recognizes the following command-line options:\n --list CheetahWrapper.py\n List all scenarios that are tested. The argument is the path\n of this script.\n --nodelete", "import os", "import os.path", "import pdb", "impo...
#!/usr/bin/env python ''' Core module of Cheetah's Unit-testing framework TODO ================================================================================ # combo tests # negative test cases for expected exceptions # black-box vs clear-box testing # do some tests that run the Template for long enough to check that the refresh code works ''' import sys import unittest from Cheetah.Tests import SyntaxAndOutput from Cheetah.Tests import NameMapper from Cheetah.Tests import Misc from Cheetah.Tests import Filters from Cheetah.Tests import Template from Cheetah.Tests import Cheps from Cheetah.Tests import Parser from Cheetah.Tests import Regressions from Cheetah.Tests import Unicode from Cheetah.Tests import CheetahWrapper from Cheetah.Tests import Analyzer SyntaxAndOutput.install_eols() suites = [ unittest.findTestCases(SyntaxAndOutput), unittest.findTestCases(NameMapper), unittest.findTestCases(Filters), unittest.findTestCases(Template), #unittest.findTestCases(Cheps), unittest.findTestCases(Regressions), unittest.findTestCases(Unicode), unittest.findTestCases(Misc), unittest.findTestCases(Parser), unittest.findTestCases(Analyzer), ] if not sys.platform.startswith('java'): suites.append(unittest.findTestCases(CheetahWrapper)) if __name__ == '__main__': runner = unittest.TextTestRunner() if 'xml' in sys.argv: import xmlrunner runner = xmlrunner.XMLTestRunner(filename='Cheetah-Tests.xml') results = runner.run(unittest.TestSuite(suites))
[ [ 8, 0, 0.1226, 0.1887, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.2453, 0.0189, 0, 0.66, 0.0588, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.2642, 0.0189, 0, 0.66...
[ "'''\nCore module of Cheetah's Unit-testing framework\n\nTODO\n================================================================================\n# combo tests\n# negative test cases for expected exceptions\n# black-box vs clear-box testing", "import sys", "import unittest", "from Cheetah.Tests import SyntaxAn...
""" XML Test Runner for PyUnit """ # Written by Sebastian Rittau <srittau@jroger.in-berlin.de> and placed in # the Public Domain. With contributions by Paolo Borelli. __revision__ = "$Id: /private/python/stdlib/xmlrunner.py 16654 2007-11-12T12:46:35.368945Z srittau $" import os.path import re import sys import time import traceback import unittest from StringIO import StringIO from xml.sax.saxutils import escape from StringIO import StringIO class _TestInfo(object): """Information about a particular test. Used by _XMLTestResult. """ def __init__(self, test, time): _pieces = test.id().split('.') (self._class, self._method) = ('.'.join(_pieces[:-1]), _pieces[-1]) self._time = time self._error = None self._failure = None def print_report(self, stream): """Print information about this test case in XML format to the supplied stream. """ stream.write(' <testcase classname="%(class)s" name="%(method)s" time="%(time).4f">' % \ { "class": self._class, "method": self._method, "time": self._time, }) if self._failure != None: self._print_error(stream, 'failure', self._failure) if self._error != None: self._print_error(stream, 'error', self._error) stream.write('</testcase>\n') def _print_error(self, stream, tagname, error): """Print information from a failure or error to the supplied stream.""" text = escape(str(error[1])) stream.write('\n') stream.write(' <%s type="%s">%s\n' \ % (tagname, issubclass(error[0], Exception) and error[0].__name__ or str(error[0]), text)) tb_stream = StringIO() traceback.print_tb(error[2], None, tb_stream) stream.write(escape(tb_stream.getvalue())) stream.write(' </%s>\n' % tagname) stream.write(' ') # Module level functions since Python 2.3 doesn't grok decorators def create_success(test, time): """Create a _TestInfo instance for a successful test.""" return _TestInfo(test, time) def create_failure(test, time, failure): """Create a _TestInfo instance for a failed test.""" info = _TestInfo(test, time) info._failure = failure return info def 
create_error(test, time, error): """Create a _TestInfo instance for an erroneous test.""" info = _TestInfo(test, time) info._error = error return info class _XMLTestResult(unittest.TestResult): """A test result class that stores result as XML. Used by XMLTestRunner. """ def __init__(self, classname): unittest.TestResult.__init__(self) self._test_name = classname self._start_time = None self._tests = [] self._error = None self._failure = None def startTest(self, test): unittest.TestResult.startTest(self, test) self._error = None self._failure = None self._start_time = time.time() def stopTest(self, test): time_taken = time.time() - self._start_time unittest.TestResult.stopTest(self, test) if self._error: info = create_error(test, time_taken, self._error) elif self._failure: info = create_failure(test, time_taken, self._failure) else: info = create_success(test, time_taken) self._tests.append(info) def addError(self, test, err): unittest.TestResult.addError(self, test, err) self._error = err def addFailure(self, test, err): unittest.TestResult.addFailure(self, test, err) self._failure = err def print_report(self, stream, time_taken, out, err): """Prints the XML report to the supplied stream. The time the tests took to perform as well as the captured standard output and standard error streams must be passed in.a """ stream.write('<testsuite errors="%(e)d" failures="%(f)d" ' % \ { "e": len(self.errors), "f": len(self.failures) }) stream.write('name="%(n)s" tests="%(t)d" time="%(time).3f">\n' % \ { "n": self._test_name, "t": self.testsRun, "time": time_taken, }) for info in self._tests: info.print_report(stream) stream.write(' <system-out><![CDATA[%s]]></system-out>\n' % out) stream.write(' <system-err><![CDATA[%s]]></system-err>\n' % err) stream.write('</testsuite>\n') class XMLTestRunner(object): """A test runner that stores results in XML format compatible with JUnit. XMLTestRunner(stream=None) -> XML test runner The XML file is written to the supplied stream. 
If stream is None, the results are stored in a file called TEST-<module>.<class>.xml in the current working directory (if not overridden with the path property), where <module> and <class> are the module and class name of the test class. """ def __init__(self, *args, **kwargs): self._stream = kwargs.get('stream') self._filename = kwargs.get('filename') self._path = "." def run(self, test): """Run the given test case or test suite.""" class_ = test.__class__ classname = class_.__module__ + "." + class_.__name__ if self._stream == None: filename = "TEST-%s.xml" % classname if self._filename: filename = self._filename stream = file(os.path.join(self._path, filename), "w") stream.write('<?xml version="1.0" encoding="utf-8"?>\n') else: stream = self._stream result = _XMLTestResult(classname) start_time = time.time() # TODO: Python 2.5: Use the with statement old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = StringIO() sys.stderr = StringIO() try: test(result) try: out_s = sys.stdout.getvalue() except AttributeError: out_s = "" try: err_s = sys.stderr.getvalue() except AttributeError: err_s = "" finally: sys.stdout = old_stdout sys.stderr = old_stderr time_taken = time.time() - start_time result.print_report(stream, time_taken, out_s, err_s) if self._stream == None: stream.close() return result def _set_path(self, path): self._path = path path = property(lambda self: self._path, _set_path, None, """The path where the XML files are stored. This property is ignored when the XML file is written to a file stream.""") class XMLTestRunnerTest(unittest.TestCase): def setUp(self): self._stream = StringIO() def _try_test_run(self, test_class, expected): """Run the test suite against the supplied test class and compare the XML result against the expected XML string. Fail if the expected string doesn't match the actual string. All time attribute in the expected string should have the value "0.000". All error and failure messages are reduced to "Foobar". 
""" runner = XMLTestRunner(self._stream) runner.run(unittest.makeSuite(test_class)) got = self._stream.getvalue() # Replace all time="X.YYY" attributes by time="0.000" to enable a # simple string comparison. got = re.sub(r'time="\d+\.\d+"', 'time="0.000"', got) # Likewise, replace all failure and error messages by a simple "Foobar" # string. got = re.sub(r'(?s)<failure (.*?)>.*?</failure>', r'<failure \1>Foobar</failure>', got) got = re.sub(r'(?s)<error (.*?)>.*?</error>', r'<error \1>Foobar</error>', got) self.assertEqual(expected, got) def test_no_tests(self): """Regression test: Check whether a test run without any tests matches a previous run. """ class TestTest(unittest.TestCase): pass self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="0" time="0.000"> <system-out><![CDATA[]]></system-out> <system-err><![CDATA[]]></system-err> </testsuite> """) def test_success(self): """Regression test: Check whether a test run with a successful test matches a previous run. """ class TestTest(unittest.TestCase): def test_foo(self): pass self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000"> <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase> <system-out><![CDATA[]]></system-out> <system-err><![CDATA[]]></system-err> </testsuite> """) def test_failure(self): """Regression test: Check whether a test run with a failing test matches a previous run. 
""" class TestTest(unittest.TestCase): def test_foo(self): self.assert_(False) self._try_test_run(TestTest, """<testsuite errors="0" failures="1" name="unittest.TestSuite" tests="1" time="0.000"> <testcase classname="__main__.TestTest" name="test_foo" time="0.000"> <failure type="exceptions.AssertionError">Foobar</failure> </testcase> <system-out><![CDATA[]]></system-out> <system-err><![CDATA[]]></system-err> </testsuite> """) def test_error(self): """Regression test: Check whether a test run with a erroneous test matches a previous run. """ class TestTest(unittest.TestCase): def test_foo(self): raise IndexError() self._try_test_run(TestTest, """<testsuite errors="1" failures="0" name="unittest.TestSuite" tests="1" time="0.000"> <testcase classname="__main__.TestTest" name="test_foo" time="0.000"> <error type="exceptions.IndexError">Foobar</error> </testcase> <system-out><![CDATA[]]></system-out> <system-err><![CDATA[]]></system-err> </testsuite> """) def test_stdout_capture(self): """Regression test: Check whether a test run with output to stdout matches a previous run. """ class TestTest(unittest.TestCase): def test_foo(self): print("Test") self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000"> <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase> <system-out><![CDATA[Test ]]></system-out> <system-err><![CDATA[]]></system-err> </testsuite> """) def test_stderr_capture(self): """Regression test: Check whether a test run with output to stderr matches a previous run. 
""" class TestTest(unittest.TestCase): def test_foo(self): sys.stderr.write('Test\n') self._try_test_run(TestTest, """<testsuite errors="0" failures="0" name="unittest.TestSuite" tests="1" time="0.000"> <testcase classname="__main__.TestTest" name="test_foo" time="0.000"></testcase> <system-out><![CDATA[]]></system-out> <system-err><![CDATA[Test ]]></system-err> </testsuite> """) class NullStream(object): """A file-like object that discards everything written to it.""" def write(self, buffer): pass def test_unittests_changing_stdout(self): """Check whether the XMLTestRunner recovers gracefully from unit tests that change stdout, but don't change it back properly. """ class TestTest(unittest.TestCase): def test_foo(self): sys.stdout = XMLTestRunnerTest.NullStream() runner = XMLTestRunner(self._stream) runner.run(unittest.makeSuite(TestTest)) def test_unittests_changing_stderr(self): """Check whether the XMLTestRunner recovers gracefully from unit tests that change stderr, but don't change it back properly. """ class TestTest(unittest.TestCase): def test_foo(self): sys.stderr = XMLTestRunnerTest.NullStream() runner = XMLTestRunner(self._stream) runner.run(unittest.makeSuite(TestTest)) class XMLTestProgram(unittest.TestProgram): def runTests(self): if self.testRunner is None: self.testRunner = XMLTestRunner() unittest.TestProgram.runTests(self) main = XMLTestProgram if __name__ == "__main__": main(module=None)
[ [ 8, 0, 0.0052, 0.0079, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.021, 0.0026, 0, 0.66, 0.05, 809, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.0262, 0.0026, 0, 0.66, ...
[ "\"\"\"\nXML Test Runner for PyUnit\n\"\"\"", "__revision__ = \"$Id: /private/python/stdlib/xmlrunner.py 16654 2007-11-12T12:46:35.368945Z srittau $\"", "import os.path", "import re", "import sys", "import time", "import traceback", "import unittest", "from StringIO import StringIO", "from xml.sa...
#!/usr/bin/env python # -*- encoding: utf8 -*- from Cheetah.Template import Template from Cheetah import CheetahWrapper from Cheetah import DummyTransaction import imp import os import sys import tempfile import unittest class CommandLineTest(unittest.TestCase): def createAndCompile(self, source): sourcefile = '-' while sourcefile.find('-') != -1: sourcefile = tempfile.mktemp() fd = open('%s.tmpl' % sourcefile, 'w') fd.write(source) fd.close() wrap = CheetahWrapper.CheetahWrapper() wrap.main(['cheetah', 'compile', '--quiet', '--nobackup', sourcefile]) module_path, module_name = os.path.split(sourcefile) module = loadModule(module_name, [module_path]) template = getattr(module, module_name) return template class JBQ_UTF8_Test1(unittest.TestCase): def runTest(self): t = Template.compile(source="""Main file with |$v| $other""") otherT = Template.compile(source="Other template with |$v|") other = otherT() t.other = other t.v = u'Unicode String' t.other.v = u'Unicode String' assert unicode(t()) class JBQ_UTF8_Test2(unittest.TestCase): def runTest(self): t = Template.compile(source="""Main file with |$v| $other""") otherT = Template.compile(source="Other template with |$v|") other = otherT() t.other = other t.v = u'Unicode String with eacute é' t.other.v = u'Unicode String' assert unicode(t()) class JBQ_UTF8_Test3(unittest.TestCase): def runTest(self): t = Template.compile(source="""Main file with |$v| $other""") otherT = Template.compile(source="Other template with |$v|") other = otherT() t.other = other t.v = u'Unicode String with eacute é' t.other.v = u'Unicode String and an eacute é' assert unicode(t()) class JBQ_UTF8_Test4(unittest.TestCase): def runTest(self): t = Template.compile(source="""#encoding utf-8 Main file with |$v| and eacute in the template é""") t.v = 'Unicode String' assert unicode(t()) class JBQ_UTF8_Test5(unittest.TestCase): def runTest(self): t = Template.compile(source="""#encoding utf-8 Main file with |$v| and eacute in the template é""") t.v = 
u'Unicode String' assert unicode(t()) def loadModule(moduleName, path=None): if path: assert isinstance(path, list) try: mod = sys.modules[moduleName] except KeyError: fp = None try: fp, pathname, description = imp.find_module(moduleName, path) mod = imp.load_module(moduleName, fp, pathname, description) finally: if fp: fp.close() return mod class JBQ_UTF8_Test6(unittest.TestCase): def runTest(self): source = """#encoding utf-8 #set $someUnicodeString = u"Bébé" Main file with |$v| and eacute in the template é""" t = Template.compile(source=source) t.v = u'Unicode String' assert unicode(t()) class JBQ_UTF8_Test7(CommandLineTest): def runTest(self): source = """#encoding utf-8 #set $someUnicodeString = u"Bébé" Main file with |$v| and eacute in the template é""" template = self.createAndCompile(source) template.v = u'Unicode String' assert unicode(template()) class JBQ_UTF8_Test8(CommandLineTest): def testStaticCompile(self): source = """#encoding utf-8 #set $someUnicodeString = u"Bébé" $someUnicodeString""" template = self.createAndCompile(source)() a = unicode(template).encode("utf-8") self.assertEquals("Bébé", a) def testDynamicCompile(self): source = """#encoding utf-8 #set $someUnicodeString = u"Bébé" $someUnicodeString""" template = Template(source = source) a = unicode(template).encode("utf-8") self.assertEquals("Bébé", a) class EncodeUnicodeCompatTest(unittest.TestCase): """ Taken initially from Red Hat's bugzilla #529332 https://bugzilla.redhat.com/show_bug.cgi?id=529332 """ def runTest(self): t = Template("""Foo ${var}""", filter='EncodeUnicode') t.var = u"Text with some non-ascii characters: åäö" rc = t.respond() assert isinstance(rc, unicode), ('Template.respond() should return unicode', rc) rc = str(t) assert isinstance(rc, str), ('Template.__str__() should return a UTF-8 encoded string', rc) class Unicode_in_SearchList_Test(CommandLineTest): def test_BasicASCII(self): source = '''This is $adjective''' template = self.createAndCompile(source) assert 
template and issubclass(template, Template) template = template(searchList=[{'adjective' : u'neat'}]) assert template.respond() def test_Thai(self): # The string is something in Thai source = '''This is $foo $adjective''' template = self.createAndCompile(source) assert template and issubclass(template, Template) template = template(searchList=[{'foo' : 'bar', 'adjective' : u'\u0e22\u0e34\u0e19\u0e14\u0e35\u0e15\u0e49\u0e2d\u0e19\u0e23\u0e31\u0e1a'}]) assert template.respond() def test_Thai_utf8(self): utf8 = '\xe0\xb8\xa2\xe0\xb8\xb4\xe0\xb8\x99\xe0\xb8\x94\xe0\xb8\xb5\xe0\xb8\x95\xe0\xb9\x89\xe0\xb8\xad\xe0\xb8\x99\xe0\xb8\xa3\xe0\xb8\xb1\xe0\xb8\x9a' source = '''This is $adjective''' template = self.createAndCompile(source) assert template and issubclass(template, Template) template = template(searchList=[{'adjective' : utf8}]) assert template.respond() class InlineSpanishTest(unittest.TestCase): def setUp(self): super(InlineSpanishTest, self).setUp() self.template = ''' <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> <title>Pagina del vendedor</title> </head> <body> $header <h2>Bienvenido $nombre.</h2> <br /><br /><br /> <center> Usted tiene $numpedidos_noconf <a href="">pedidós</a> sin confirmar. <br /><br /> Bodega tiene fecha para $numpedidos_bodega <a href="">pedidos</a>. 
</center> </body> </html> ''' def test_failure(self): """ Test a template lacking a proper #encoding tag """ self.failUnlessRaises(UnicodeDecodeError, Template, self.template, searchList=[{'header' : '', 'nombre' : '', 'numpedidos_bodega' : '', 'numpedidos_noconf' : ''}]) def test_success(self): """ Test a template with a proper #encoding tag """ template = '#encoding utf-8\n%s' % self.template template = Template(template, searchList=[{'header' : '', 'nombre' : '', 'numpedidos_bodega' : '', 'numpedidos_noconf' : ''}]) self.assertTrue(unicode(template)) if __name__ == '__main__': unittest.main()
[ [ 1, 0, 0.0169, 0.0042, 0, 0.66, 0, 171, 0, 1, 0, 0, 171, 0, 0 ], [ 1, 0, 0.0211, 0.0042, 0, 0.66, 0.0476, 920, 0, 1, 0, 0, 920, 0, 0 ], [ 1, 0, 0.0253, 0.0042, 0, ...
[ "from Cheetah.Template import Template", "from Cheetah import CheetahWrapper", "from Cheetah import DummyTransaction", "import imp", "import os", "import sys", "import tempfile", "import unittest", "class CommandLineTest(unittest.TestCase):\n def createAndCompile(self, source):\n sourcefil...
#!/usr/bin/env python import sys import types import os import os.path import unittest from Cheetah.NameMapper import NotFound, valueForKey, \ valueForName, valueFromSearchList, valueFromFrame, valueFromFrameOrSearchList class DummyClass: classVar1 = 123 def __init__(self): self.instanceVar1 = 123 def __str__(self): return 'object' def meth(self, arg="arff"): return str(arg) def meth1(self, arg="doo"): return arg def meth2(self, arg1="a1", arg2="a2"): raise ValueError def meth3(self): """Tests a bug that Jeff Johnson reported on Oct 1, 2001""" x = 'A string' try: for i in [1, 2, 3, 4]: if x == 2: pass if x == 'xx': pass return x except: raise def dummyFunc(arg="Scooby"): return arg def funcThatRaises(): raise ValueError testNamespace = { 'aStr': 'blarg', 'anInt': 1, 'aFloat': 1.5, 'aDict': {'one': 'item1', 'two': 'item2', 'nestedDict': {'one': 'nestedItem1', 'two': 'nestedItem2', 'funcThatRaises': funcThatRaises, 'aClass': DummyClass, }, 'nestedFunc': dummyFunc, }, 'aClass': DummyClass, 'aFunc': dummyFunc, 'anObj': DummyClass(), 'aMeth': DummyClass().meth1, 'none': None, 'emptyString': '', 'funcThatRaises': funcThatRaises, } autoCallResults = {'aFunc': 'Scooby', 'aMeth': 'doo', } results = testNamespace.copy() results.update({'anObj.meth1': 'doo', 'aDict.one': 'item1', 'aDict.nestedDict': testNamespace['aDict']['nestedDict'], 'aDict.nestedDict.one': 'nestedItem1', 'aDict.nestedDict.aClass': DummyClass, 'aDict.nestedFunc': 'Scooby', 'aClass.classVar1': 123, 'anObj.instanceVar1': 123, 'anObj.meth3': 'A string', }) for k in testNamespace.keys(): # put them in the globals for the valueFromFrame tests exec('%s = testNamespace[k]'%k) ################################################## ## TEST BASE CLASSES class NameMapperTest(unittest.TestCase): failureException = (NotFound, AssertionError) _testNamespace = testNamespace _results = results def namespace(self): return self._testNamespace def VFN(self, name, autocall=True): return valueForName(self.namespace(), name, 
autocall) def VFS(self, searchList, name, autocall=True): return valueFromSearchList(searchList, name, autocall) # alias to be overriden later get = VFN def check(self, name): got = self.get(name) if name in autoCallResults: expected = autoCallResults[name] else: expected = self._results[name] assert got == expected ################################################## ## TEST CASE CLASSES class VFN(NameMapperTest): def test1(self): """string in dict lookup""" self.check('aStr') def test2(self): """string in dict lookup in a loop""" for i in range(10): self.check('aStr') def test3(self): """int in dict lookup""" self.check('anInt') def test4(self): """int in dict lookup in a loop""" for i in range(10): self.check('anInt') def test5(self): """float in dict lookup""" self.check('aFloat') def test6(self): """float in dict lookup in a loop""" for i in range(10): self.check('aFloat') def test7(self): """class in dict lookup""" self.check('aClass') def test8(self): """class in dict lookup in a loop""" for i in range(10): self.check('aClass') def test9(self): """aFunc in dict lookup""" self.check('aFunc') def test10(self): """aFunc in dict lookup in a loop""" for i in range(10): self.check('aFunc') def test11(self): """aMeth in dict lookup""" self.check('aMeth') def test12(self): """aMeth in dict lookup in a loop""" for i in range(10): self.check('aMeth') def test13(self): """aMeth in dict lookup""" self.check('aMeth') def test14(self): """aMeth in dict lookup in a loop""" for i in range(10): self.check('aMeth') def test15(self): """anObj in dict lookup""" self.check('anObj') def test16(self): """anObj in dict lookup in a loop""" for i in range(10): self.check('anObj') def test17(self): """aDict in dict lookup""" self.check('aDict') def test18(self): """aDict in dict lookup in a loop""" for i in range(10): self.check('aDict') def test17(self): """aDict in dict lookup""" self.check('aDict') def test18(self): """aDict in dict lookup in a loop""" for i in range(10): 
self.check('aDict') def test19(self): """aClass.classVar1 in dict lookup""" self.check('aClass.classVar1') def test20(self): """aClass.classVar1 in dict lookup in a loop""" for i in range(10): self.check('aClass.classVar1') def test23(self): """anObj.instanceVar1 in dict lookup""" self.check('anObj.instanceVar1') def test24(self): """anObj.instanceVar1 in dict lookup in a loop""" for i in range(10): self.check('anObj.instanceVar1') ## tests 22, 25, and 26 removed when the underscored lookup was removed def test27(self): """anObj.meth1 in dict lookup""" self.check('anObj.meth1') def test28(self): """anObj.meth1 in dict lookup in a loop""" for i in range(10): self.check('anObj.meth1') def test29(self): """aDict.one in dict lookup""" self.check('aDict.one') def test30(self): """aDict.one in dict lookup in a loop""" for i in range(10): self.check('aDict.one') def test31(self): """aDict.nestedDict in dict lookup""" self.check('aDict.nestedDict') def test32(self): """aDict.nestedDict in dict lookup in a loop""" for i in range(10): self.check('aDict.nestedDict') def test33(self): """aDict.nestedDict.one in dict lookup""" self.check('aDict.nestedDict.one') def test34(self): """aDict.nestedDict.one in dict lookup in a loop""" for i in range(10): self.check('aDict.nestedDict.one') def test35(self): """aDict.nestedFunc in dict lookup""" self.check('aDict.nestedFunc') def test36(self): """aDict.nestedFunc in dict lookup in a loop""" for i in range(10): self.check('aDict.nestedFunc') def test37(self): """aDict.nestedFunc in dict lookup - without autocalling""" assert self.get('aDict.nestedFunc', False) == dummyFunc def test38(self): """aDict.nestedFunc in dict lookup in a loop - without autocalling""" for i in range(10): assert self.get('aDict.nestedFunc', False) == dummyFunc def test39(self): """aMeth in dict lookup - without autocalling""" assert self.get('aMeth', False) == self.namespace()['aMeth'] def test40(self): """aMeth in dict lookup in a loop - without autocalling""" 
for i in range(10): assert self.get('aMeth', False) == self.namespace()['aMeth'] def test41(self): """anObj.meth3 in dict lookup""" self.check('anObj.meth3') def test42(self): """aMeth in dict lookup in a loop""" for i in range(10): self.check('anObj.meth3') def test43(self): """NotFound test""" def test(self=self): self.get('anObj.methX') self.assertRaises(NotFound, test) def test44(self): """NotFound test in a loop""" def test(self=self): self.get('anObj.methX') for i in range(10): self.assertRaises(NotFound, test) def test45(self): """Other exception from meth test""" def test(self=self): self.get('anObj.meth2') self.assertRaises(ValueError, test) def test46(self): """Other exception from meth test in a loop""" def test(self=self): self.get('anObj.meth2') for i in range(10): self.assertRaises(ValueError, test) def test47(self): """None in dict lookup""" self.check('none') def test48(self): """None in dict lookup in a loop""" for i in range(10): self.check('none') def test49(self): """EmptyString in dict lookup""" self.check('emptyString') def test50(self): """EmptyString in dict lookup in a loop""" for i in range(10): self.check('emptyString') def test51(self): """Other exception from func test""" def test(self=self): self.get('funcThatRaises') self.assertRaises(ValueError, test) def test52(self): """Other exception from func test in a loop""" def test(self=self): self.get('funcThatRaises') for i in range(10): self.assertRaises(ValueError, test) def test53(self): """Other exception from func test""" def test(self=self): self.get('aDict.nestedDict.funcThatRaises') self.assertRaises(ValueError, test) def test54(self): """Other exception from func test in a loop""" def test(self=self): self.get('aDict.nestedDict.funcThatRaises') for i in range(10): self.assertRaises(ValueError, test) def test55(self): """aDict.nestedDict.aClass in dict lookup""" self.check('aDict.nestedDict.aClass') def test56(self): """aDict.nestedDict.aClass in dict lookup in a loop""" for i in 
range(10): self.check('aDict.nestedDict.aClass') def test57(self): """aDict.nestedDict.aClass in dict lookup - without autocalling""" assert self.get('aDict.nestedDict.aClass', False) == DummyClass def test58(self): """aDict.nestedDict.aClass in dict lookup in a loop - without autocalling""" for i in range(10): assert self.get('aDict.nestedDict.aClass', False) == DummyClass def test59(self): """Other exception from func test -- but without autocalling shouldn't raise""" self.get('aDict.nestedDict.funcThatRaises', False) def test60(self): """Other exception from func test in a loop -- but without autocalling shouldn't raise""" for i in range(10): self.get('aDict.nestedDict.funcThatRaises', False) class VFS(VFN): _searchListLength = 1 def searchList(self): lng = self._searchListLength if lng == 1: return [self.namespace()] elif lng == 2: return [self.namespace(), {'dummy':1234}] elif lng == 3: # a tuple for kicks return ({'dummy':1234}, self.namespace(), {'dummy':1234}) elif lng == 4: # a generator for more kicks return self.searchListGenerator() def searchListGenerator(self): class Test: pass for i in [Test(), {'dummy':1234}, self.namespace(), {'dummy':1234}]: yield i def get(self, name, autocall=True): return self.VFS(self.searchList(), name, autocall) class VFS_2namespaces(VFS): _searchListLength = 2 class VFS_3namespaces(VFS): _searchListLength = 3 class VFS_4namespaces(VFS): _searchListLength = 4 class VFF(VFN): def get(self, name, autocall=True): ns = self._testNamespace aStr = ns['aStr'] aFloat = ns['aFloat'] none = 'some' return valueFromFrame(name, autocall) def setUp(self): """Mod some of the data """ self._testNamespace = ns = self._testNamespace.copy() self._results = res = self._results.copy() ns['aStr'] = res['aStr'] = 'BLARG' ns['aFloat'] = res['aFloat'] = 0.1234 res['none'] = 'some' res['True'] = True res['False'] = False res['None'] = None res['eval'] = eval def test_VFF_1(self): """Builtins""" self.check('True') self.check('None') 
self.check('False') assert self.get('eval', False)==eval assert self.get('range', False)==range class VFFSL(VFS): _searchListLength = 1 def setUp(self): """Mod some of the data """ self._testNamespace = ns = self._testNamespace.copy() self._results = res = self._results.copy() ns['aStr'] = res['aStr'] = 'BLARG' ns['aFloat'] = res['aFloat'] = 0.1234 res['none'] = 'some' del ns['anInt'] # will be picked up by globals def VFFSL(self, searchList, name, autocall=True): anInt = 1 none = 'some' return valueFromFrameOrSearchList(searchList, name, autocall) def get(self, name, autocall=True): return self.VFFSL(self.searchList(), name, autocall) class VFFSL_2(VFFSL): _searchListLength = 2 class VFFSL_3(VFFSL): _searchListLength = 3 class VFFSL_4(VFFSL): _searchListLength = 4 if sys.platform.startswith('java'): del VFF, VFFSL, VFFSL_2, VFFSL_3, VFFSL_4 ################################################## ## if run from the command line ## if __name__ == '__main__': unittest.main()
[ [ 1, 0, 0.0076, 0.0019, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.0095, 0.0019, 0, 0.66, 0.0385, 209, 0, 1, 0, 0, 209, 0, 0 ], [ 1, 0, 0.0114, 0.0019, 0, ...
[ "import sys", "import types", "import os", "import os.path", "import unittest", "from Cheetah.NameMapper import NotFound, valueForKey, \\\n valueForName, valueFromSearchList, valueFromFrame, valueFromFrameOrSearchList", "class DummyClass:\n classVar1 = 123\n\n def __init__(self):\n self...
#!/usr/bin/env python
# Pystone-calibrated performance benchmarks for Cheetah template compilation.
# NOTE(review): depends on the Python 2-only `hotshot` profiler, `test.pystone`
# and `unicode`; this module cannot run on Python 3 as written.
import hotshot
import hotshot.stats
import os
import sys
import unittest
from test import pystone
import time

import Cheetah.NameMapper
import Cheetah.Template

# This can be turned on with the `--debug` flag when running the test
# and will cause the tests to all just dump out how long they took
# instead of asserting on duration
DEBUG = False

# TOLERANCE in Pystones
kPS = 1000
TOLERANCE = 0.5*kPS

class DurationError(AssertionError):
    # Raised when a @perftest-decorated test exceeds its pystone budget.
    pass

_pystone_calibration_mark = None

def _pystone_calibration():
    """Run the pystone calibration once per process and cache the result."""
    global _pystone_calibration_mark
    if not _pystone_calibration_mark:
        _pystone_calibration_mark = pystone.pystones(loops=pystone.LOOPS)
    return _pystone_calibration_mark

def perftest(max_num_pystones, current_pystone=None):
    '''
    Performance test decorator based off the 'timedtest'
    decorator found in this Active State recipe:
        http://code.activestate.com/recipes/440700/
    '''
    if not isinstance(max_num_pystones, float):
        max_num_pystones = float(max_num_pystones)

    if not current_pystone:
        current_pystone = _pystone_calibration()

    def _test(function):
        def wrapper(*args, **kw):
            start_time = time.time()
            try:
                return function(*args, **kw)
            finally:
                total_time = time.time() - start_time
                if total_time == 0:
                    pystone_total_time = 0
                else:
                    # convert wall-clock seconds into machine-independent pystones
                    pystone_rate = current_pystone[0] / current_pystone[1]
                    pystone_total_time = total_time / pystone_rate
                global DEBUG
                if DEBUG:
                    print('The test "%s" took: %s pystones' % (function.func_name,
                        pystone_total_time))
                else:
                    if pystone_total_time > (max_num_pystones + TOLERANCE):
                        raise DurationError((('Test too long (%.2f Ps, '
                                              'need at most %.2f Ps)')
                                             % (pystone_total_time,
                                                max_num_pystones)))
        return wrapper
    return _test

class DynamicTemplatePerformanceTest(unittest.TestCase):
    loops = 10
    #@perftest(1200)
    def test_BasicDynamic(self):
        template = '''
            #def foo(arg1, arg2)
                #pass
            #end def
        '''
        for i in range(self.loops):
            klass = Cheetah.Template.Template.compile(template)
            assert klass
    # decorator applied manually -- presumably for pre-2.4 (no decorator
    # syntax) compatibility; TODO confirm
    test_BasicDynamic = perftest(1200)(test_BasicDynamic)

class PerformanceTest(unittest.TestCase):
    """Base class: hotshot-profiles performanceSample() over many iterations
    and optionally prints/keeps the resulting .prof file."""
    iterations = 100000
    display = False
    save = False

    def runTest(self):
        self.prof = hotshot.Profile('%s.prof' % self.__class__.__name__)
        self.prof.start()
        for i in range(self.iterations):
            if hasattr(self, 'performanceSample'):
                self.display = True
                self.performanceSample()
        self.prof.stop()
        self.prof.close()
        if self.display:
            print('>>> %s (%d iterations) ' % (self.__class__.__name__,
                    self.iterations))
            stats = hotshot.stats.load('%s.prof' % self.__class__.__name__)
            #stats.strip_dirs()
            stats.sort_stats('time', 'calls')
            stats.print_stats(50)
        if not self.save:
            os.unlink('%s.prof' % self.__class__.__name__)

class DynamicMethodCompilationTest(PerformanceTest):
    # compile + instantiate + call a generated method each sample
    def performanceSample(self):
        template = '''
            #import sys
            #import os
            #def testMethod()
                #set foo = [1, 2, 3, 4]
                #return $foo[0]
            #end def
        '''
        template = Cheetah.Template.Template.compile(template,
                keepRefToGeneratedCode=False)
        template = template()
        value = template.testMethod()

class BunchOfWriteCalls(PerformanceTest):
    # stresses the transaction write path; fewer iterations since each
    # sample performs 1000 writes
    iterations = 1000
    def performanceSample(self):
        template = '''
            #import sys
            #import os
            #for i in range(1000)
                $i
            #end for
        '''
        template = Cheetah.Template.Template.compile(template,
                keepRefToGeneratedCode=False)
        template = template()
        value = template.respond()
        del value

class DynamicSimpleCompilationTest(PerformanceTest):
    # compile + render a small mostly-static template each sample
    def performanceSample(self):
        template = '''
            #import sys
            #import os
            #set foo = [1,2,3,4]

            Well hello there! This is basic.

            Here's an array too: $foo
        '''
        template = Cheetah.Template.Template.compile(template,
                keepRefToGeneratedCode=False)
        template = template()
        template = unicode(template)

class FilterTest(PerformanceTest):
    template = None
    def setUp(self):
        super(FilterTest, self).setUp()
        # compile once; each sample only measures rendering/filtering
        template = '''
            #import sys
            #import os
            #set foo = [1, 2, 3, 4]

            $foo, $foo, $foo
        '''
        template = Cheetah.Template.Template.compile(template,
                keepRefToGeneratedCode=False)
        self.template = template()

    def performanceSample(self):
        value = unicode(self.template)

class LongCompileTest(PerformanceTest):
    ''' Test the compilation on a sufficiently large template '''
    def compile(self, template):
        return Cheetah.Template.Template.compile(template,
                keepRefToGeneratedCode=False)

    def performanceSample(self):
        template = '''
            #import sys
            #import Cheetah.Template
            #extends Cheetah.Template.Template

            #def header()
                <center><h2>This is my header</h2></center>
            #end def

            #def footer()
                #return "Huzzah"
            #end def

            #def scripts()
                #pass
            #end def

            #def respond()
                <html>
                    <head>
                        <title>${title}</title>
                        $scripts()
                    </head>
                    <body>
                        $header()
                        #for $i in $range(10)
                            This is just some stupid page!
                            <br/>
                        #end for
                        <br/>
                        $footer()
                    </body>
                </html>
            #end def
        '''
        return self.compile(template)

class LongCompile_CompilerSettingsTest(LongCompileTest):
    # same large template, but with the slower stack-frame/autocalling settings
    def compile(self, template):
        return Cheetah.Template.Template.compile(template,
                keepRefToGeneratedCode=False,
                compilerSettings={'useStackFrames' : True,
                                  'useAutocalling' : True})

class LongCompileAndRun(LongCompileTest):
    # compile AND render the large template each sample
    def performanceSample(self):
        template = super(LongCompileAndRun, self).performanceSample()
        template = template(searchList=[{'title' : 'foo'}])
        template = template.respond()

if __name__ == '__main__':
    if '--debug' in sys.argv:
        DEBUG = True
        # strip the custom flag so unittest's own arg parsing doesn't choke
        sys.argv = [arg for arg in sys.argv if not arg == '--debug']
    unittest.main()
[ [ 1, 0, 0.0123, 0.0041, 0, 0.66, 0, 974, 0, 1, 0, 0, 974, 0, 0 ], [ 1, 0, 0.0165, 0.0041, 0, 0.66, 0.04, 226, 0, 1, 0, 0, 226, 0, 0 ], [ 1, 0, 0.0206, 0.0041, 0, 0....
[ "import hotshot", "import hotshot.stats", "import os", "import sys", "import unittest", "from test import pystone", "import time", "import Cheetah.NameMapper", "import Cheetah.Template", "DEBUG = False", "kPS = 1000", "TOLERANCE = 0.5*kPS", "class DurationError(AssertionError):\n pass", ...
#!/usr/bin/env python
import unittest

from Cheetah import DirectiveAnalyzer


class AnalyzerTests(unittest.TestCase):
    """Sanity checks for DirectiveAnalyzer.analyze() directive counting."""

    def _directive_count(self, source, directive):
        """Analyze *source* and return the tally recorded for *directive*."""
        tally = DirectiveAnalyzer.analyze(source)
        return tally.get(directive)

    def test_set(self):
        source = '''
#set $foo = "bar"
Hello ${foo}!
        '''
        self.assertEquals(1, self._directive_count(source, 'set'))

    def test_compilersettings(self):
        source = '''
#compiler-settings
useNameMapper = False
#end compiler-settings
        '''
        self.assertEquals(1, self._directive_count(source, 'compiler-settings'))


if __name__ == '__main__':
    unittest.main()
[ [ 1, 0, 0.1034, 0.0345, 0, 0.66, 0, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.1724, 0.0345, 0, 0.66, 0.3333, 920, 0, 1, 0, 0, 920, 0, 0 ], [ 3, 0, 0.5517, 0.5862, 0, 0....
[ "import unittest", "from Cheetah import DirectiveAnalyzer", "class AnalyzerTests(unittest.TestCase):\n def test_set(self):\n template = '''\n #set $foo = \"bar\"\n Hello ${foo}!\n '''\n calls = DirectiveAnalyzer.analyze(template)\n self.assertEquals(1, calls.get('set...
#!/usr/bin/env python
import unittest

from Cheetah import SettingsManager


class SettingsManagerTests(unittest.TestCase):
    """Checks for SettingsManager.mergeNestedDictionaries()."""

    def test_mergeDictionaries(self):
        base = {'foo' : 'bar', 'abc' : {'a' : 1, 'b' : 2, 'c' : (3,)}}
        overlay = {'xyz' : (10, 9)}
        merged = SettingsManager.mergeNestedDictionaries(base, overlay)
        # the overlay key is added while every original (nested) key survives
        self.assertEquals(
            merged,
            {'xyz': (10, 9), 'foo': 'bar', 'abc': {'a': 1, 'c': (3,), 'b': 2}})


if __name__ == '__main__':
    unittest.main()
[ [ 1, 0, 0.15, 0.05, 0, 0.66, 0, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.25, 0.05, 0, 0.66, 0.3333, 920, 0, 1, 0, 0, 920, 0, 0 ], [ 3, 0, 0.575, 0.4, 0, 0.66, 0.66...
[ "import unittest", "from Cheetah import SettingsManager", "class SettingsManagerTests(unittest.TestCase):\n def test_mergeDictionaries(self):\n left = {'foo' : 'bar', 'abc' : {'a' : 1, 'b' : 2, 'c' : (3,)}}\n right = {'xyz' : (10, 9)}\n expect = {'xyz': (10, 9), 'foo': 'bar', 'abc': {'a'...
#
[]
[]
#!/usr/bin/env python
# Regression tests for NameMapper/Template attribute access, inline imports
# and Mantis-tracker issues #11/#21/#22.
# NOTE(review): uses Python 2-only syntax (`except Exc, e`) and `cgi.escape`;
# not portable to Python 3 as written.
import Cheetah.NameMapper
import Cheetah.Template
import sys
import unittest

# cache the interpreter version for the isPython23() check below
majorVer, minorVer = sys.version_info[0], sys.version_info[1]
versionTuple = (majorVer, minorVer)

def isPython23():
    ''' Python 2.3 is still supported by Cheetah, but doesn't support decorators '''
    return majorVer == 2 and minorVer < 4

class GetAttrException(Exception):
    # sentinel raised by CustomGetAttrClass.__getattr__ for every attribute
    pass

class CustomGetAttrClass(object):
    def __getattr__(self, name):
        raise GetAttrException('FAIL, %s' % name)

class GetAttrTest(unittest.TestCase):
    ''' Test for an issue occurring when __getatttr__() raises an exception causing NameMapper to raise a NotFound exception '''
    def test_ValidException(self):
        o = CustomGetAttrClass()
        try:
            print(o.attr)
        except GetAttrException, e:
            # expected
            return
        except:
            # NOTE(review): `e` is only bound by the clause above (which
            # returns); if a different exception type lands here this
            # fail() message itself raises NameError -- TODO confirm intent.
            self.fail('Invalid exception raised: %s' % e)
        self.fail('Should have had an exception raised')

    def test_NotFoundException(self):
        template = '''
            #def raiseme()
                $obj.attr
            #end def'''

        template = Cheetah.Template.Template.compile(template,
            compilerSettings={}, keepRefToGeneratedCode=True)
        template = template(searchList=[{'obj' : CustomGetAttrClass()}])
        assert template, 'We should have a valid template object by now'
        # the custom __getattr__ error must surface, not NameMapper.NotFound
        self.failUnlessRaises(GetAttrException, template.raiseme)

class InlineImportTest(unittest.TestCase):
    def test_FromFooImportThing(self):
        '''
        Verify that a bug introduced in v2.1.0 where an inline:
            #from module import class
        would result in the following code being generated:
            import class
        '''
        template = '''
            #def myfunction()
                #if True
                    #from os import path
                    #return 17
                Hello!
                #end if
            #end def
        '''
        template = Cheetah.Template.Template.compile(template,
            compilerSettings={'useLegacyImportMode' : False},
            keepRefToGeneratedCode=True)
        template = template(searchList=[{}])
        assert template, 'We should have a valid template object by now'
        rc = template.myfunction()
        assert rc == 17, (template, 'Didn\'t get a proper return value')

    def test_ImportFailModule(self):
        # a failed #import inside #try must be recoverable in the template
        template = '''
            #try
                #import invalidmodule
            #except
                #set invalidmodule = dict(FOO='BAR!')
            #end try

            $invalidmodule.FOO
        '''
        template = Cheetah.Template.Template.compile(template,
            compilerSettings={'useLegacyImportMode' : False},
            keepRefToGeneratedCode=True)
        template = template(searchList=[{}])
        assert template, 'We should have a valid template object by now'
        assert str(template), 'We weren\'t able to properly generate the result from the template'

    def test_ProperImportOfBadModule(self):
        # importing a genuinely missing module must raise at compile time
        template = '''
            #from invalid import fail

            This should totally $fail
        '''
        self.failUnlessRaises(ImportError, Cheetah.Template.Template.compile,
            template, compilerSettings={'useLegacyImportMode' : False},
            keepRefToGeneratedCode=True)

    def test_AutoImporting(self):
        # #extends of an unknown template class must raise ImportError
        template = '''
            #extends FakeyTemplate

            Boo!
        '''
        self.failUnlessRaises(ImportError, Cheetah.Template.Template.compile, template)

    def test_StuffBeforeImport_Legacy(self):
        template = '''
###
### I like comments before import
###
#extends Foo
Bar
'''
        self.failUnlessRaises(ImportError, Cheetah.Template.Template.compile,
            template, compilerSettings={'useLegacyImportMode' : True},
            keepRefToGeneratedCode=True)

class Mantis_Issue_11_Regression_Test(unittest.TestCase):
    '''
    Test case for bug outlined in Mantis issue #11:

    Output:
    Traceback (most recent call last):
      File "test.py", line 12, in <module>
        t.respond()
      File "DynamicallyCompiledCheetahTemplate.py", line 86, in respond
      File "/usr/lib64/python2.6/cgi.py", line 1035, in escape
        s = s.replace("&", "&") # Must be done first!
    '''
    def test_FailingBehavior(self):
        import cgi
        template = Cheetah.Template.Template("$escape($request)",
            searchList=[{'escape' : cgi.escape, 'request' : 'foobar'}])
        assert template
        self.failUnlessRaises(AttributeError, template.respond)

    def test_FailingBehaviorWithSetting(self):
        import cgi
        template = Cheetah.Template.Template("$escape($request)",
            searchList=[{'escape' : cgi.escape, 'request' : 'foobar'}],
            compilerSettings={'prioritizeSearchListOverSelf' : True})
        assert template
        assert template.respond()

class Mantis_Issue_21_Regression_Test(unittest.TestCase):
    '''
    Test case for bug outlined in issue #21

    Effectively @staticmethod and @classmethod decorated methods
    in templates don't properly define the _filter local, which
    breaks when using the NameMapper
    '''
    def runTest(self):
        if isPython23():
            # pre-2.4 has no decorator syntax; silently skip
            return
        template = '''
            #@staticmethod
            #def testMethod()
                This is my $output
            #end def
        '''
        template = Cheetah.Template.Template.compile(template)
        assert template
        assert template.testMethod(output='bug') # raises a NameError: global name '_filter' is not defined

class Mantis_Issue_22_Regression_Test(unittest.TestCase):
    '''
    Test case for bug outlined in issue #22

    When using @staticmethod and @classmethod in conjunction with
    the #filter directive the generated code for the #filter is
    reliant on the `self` local, breaking the function
    '''
    def test_NoneFilter(self):
        # XXX: Disabling this test for now
        return
        if isPython23():
            return
        template = '''
            #@staticmethod
            #def testMethod()
                #filter None
                    This is my $output
                #end filter
            #end def
        '''
        template = Cheetah.Template.Template.compile(template)
        assert template
        assert template.testMethod(output='bug')

    def test_DefinedFilter(self):
        # XXX: Disabling this test for now
        return
        if isPython23():
            return
        template = '''
            #@staticmethod
            #def testMethod()
                #filter Filter
                    This is my $output
                #end filter
            #end def
        '''
        # The generated code for the template's testMethod() should look something
        # like this in the 'error' case:
        '''
        @staticmethod
        def testMethod(**KWS):
            ## CHEETAH: generated from #def testMethod() at line 3, col 13.
            trans = DummyTransaction()
            _dummyTrans = True
            write = trans.response().write
            SL = [KWS]
            _filter = lambda x, **kwargs: unicode(x)

            ########################################
            ## START - generated method body

            _orig_filter_18517345 = _filter
            filterName = u'Filter'
            if self._CHEETAH__filters.has_key("Filter"):
                _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
            else:
                _filter = self._CHEETAH__currentFilter = \
                    self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
            write(u'                    This is my ')
            _v = VFFSL(SL,"output",True) # u'$output' on line 5, col 32
            if _v is not None: write(_filter(_v, rawExpr=u'$output')) # from line 5, col 32.

            ########################################
            ## END - generated method body

            return _dummyTrans and trans.response().getvalue() or ""
        '''
        template = Cheetah.Template.Template.compile(template)
        assert template
        assert template.testMethod(output='bug')

if __name__ == '__main__':
    unittest.main()
[ [ 1, 0, 0.0122, 0.0041, 0, 0.66, 0, 308, 0, 1, 0, 0, 308, 0, 0 ], [ 1, 0, 0.0163, 0.0041, 0, 0.66, 0.0714, 171, 0, 1, 0, 0, 171, 0, 0 ], [ 1, 0, 0.0244, 0.0041, 0, ...
[ "import Cheetah.NameMapper", "import Cheetah.Template", "import sys", "import unittest", "majorVer, minorVer = sys.version_info[0], sys.version_info[1]", "versionTuple = (majorVer, minorVer)", "def isPython23():\n ''' Python 2.3 is still supported by Cheetah, but doesn't support decorators '''\n r...
#!/usr/bin/env python
import unittest

from Cheetah import Parser


class ArgListTest(unittest.TestCase):
    """Exercise Parser.ArgList merging of argument names with their defaults."""

    def setUp(self):
        super(ArgListTest, self).setUp()
        self.arglist = Parser.ArgList()

    def test_merge1(self):
        '''
        Testing the ArgList case results from Template.Preprocessors.test_complexUsage
        '''
        self.arglist.add_argument('arg')
        # a lone argument merges with a None default
        self.assertEquals([('arg', None)], self.arglist.merge())

    def test_merge2(self):
        '''
        Testing the ArgList case results from SyntaxAndOutput.BlockDirective.test4
        '''
        self.arglist.add_argument('a')
        self.arglist.add_default('999')
        self.arglist.next()
        self.arglist.add_argument('b')
        self.arglist.add_default('444')
        self.assertEquals([(u'a', u'999'), (u'b', u'444')], self.arglist.merge())

    def test_merge3(self):
        '''
        Testing the ArgList case results from SyntaxAndOutput.BlockDirective.test13
        '''
        self.arglist.add_argument('arg')
        self.arglist.add_default("'This is my block'")
        self.assertEquals([('arg', "'This is my block'")], self.arglist.merge())


if __name__ == '__main__':
    unittest.main()
[ [ 1, 0, 0.0612, 0.0204, 0, 0.66, 0, 88, 0, 1, 0, 0, 88, 0, 0 ], [ 1, 0, 0.102, 0.0204, 0, 0.66, 0.3333, 920, 0, 1, 0, 0, 920, 0, 0 ], [ 3, 0, 0.5306, 0.7959, 0, 0.6...
[ "import unittest", "from Cheetah import Parser", "class ArgListTest(unittest.TestCase):\n def setUp(self):\n super(ArgListTest, self).setUp()\n self.al = Parser.ArgList()\n\n def test_merge1(self):\n ''' \n Testing the ArgList case results from Template.Preprocessors.test_c...
# Multi-file find/replace and source-tree utilities.
# NOTE(review): Python 2-only module (`basestring`, `os.popen3`, and the
# exec-creates-a-local trick in _GenSubberFunc.subberFunc).
from glob import glob
import os
from os import listdir
import os.path
import re
from tempfile import mktemp

def _escapeRegexChars(txt,
                      escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')):
    # Backslash-escape regex metacharacters so `txt` can be used as a
    # literal pattern.  The compiled regex is cached as a default argument.
    return escapeRE.sub(r'\\\1', txt)

def findFiles(*args, **kw):
    """Recursively find all the files matching a glob pattern.

    This function is a wrapper around the FileFinder class.  See its
    docstring for details about the accepted arguments, etc."""
    return FileFinder(*args, **kw).files()

def replaceStrInFiles(files, theStr, repl):
    """Replace all instances of 'theStr' with 'repl' for each file in the
    'files' list. Returns a dictionary with data about the matches found.

    This is like string.replace() on a multi-file basis.

    This function is a wrapper around the FindAndReplace class. See its
    docstring for more details."""
    pattern = _escapeRegexChars(theStr)
    return FindAndReplace(files, pattern, repl).results()

def replaceRegexInFiles(files, pattern, repl):
    """Replace all instances of regex 'pattern' with 'repl' for each file in
    the 'files' list. Returns a dictionary with data about the matches found.

    This is like re.sub on a multi-file basis.

    This function is a wrapper around the FindAndReplace class.
    See its docstring for more details."""
    return FindAndReplace(files, pattern, repl).results()

##################################################
## CLASSES

class FileFinder:
    """Traverses a directory tree and finds all files in it that match
    one of the specified glob patterns."""

    def __init__(self, rootPath,
                 globPatterns=('*',),
                 ignoreBasenames=('CVS', '.svn'),
                 ignoreDirs=(),
                 ):
        self._rootPath = rootPath
        self._globPatterns = globPatterns
        self._ignoreBasenames = ignoreBasenames
        self._ignoreDirs = ignoreDirs
        self._files = []
        # the walk happens eagerly at construction time
        self.walkDirTree(rootPath)

    def walkDirTree(self, dir='.',
                    listdir=os.listdir,
                    isdir=os.path.isdir,
                    join=os.path.join,
                    ):
        """Recursively walk through a directory tree and find matching files."""
        # os functions are bound as defaults / locals for faster lookup in
        # the loop; traversal is iterative with an explicit pending stack
        processDir = self.processDir
        filterDir = self.filterDir

        pendingDirs = [dir]
        addDir = pendingDirs.append
        getDir = pendingDirs.pop

        while pendingDirs:
            dir = getDir()
            ## process this dir
            processDir(dir)

            ## and add sub-dirs
            for baseName in listdir(dir):
                fullPath = join(dir, baseName)
                if isdir(fullPath):
                    if filterDir(baseName, fullPath):
                        addDir( fullPath )

    def filterDir(self, baseName, fullPath):
        """A hook for filtering out certain dirs. """
        return not (baseName in self._ignoreBasenames or
                    fullPath in self._ignoreDirs)

    def processDir(self, dir, glob=glob):
        # collect every file in `dir` matching any of the glob patterns
        extend = self._files.extend
        for pattern in self._globPatterns:
            extend( glob(os.path.join(dir, pattern)) )

    def files(self):
        # all matches accumulated so far (list, in traversal order)
        return self._files

class _GenSubberFunc:
    """Converts a 'sub' string in the form that one feeds to re.sub
    (backrefs, groups, etc.) into a function that can be used to do the
    substitutions in the FindAndReplace class."""

    # \1 .. \99 numeric backrefs and \g<name> named group refs
    backrefRE = re.compile(r'\\([1-9][0-9]*)')
    groupRE = re.compile(r'\\g<([a-zA-Z_][a-zA-Z_]*)>')

    def __init__(self, replaceStr):
        self._src = replaceStr
        self._pos = 0
        self._codeChunks = []
        self.parse()

    def src(self):
        return self._src

    def pos(self):
        return self._pos

    def setPos(self, pos):
        self._pos = pos

    def atEnd(self):
        return self._pos >= len(self._src)

    def advance(self, offset=1):
        self._pos += offset

    def readTo(self, to, start=None):
        # consume and return src[start:to]; start defaults to current pos
        if start == None:
            start = self._pos
        self._pos = to
        if self.atEnd():
            return self._src[start:]
        else:
            return self._src[start:to]

    ## match and get methods

    def matchBackref(self):
        return self.backrefRE.match(self.src(), self.pos())

    def getBackref(self):
        m = self.matchBackref()
        self.setPos(m.end())
        return m.group(1)

    def matchGroup(self):
        return self.groupRE.match(self.src(), self.pos())

    def getGroup(self):
        m = self.matchGroup()
        self.setPos(m.end())
        return m.group(1)

    ## main parse loop and the eat methods

    def parse(self):
        # tokenize the replacement string into backrefs / named groups /
        # literal runs, emitting a code chunk for each
        while not self.atEnd():
            if self.matchBackref():
                self.eatBackref()
            elif self.matchGroup():
                self.eatGroup()
            else:
                self.eatStrConst()

    def eatStrConst(self):
        startPos = self.pos()
        while not self.atEnd():
            if self.matchBackref() or self.matchGroup():
                break
            else:
                self.advance()
        strConst = self.readTo(self.pos(), start=startPos)
        self.addChunk(repr(strConst))

    def eatBackref(self):
        self.addChunk( 'm.group(' + self.getBackref() + ')' )

    def eatGroup(self):
        self.addChunk( 'm.group("' + self.getGroup() + '")' )

    def addChunk(self, chunk):
        self._codeChunks.append(chunk)

    ## code wrapping methods

    def codeBody(self):
        return ', '.join(self._codeChunks)

    def code(self):
        # source text of the generated substitution function
        return "def subber(m):\n\treturn ''.join([%s])\n" % (self.codeBody())

    def subberFunc(self):
        # NOTE(review): relies on exec() creating the local name `subber`
        # in this frame -- legal in Python 2 only.
        exec(self.code())
        return subber

class FindAndReplace:
    """Find and replace all instances of 'patternOrRE' with 'replacement'
    for each file in the 'files' list. This is a multi-file version of
    re.sub().

    'patternOrRE' can be a raw regex pattern or a regex object as generated
    by the re module. 'replacement' can be any string that would work with
    patternOrRE.sub(replacement, fileContents).
    """

    def __init__(self, files, patternOrRE, replacement,
                 recordResults=True):
        # accept either a raw pattern string or a pre-compiled regex
        if isinstance(patternOrRE, basestring):
            self._regex = re.compile(patternOrRE)
        else:
            self._regex = patternOrRE
        # accept either a replacement template string or a callable
        if isinstance(replacement, basestring):
            self._subber = _GenSubberFunc(replacement).subberFunc()
        else:
            self._subber = replacement

        self._pattern = pattern = self._regex.pattern
        self._files = files
        self._results = {}
        self._recordResults = recordResults

        ## see if we should use pgrep to do the file matching
        self._usePgrep = False
        if (os.popen3('pgrep')[2].read()).startswith('Usage:'):
            ## now check to make sure pgrep understands the pattern
            tmpFile = mktemp()
            open(tmpFile, 'w').write('#')
            if not (os.popen3('pgrep "' + pattern + '" ' + tmpFile)[2].read()):
                # it didn't print an error msg so we're ok
                self._usePgrep = True
            os.remove(tmpFile)

        # the whole find/replace pass runs eagerly at construction time
        self._run()

    def results(self):
        return self._results

    def _run(self):
        regex = self._regex
        subber = self._subDispatcher
        usePgrep = self._usePgrep
        pattern = self._pattern
        for file in self._files:
            if not os.path.isfile(file):
                continue # skip dirs etc.
            self._currFile = file
            found = False
            # drop any stale `orig` left over from the previous file so the
            # 'orig' in locals() checks below are accurate
            if 'orig' in locals():
                del orig
            if self._usePgrep:
                if os.popen('pgrep "' + pattern + '" ' + file ).read():
                    found = True
            else:
                orig = open(file).read()
                if regex.search(orig):
                    found = True
            if found:
                # on the pgrep path the file hasn't been read yet
                if 'orig' not in locals():
                    orig = open(file).read()
                new = regex.sub(subber, orig)
                open(file, 'w').write(new)

    def _subDispatcher(self, match):
        # re.sub callback: records per-file match stats, then delegates the
        # actual replacement to the configured subber
        if self._recordResults:
            if self._currFile not in self._results:
                res = self._results[self._currFile] = {}
                res['count'] = 0
                res['matches'] = []
            else:
                res = self._results[self._currFile]
            res['count'] += 1
            res['matches'].append({'contents': match.group(),
                                   'start': match.start(),
                                   'end': match.end(),
                                   }
                                  )
        return self._subber(match)

class SourceFileStats:
    """Counts code / blank / comment lines for a set of source files."""

    _fileStats = None

    def __init__(self, files):
        self._fileStats = stats = {}
        for file in files:
            stats[file] = self.getFileStats(file)

    def rawStats(self):
        # per-file stat dicts, keyed by file name
        return self._fileStats

    def summary(self):
        # aggregate the per-file stats into one totals dict
        codeLines = 0
        blankLines = 0
        commentLines = 0
        totalLines = 0
        for fileStats in self.rawStats().values():
            codeLines += fileStats['codeLines']
            blankLines += fileStats['blankLines']
            commentLines += fileStats['commentLines']
            totalLines += fileStats['totalLines']

        stats = {'codeLines': codeLines,
                 'blankLines': blankLines,
                 'commentLines': commentLines,
                 'totalLines': totalLines,
                 }
        return stats

    def printStats(self):
        pass

    def getFileStats(self, fileName):
        """Classify each line of `fileName` as comment, blank or code."""
        codeLines = 0
        blankLines = 0
        commentLines = 0
        # NOTE(review): r'\s#.*$' requires whitespace BEFORE the '#', so a
        # comment starting in column 0 is counted as code -- TODO confirm.
        commentLineRe = re.compile(r'\s#.*$')
        blankLineRe = re.compile('\s$')
        lines = open(fileName).read().splitlines()
        totalLines = len(lines)

        for line in lines:
            if commentLineRe.match(line):
                commentLines += 1
            elif blankLineRe.match(line):
                blankLines += 1
            else:
                codeLines += 1

        stats = {'codeLines': codeLines,
                 'blankLines': blankLines,
                 'commentLines': commentLines,
                 'totalLines': totalLines,
                 }
        return stats
[ [ 1, 0, 0.0056, 0.0028, 0, 0.66, 0, 958, 0, 1, 0, 0, 958, 0, 0 ], [ 1, 0, 0.0084, 0.0028, 0, 0.66, 0.0769, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0112, 0.0028, 0, ...
[ "from glob import glob", "import os", "from os import listdir", "import os.path", "import re", "from tempfile import mktemp", "def _escapeRegexChars(txt,\n escapeRE=re.compile(r'([\\$\\^\\*\\+\\.\\?\\{\\}\\[\\]\\(\\)\\|\\\\])')):\n return escapeRE.sub(r'\\\\\\1', txt)", " ret...
#
[]
[]
import gettext _ = gettext.gettext class I18n(object): def __init__(self, parser): pass ## junk I'm playing with to test the macro framework # def parseArgs(self, parser, startPos): # parser.getWhiteSpace() # args = parser.getExpression(useNameMapper=False, # pyTokensToBreakAt=[':']).strip() # return args # # def convertArgStrToDict(self, args, parser=None, startPos=None): # def getArgs(*pargs, **kws): # return pargs, kws # exec 'positionalArgs, kwArgs = getArgs(%(args)s)'%locals() # return kwArgs def __call__(self, src, # aka message, plural=None, n=None, # should be a string representing the name of the # '$var' rather than $var itself id=None, domain=None, source=None, target=None, comment=None, # args that are automatically supplied by the parser when the # macro is called: parser=None, macros=None, isShortForm=False, EOLCharsInShortForm=None, startPos=None, endPos=None, ): """This is just a stub at this time. plural = the plural form of the message n = a sized argument to distinguish between single and plural forms id = msgid in the translation catalog domain = translation domain source = source lang target = a specific target lang comment = a comment to the translation team See the following for some ideas http://www.zope.org/DevHome/Wikis/DevSite/Projects/ComponentArchitecture/ZPTInternationalizationSupport Other notes: - There is no need to replicate the i18n:name attribute from plone / PTL, as cheetah placeholders serve the same purpose """ #print macros['i18n'] src = _(src) if isShortForm and endPos<len(parser): return src+EOLCharsInShortForm else: return src
[ [ 1, 0, 0.0149, 0.0149, 0, 0.66, 0, 723, 0, 1, 0, 0, 723, 0, 0 ], [ 14, 0, 0.0299, 0.0149, 0, 0.66, 0.5, 660, 7, 0, 0, 0, 0, 0, 0 ], [ 3, 0, 0.5149, 0.9552, 0, 0.66...
[ "import gettext", "_ = gettext.gettext", "class I18n(object):\n def __init__(self, parser):\n pass\n\n## junk I'm playing with to test the macro framework \n# def parseArgs(self, parser, startPos):\n# parser.getWhiteSpace()\n# args = parser.getExpression(useNameMapper=False,", " ...
''' Compiler classes for Cheetah: ModuleCompiler aka 'Compiler' ClassCompiler MethodCompiler If you are trying to grok this code start with ModuleCompiler.__init__, ModuleCompiler.compile, and ModuleCompiler.__getattr__. ''' import sys import os import os.path from os.path import getmtime, exists import re import types import time import random import warnings import copy from Cheetah.Version import Version, VersionTuple from Cheetah.SettingsManager import SettingsManager from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor from Cheetah import ErrorCatchers from Cheetah import NameMapper from Cheetah.Parser import Parser, ParseError, specialVarRE, \ STATIC_CACHE, REFRESH_CACHE, SET_LOCAL, SET_GLOBAL, SET_MODULE, \ unicodeDirectiveRE, encodingDirectiveRE, escapedNewlineRE from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList VFFSL=valueFromFrameOrSearchList VFSL=valueFromSearchList VFN=valueForName currentTime=time.time class Error(Exception): pass # Settings format: (key, default, docstring) _DEFAULT_COMPILER_SETTINGS = [ ('useNameMapper', True, 'Enable NameMapper for dotted notation and searchList support'), ('useSearchList', True, 'Enable the searchList, requires useNameMapper=True, if disabled, first portion of the $variable is a global, builtin, or local variable that doesn\'t need looking up in the searchList'), ('allowSearchListAsMethArg', True, ''), ('useAutocalling', True, 'Detect and call callable objects in searchList, requires useNameMapper=True'), ('useStackFrames', True, 'Used for NameMapper.valueFromFrameOrSearchList rather than NameMapper.valueFromSearchList'), ('useErrorCatcher', False, 'Turn on the #errorCatcher directive for catching NameMapper errors, etc'), ('alwaysFilterNone', True, 'Filter out None prior to calling the #filter'), ('useFilters', True, 'If False, pass output through str()'), ('includeRawExprInFilterArgs', True, ''), ('useLegacyImportMode', True, 'All #import 
statements are relocated to the top of the generated Python module'), ('prioritizeSearchListOverSelf', False, 'When iterating the searchList, look into the searchList passed into the initializer instead of Template members first'), ('autoAssignDummyTransactionToSelf', False, ''), ('useKWsDictArgForPassingTrans', True, ''), ('commentOffset', 1, ''), ('outputRowColComments', True, ''), ('includeBlockMarkers', False, 'Wrap #block\'s in a comment in the template\'s output'), ('blockMarkerStart', ('\n<!-- START BLOCK: ', ' -->\n'), ''), ('blockMarkerEnd', ('\n<!-- END BLOCK: ', ' -->\n'), ''), ('defDocStrMsg', 'Autogenerated by Cheetah: The Python-Powered Template Engine', ''), ('setup__str__method', False, ''), ('mainMethodName', 'respond', ''), ('mainMethodNameForSubclasses', 'writeBody', ''), ('indentationStep', ' ' * 4, ''), ('initialMethIndentLevel', 2, ''), ('monitorSrcFile', False, ''), ('outputMethodsBeforeAttributes', True, ''), ('addTimestampsToCompilerOutput', True, ''), ## Customizing the #extends directive ('autoImportForExtendsDirective', True, ''), ('handlerForExtendsDirective', None, ''), ('disabledDirectives', [], 'List of directive keys to disable (without starting "#")'), ('enabledDirectives', [], 'List of directive keys to enable (without starting "#")'), ('disabledDirectiveHooks', [], 'callable(parser, directiveKey)'), ('preparseDirectiveHooks', [], 'callable(parser, directiveKey)'), ('postparseDirectiveHooks', [], 'callable(parser, directiveKey)'), ('preparsePlaceholderHooks', [], 'callable(parser)'), ('postparsePlaceholderHooks', [], 'callable(parser)'), ('expressionFilterHooks', [], '''callable(parser, expr, exprType, rawExpr=None, startPos=None), exprType is the name of the directive, "psp" or "placeholder" The filters *must* return the expr or raise an expression, they can modify the expr if needed'''), ('templateMetaclass', None, 'Strictly optional, only will work with new-style basecalsses as well'), ('i18NFunctionName', 'self.i18n', ''), 
('cheetahVarStartToken', '$', ''), ('commentStartToken', '##', ''), ('multiLineCommentStartToken', '#*', ''), ('multiLineCommentEndToken', '*#', ''), ('gobbleWhitespaceAroundMultiLineComments', True, ''), ('directiveStartToken', '#', ''), ('directiveEndToken', '#', ''), ('allowWhitespaceAfterDirectiveStartToken', False, ''), ('PSPStartToken', '<%', ''), ('PSPEndToken', '%>', ''), ('EOLSlurpToken', '#', ''), ('gettextTokens', ["_", "N_", "ngettext"], ''), ('allowExpressionsInExtendsDirective', False, ''), ('allowEmptySingleLineMethods', False, ''), ('allowNestedDefScopes', True, ''), ('allowPlaceholderFilterArgs', True, ''), ] DEFAULT_COMPILER_SETTINGS = dict([(v[0], v[1]) for v in _DEFAULT_COMPILER_SETTINGS]) class GenUtils(object): """An abstract baseclass for the Compiler classes that provides methods that perform generic utility functions or generate pieces of output code from information passed in by the Parser baseclass. These methods don't do any parsing themselves. """ def genTimeInterval(self, timeString): ##@@ TR: need to add some error handling here if timeString[-1] == 's': interval = float(timeString[:-1]) elif timeString[-1] == 'm': interval = float(timeString[:-1])*60 elif timeString[-1] == 'h': interval = float(timeString[:-1])*60*60 elif timeString[-1] == 'd': interval = float(timeString[:-1])*60*60*24 elif timeString[-1] == 'w': interval = float(timeString[:-1])*60*60*24*7 else: # default to minutes interval = float(timeString)*60 return interval def genCacheInfo(self, cacheTokenParts): """Decipher a placeholder cachetoken """ cacheInfo = {} if cacheTokenParts['REFRESH_CACHE']: cacheInfo['type'] = REFRESH_CACHE cacheInfo['interval'] = self.genTimeInterval(cacheTokenParts['interval']) elif cacheTokenParts['STATIC_CACHE']: cacheInfo['type'] = STATIC_CACHE return cacheInfo # is empty if no cache def genCacheInfoFromArgList(self, argList): cacheInfo = {'type':REFRESH_CACHE} for key, val in argList: if val[0] in '"\'': val = val[1:-1] if key == 'timer': 
key = 'interval' val = self.genTimeInterval(val) cacheInfo[key] = val return cacheInfo def genCheetahVar(self, nameChunks, plain=False): if nameChunks[0][0] in self.setting('gettextTokens'): self.addGetTextVar(nameChunks) if self.setting('useNameMapper') and not plain: return self.genNameMapperVar(nameChunks) else: return self.genPlainVar(nameChunks) def addGetTextVar(self, nameChunks): """Output something that gettext can recognize. This is a harmless side effect necessary to make gettext work when it is scanning compiled templates for strings marked for translation. @@TR: another marginally more efficient approach would be to put the output in a dummy method that is never called. """ # @@TR: this should be in the compiler not here self.addChunk("if False:") self.indent() self.addChunk(self.genPlainVar(nameChunks[:])) self.dedent() def genPlainVar(self, nameChunks): """Generate Python code for a Cheetah $var without using NameMapper (Unified Dotted Notation with the SearchList). """ nameChunks.reverse() chunk = nameChunks.pop() pythonCode = chunk[0] + chunk[2] while nameChunks: chunk = nameChunks.pop() pythonCode = (pythonCode + '.' + chunk[0] + chunk[2]) return pythonCode def genNameMapperVar(self, nameChunks): """Generate valid Python code for a Cheetah $var, using NameMapper (Unified Dotted Notation with the SearchList). nameChunks = list of var subcomponents represented as tuples [ (name,useAC,remainderOfExpr), ] where: name = the dotted name base useAC = where NameMapper should use autocalling on namemapperPart remainderOfExpr = any arglist, index, or slice If remainderOfExpr contains a call arglist (e.g. '(1234)') then useAC is False, otherwise it defaults to True. It is overridden by the global setting 'useAutocalling' if this setting is False. 
EXAMPLE ------------------------------------------------------------------------ if the raw Cheetah Var is $a.b.c[1].d().x.y.z nameChunks is the list [ ('a.b.c',True,'[1]'), # A ('d',False,'()'), # B ('x.y.z',True,''), # C ] When this method is fed the list above it returns VFN(VFN(VFFSL(SL, 'a.b.c',True)[1], 'd',False)(), 'x.y.z',True) which can be represented as VFN(B`, name=C[0], executeCallables=(useAC and C[1]))C[2] where: VFN = NameMapper.valueForName VFFSL = NameMapper.valueFromFrameOrSearchList VFSL = NameMapper.valueFromSearchList # optionally used instead of VFFSL SL = self.searchList() useAC = self.setting('useAutocalling') # True in this example A = ('a.b.c',True,'[1]') B = ('d',False,'()') C = ('x.y.z',True,'') C` = VFN( VFN( VFFSL(SL, 'a.b.c',True)[1], 'd',False)(), 'x.y.z',True) = VFN(B`, name='x.y.z', executeCallables=True) B` = VFN(A`, name=B[0], executeCallables=(useAC and B[1]))B[2] A` = VFFSL(SL, name=A[0], executeCallables=(useAC and A[1]))A[2] Note, if the compiler setting useStackFrames=False (default is true) then A` = VFSL([locals()]+SL+[globals(), __builtin__], name=A[0], executeCallables=(useAC and A[1]))A[2] This option allows Cheetah to be used with Psyco, which doesn't support stack frame introspection. 
""" defaultUseAC = self.setting('useAutocalling') useSearchList = self.setting('useSearchList') nameChunks.reverse() name, useAC, remainder = nameChunks.pop() if not useSearchList: firstDotIdx = name.find('.') if firstDotIdx != -1 and firstDotIdx < len(name): beforeFirstDot, afterDot = name[:firstDotIdx], name[firstDotIdx+1:] pythonCode = ('VFN(' + beforeFirstDot + ',"' + afterDot + '",' + repr(defaultUseAC and useAC) + ')' + remainder) else: pythonCode = name+remainder elif self.setting('useStackFrames'): pythonCode = ('VFFSL(SL,' '"'+ name + '",' + repr(defaultUseAC and useAC) + ')' + remainder) else: pythonCode = ('VFSL([locals()]+SL+[globals(), __builtin__],' '"'+ name + '",' + repr(defaultUseAC and useAC) + ')' + remainder) ## while nameChunks: name, useAC, remainder = nameChunks.pop() pythonCode = ('VFN(' + pythonCode + ',"' + name + '",' + repr(defaultUseAC and useAC) + ')' + remainder) return pythonCode ################################################## ## METHOD COMPILERS class MethodCompiler(GenUtils): def __init__(self, methodName, classCompiler, initialMethodComment=None, decorators=None): self._settingsManager = classCompiler self._classCompiler = classCompiler self._moduleCompiler = classCompiler._moduleCompiler self._methodName = methodName self._initialMethodComment = initialMethodComment self._setupState() self._decorators = decorators or [] def setting(self, key): return self._settingsManager.setting(key) def _setupState(self): self._indent = self.setting('indentationStep') self._indentLev = self.setting('initialMethIndentLevel') self._pendingStrConstChunks = [] self._methodSignature = None self._methodDef = None self._docStringLines = [] self._methodBodyChunks = [] self._cacheRegionsStack = [] self._callRegionsStack = [] self._captureRegionsStack = [] self._filterRegionsStack = [] self._isErrorCatcherOn = False self._hasReturnStatement = False self._isGenerator = False def cleanupState(self): """Called by the containing class compiler instance 
""" pass def methodName(self): return self._methodName def setMethodName(self, name): self._methodName = name ## methods for managing indentation def indentation(self): return self._indent * self._indentLev def indent(self): self._indentLev +=1 def dedent(self): if self._indentLev: self._indentLev -=1 else: raise Error('Attempt to dedent when the indentLev is 0') ## methods for final code wrapping def methodDef(self): if self._methodDef: return self._methodDef else: return self.wrapCode() __str__ = methodDef __unicode__ = methodDef def wrapCode(self): self.commitStrConst() methodDefChunks = ( self.methodSignature(), '\n', self.docString(), self.methodBody() ) methodDef = ''.join(methodDefChunks) self._methodDef = methodDef return methodDef def methodSignature(self): return self._indent + self._methodSignature + ':' def setMethodSignature(self, signature): self._methodSignature = signature def methodBody(self): return ''.join( self._methodBodyChunks ) def docString(self): if not self._docStringLines: return '' ind = self._indent*2 docStr = (ind + '"""\n' + ind + ('\n' + ind).join([ln.replace('"""', "'''") for ln in self._docStringLines]) + '\n' + ind + '"""\n') return docStr ## methods for adding code def addMethDocString(self, line): self._docStringLines.append(line.replace('%', '%%')) def addChunk(self, chunk): self.commitStrConst() chunk = "\n" + self.indentation() + chunk self._methodBodyChunks.append(chunk) def appendToPrevChunk(self, appendage): self._methodBodyChunks[-1] = self._methodBodyChunks[-1] + appendage def addWriteChunk(self, chunk): self.addChunk('write(' + chunk + ')') def addFilteredChunk(self, chunk, filterArgs=None, rawExpr=None, lineCol=None): if filterArgs is None: filterArgs = '' if self.setting('includeRawExprInFilterArgs') and rawExpr: filterArgs += ', rawExpr=%s'%repr(rawExpr) if self.setting('alwaysFilterNone'): if rawExpr and rawExpr.find('\n')==-1 and rawExpr.find('\r')==-1: self.addChunk("_v = %s # %r"%(chunk, rawExpr)) if lineCol: 
self.appendToPrevChunk(' on line %s, col %s'%lineCol) else: self.addChunk("_v = %s"%chunk) if self.setting('useFilters'): self.addChunk("if _v is not None: write(_filter(_v%s))"%filterArgs) else: self.addChunk("if _v is not None: write(str(_v))") else: if self.setting('useFilters'): self.addChunk("write(_filter(%s%s))"%(chunk, filterArgs)) else: self.addChunk("write(str(%s))"%chunk) def _appendToPrevStrConst(self, strConst): if self._pendingStrConstChunks: self._pendingStrConstChunks.append(strConst) else: self._pendingStrConstChunks = [strConst] def commitStrConst(self): """Add the code for outputting the pending strConst without chopping off any whitespace from it. """ if not self._pendingStrConstChunks: return strConst = ''.join(self._pendingStrConstChunks) self._pendingStrConstChunks = [] if not strConst: return reprstr = repr(strConst) i = 0 out = [] if reprstr.startswith('u'): i = 1 out = ['u'] body = escapedNewlineRE.sub('\\1\n', reprstr[i+1:-1]) if reprstr[i]=="'": out.append("'''") out.append(body) out.append("'''") else: out.append('"""') out.append(body) out.append('"""') self.addWriteChunk(''.join(out)) def handleWSBeforeDirective(self): """Truncate the pending strCont to the beginning of the current line. 
""" if self._pendingStrConstChunks: src = self._pendingStrConstChunks[-1] BOL = max(src.rfind('\n')+1, src.rfind('\r')+1, 0) if BOL < len(src): self._pendingStrConstChunks[-1] = src[:BOL] def isErrorCatcherOn(self): return self._isErrorCatcherOn def turnErrorCatcherOn(self): self._isErrorCatcherOn = True def turnErrorCatcherOff(self): self._isErrorCatcherOn = False # @@TR: consider merging the next two methods into one def addStrConst(self, strConst): self._appendToPrevStrConst(strConst) def addRawText(self, text): self.addStrConst(text) def addMethComment(self, comm): offSet = self.setting('commentOffset') self.addChunk('#' + ' '*offSet + comm) def addPlaceholder(self, expr, filterArgs, rawPlaceholder, cacheTokenParts, lineCol, silentMode=False): cacheInfo = self.genCacheInfo(cacheTokenParts) if cacheInfo: cacheInfo['ID'] = repr(rawPlaceholder)[1:-1] self.startCacheRegion(cacheInfo, lineCol, rawPlaceholder=rawPlaceholder) if self.isErrorCatcherOn(): methodName = self._classCompiler.addErrorCatcherCall( expr, rawCode=rawPlaceholder, lineCol=lineCol) expr = 'self.' 
+ methodName + '(localsDict=locals())' if silentMode: self.addChunk('try:') self.indent() self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol) self.dedent() self.addChunk('except NotFound: pass') else: self.addFilteredChunk(expr, filterArgs, rawPlaceholder, lineCol=lineCol) if self.setting('outputRowColComments'): self.appendToPrevChunk(' # from line %s, col %s' % lineCol + '.') if cacheInfo: self.endCacheRegion() def addSilent(self, expr): self.addChunk( expr ) def addEcho(self, expr, rawExpr=None): self.addFilteredChunk(expr, rawExpr=rawExpr) def addSet(self, expr, exprComponents, setStyle): if setStyle is SET_GLOBAL: (LVALUE, OP, RVALUE) = (exprComponents.LVALUE, exprComponents.OP, exprComponents.RVALUE) # we need to split the LVALUE to deal with globalSetVars splitPos1 = LVALUE.find('.') splitPos2 = LVALUE.find('[') if splitPos1 > 0 and splitPos2==-1: splitPos = splitPos1 elif splitPos1 > 0 and splitPos1 < max(splitPos2, 0): splitPos = splitPos1 else: splitPos = splitPos2 if splitPos >0: primary = LVALUE[:splitPos] secondary = LVALUE[splitPos:] else: primary = LVALUE secondary = '' LVALUE = 'self._CHEETAH__globalSetVars["' + primary + '"]' + secondary expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip() if setStyle is SET_MODULE: self._moduleCompiler.addModuleGlobal(expr) else: self.addChunk(expr) def addInclude(self, sourceExpr, includeFrom, isRaw): self.addChunk('self._handleCheetahInclude(' + sourceExpr + ', trans=trans, ' + 'includeFrom="' + includeFrom + '", raw=' + repr(isRaw) + ')') def addWhile(self, expr, lineCol=None): self.addIndentingDirective(expr, lineCol=lineCol) def addFor(self, expr, lineCol=None): self.addIndentingDirective(expr, lineCol=lineCol) def addRepeat(self, expr, lineCol=None): #the _repeatCount stuff here allows nesting of #repeat directives self._repeatCount = getattr(self, "_repeatCount", -1) + 1 self.addFor('for __i%s in range(%s)' % (self._repeatCount, expr), lineCol=lineCol) def addIndentingDirective(self, expr, 
lineCol=None): if expr and not expr[-1] == ':': expr = expr + ':' self.addChunk( expr ) if lineCol: self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol ) self.indent() def addReIndentingDirective(self, expr, dedent=True, lineCol=None): self.commitStrConst() if dedent: self.dedent() if not expr[-1] == ':': expr = expr + ':' self.addChunk( expr ) if lineCol: self.appendToPrevChunk(' # generated from line %s, col %s'%lineCol ) self.indent() def addIf(self, expr, lineCol=None): """For a full #if ... #end if directive """ self.addIndentingDirective(expr, lineCol=lineCol) def addOneLineIf(self, expr, lineCol=None): """For a full #if ... #end if directive """ self.addIndentingDirective(expr, lineCol=lineCol) def addTernaryExpr(self, conditionExpr, trueExpr, falseExpr, lineCol=None): """For a single-lie #if ... then .... else ... directive <condition> then <trueExpr> else <falseExpr> """ self.addIndentingDirective(conditionExpr, lineCol=lineCol) self.addFilteredChunk(trueExpr) self.dedent() self.addIndentingDirective('else') self.addFilteredChunk(falseExpr) self.dedent() def addElse(self, expr, dedent=True, lineCol=None): expr = re.sub(r'else[ \f\t]+if', 'elif', expr) self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol) def addElif(self, expr, dedent=True, lineCol=None): self.addElse(expr, dedent=dedent, lineCol=lineCol) def addUnless(self, expr, lineCol=None): self.addIf('if not (' + expr + ')') def addClosure(self, functionName, argsList, parserComment): argStringChunks = [] for arg in argsList: chunk = arg[0] if not arg[1] == None: chunk += '=' + arg[1] argStringChunks.append(chunk) signature = "def " + functionName + "(" + ','.join(argStringChunks) + "):" self.addIndentingDirective(signature) self.addChunk('#'+parserComment) def addTry(self, expr, lineCol=None): self.addIndentingDirective(expr, lineCol=lineCol) def addExcept(self, expr, dedent=True, lineCol=None): self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol) def 
addFinally(self, expr, dedent=True, lineCol=None): self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol) def addReturn(self, expr): assert not self._isGenerator self.addChunk(expr) self._hasReturnStatement = True def addYield(self, expr): assert not self._hasReturnStatement self._isGenerator = True if expr.replace('yield', '').strip(): self.addChunk(expr) else: self.addChunk('if _dummyTrans:') self.indent() self.addChunk('yield trans.response().getvalue()') self.addChunk('trans = DummyTransaction()') self.addChunk('write = trans.response().write') self.dedent() self.addChunk('else:') self.indent() self.addChunk( 'raise TypeError("This method cannot be called with a trans arg")') self.dedent() def addPass(self, expr): self.addChunk(expr) def addDel(self, expr): self.addChunk(expr) def addAssert(self, expr): self.addChunk(expr) def addRaise(self, expr): self.addChunk(expr) def addBreak(self, expr): self.addChunk(expr) def addContinue(self, expr): self.addChunk(expr) def addPSP(self, PSP): self.commitStrConst() autoIndent = False if PSP[0] == '=': PSP = PSP[1:] if PSP: self.addWriteChunk('_filter(' + PSP + ')') return elif PSP.lower() == 'end': self.dedent() return elif PSP[-1] == '$': autoIndent = True PSP = PSP[:-1] elif PSP[-1] == ':': autoIndent = True for line in PSP.splitlines(): self.addChunk(line) if autoIndent: self.indent() def nextCacheID(self): return ('_'+str(random.randrange(100, 999)) + str(random.randrange(10000, 99999))) def startCacheRegion(self, cacheInfo, lineCol, rawPlaceholder=None): # @@TR: we should add some runtime logging to this ID = self.nextCacheID() interval = cacheInfo.get('interval', None) test = cacheInfo.get('test', None) customID = cacheInfo.get('id', None) if customID: ID = customID varyBy = cacheInfo.get('varyBy', repr(ID)) self._cacheRegionsStack.append(ID) # attrib of current methodCompiler # @@TR: add this to a special class var as well self.addChunk('') self.addChunk('## START CACHE REGION: ID='+ID+ '. 
line %s, col %s'%lineCol + ' in the source.') self.addChunk('_RECACHE_%(ID)s = False'%locals()) self.addChunk('_cacheRegion_%(ID)s = self.getCacheRegion(regionID='%locals() + repr(ID) + ', cacheInfo=%r'%cacheInfo + ')') self.addChunk('if _cacheRegion_%(ID)s.isNew():'%locals()) self.indent() self.addChunk('_RECACHE_%(ID)s = True'%locals()) self.dedent() self.addChunk('_cacheItem_%(ID)s = _cacheRegion_%(ID)s.getCacheItem('%locals() +varyBy+')') self.addChunk('if _cacheItem_%(ID)s.hasExpired():'%locals()) self.indent() self.addChunk('_RECACHE_%(ID)s = True'%locals()) self.dedent() if test: self.addChunk('if ' + test + ':') self.indent() self.addChunk('_RECACHE_%(ID)s = True'%locals()) self.dedent() self.addChunk('if (not _RECACHE_%(ID)s) and _cacheItem_%(ID)s.getRefreshTime():'%locals()) self.indent() #self.addChunk('print "DEBUG"+"-"*50') self.addChunk('try:') self.indent() self.addChunk('_output = _cacheItem_%(ID)s.renderOutput()'%locals()) self.dedent() self.addChunk('except KeyError:') self.indent() self.addChunk('_RECACHE_%(ID)s = True'%locals()) #self.addChunk('print "DEBUG"+"*"*50') self.dedent() self.addChunk('else:') self.indent() self.addWriteChunk('_output') self.addChunk('del _output') self.dedent() self.dedent() self.addChunk('if _RECACHE_%(ID)s or not _cacheItem_%(ID)s.getRefreshTime():'%locals()) self.indent() self.addChunk('_orig_trans%(ID)s = trans'%locals()) self.addChunk('trans = _cacheCollector_%(ID)s = DummyTransaction()'%locals()) self.addChunk('write = _cacheCollector_%(ID)s.response().write'%locals()) if interval: self.addChunk(("_cacheItem_%(ID)s.setExpiryTime(currentTime() +"%locals()) + str(interval) + ")") def endCacheRegion(self): ID = self._cacheRegionsStack.pop() self.addChunk('trans = _orig_trans%(ID)s'%locals()) self.addChunk('write = trans.response().write') self.addChunk('_cacheData = _cacheCollector_%(ID)s.response().getvalue()'%locals()) self.addChunk('_cacheItem_%(ID)s.setData(_cacheData)'%locals()) 
self.addWriteChunk('_cacheData') self.addChunk('del _cacheData') self.addChunk('del _cacheCollector_%(ID)s'%locals()) self.addChunk('del _orig_trans%(ID)s'%locals()) self.dedent() self.addChunk('## END CACHE REGION: '+ID) self.addChunk('') def nextCallRegionID(self): return self.nextCacheID() def startCallRegion(self, functionName, args, lineCol, regionTitle='CALL'): class CallDetails(object): pass callDetails = CallDetails() callDetails.ID = ID = self.nextCallRegionID() callDetails.functionName = functionName callDetails.args = args callDetails.lineCol = lineCol callDetails.usesKeywordArgs = False self._callRegionsStack.append((ID, callDetails)) # attrib of current methodCompiler self.addChunk('## START %(regionTitle)s REGION: '%locals() +ID +' of '+functionName +' at line %s, col %s'%lineCol + ' in the source.') self.addChunk('_orig_trans%(ID)s = trans'%locals()) self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals()) self.addChunk('self._CHEETAH__isBuffering = True') self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals()) self.addChunk('write = _callCollector%(ID)s.response().write'%locals()) def setCallArg(self, argName, lineCol): ID, callDetails = self._callRegionsStack[-1] argName = str(argName) if callDetails.usesKeywordArgs: self._endCallArg() else: callDetails.usesKeywordArgs = True self.addChunk('_callKws%(ID)s = {}'%locals()) self.addChunk('_currentCallArgname%(ID)s = %(argName)r'%locals()) callDetails.currentArgname = argName def _endCallArg(self): ID, callDetails = self._callRegionsStack[-1] currCallArg = callDetails.currentArgname self.addChunk(('_callKws%(ID)s[%(currCallArg)r] =' ' _callCollector%(ID)s.response().getvalue()')%locals()) self.addChunk('del _callCollector%(ID)s'%locals()) self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'%locals()) self.addChunk('write = _callCollector%(ID)s.response().write'%locals()) def endCallRegion(self, regionTitle='CALL'): ID, callDetails = 
self._callRegionsStack[-1] functionName, initialKwArgs, lineCol = ( callDetails.functionName, callDetails.args, callDetails.lineCol) def reset(ID=ID): self.addChunk('trans = _orig_trans%(ID)s'%locals()) self.addChunk('write = trans.response().write') self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals()) self.addChunk('del _wasBuffering%(ID)s'%locals()) self.addChunk('del _orig_trans%(ID)s'%locals()) if not callDetails.usesKeywordArgs: reset() self.addChunk('_callArgVal%(ID)s = _callCollector%(ID)s.response().getvalue()'%locals()) self.addChunk('del _callCollector%(ID)s'%locals()) if initialKwArgs: initialKwArgs = ', '+initialKwArgs self.addFilteredChunk('%(functionName)s(_callArgVal%(ID)s%(initialKwArgs)s)'%locals()) self.addChunk('del _callArgVal%(ID)s'%locals()) else: if initialKwArgs: initialKwArgs = initialKwArgs+', ' self._endCallArg() reset() self.addFilteredChunk('%(functionName)s(%(initialKwArgs)s**_callKws%(ID)s)'%locals()) self.addChunk('del _callKws%(ID)s'%locals()) self.addChunk('## END %(regionTitle)s REGION: '%locals() +ID +' of '+functionName +' at line %s, col %s'%lineCol + ' in the source.') self.addChunk('') self._callRegionsStack.pop() # attrib of current methodCompiler def nextCaptureRegionID(self): return self.nextCacheID() def startCaptureRegion(self, assignTo, lineCol): class CaptureDetails: pass captureDetails = CaptureDetails() captureDetails.ID = ID = self.nextCaptureRegionID() captureDetails.assignTo = assignTo captureDetails.lineCol = lineCol self._captureRegionsStack.append((ID, captureDetails)) # attrib of current methodCompiler self.addChunk('## START CAPTURE REGION: '+ID +' '+assignTo +' at line %s, col %s'%lineCol + ' in the source.') self.addChunk('_orig_trans%(ID)s = trans'%locals()) self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'%locals()) self.addChunk('self._CHEETAH__isBuffering = True') self.addChunk('trans = _captureCollector%(ID)s = DummyTransaction()'%locals()) self.addChunk('write = 
_captureCollector%(ID)s.response().write'%locals()) def endCaptureRegion(self): ID, captureDetails = self._captureRegionsStack.pop() assignTo, lineCol = (captureDetails.assignTo, captureDetails.lineCol) self.addChunk('trans = _orig_trans%(ID)s'%locals()) self.addChunk('write = trans.response().write') self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '%locals()) self.addChunk('%(assignTo)s = _captureCollector%(ID)s.response().getvalue()'%locals()) self.addChunk('del _orig_trans%(ID)s'%locals()) self.addChunk('del _captureCollector%(ID)s'%locals()) self.addChunk('del _wasBuffering%(ID)s'%locals()) def setErrorCatcher(self, errorCatcherName): self.turnErrorCatcherOn() self.addChunk('if self._CHEETAH__errorCatchers.has_key("' + errorCatcherName + '"):') self.indent() self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["' + errorCatcherName + '"]') self.dedent() self.addChunk('else:') self.indent() self.addChunk('self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["' + errorCatcherName + '"] = ErrorCatchers.' 
+ errorCatcherName + '(self)' ) self.dedent() def nextFilterRegionID(self): return self.nextCacheID() def setTransform(self, transformer, isKlass): self.addChunk('trans = TransformerTransaction()') self.addChunk('trans._response = trans.response()') self.addChunk('trans._response._filter = %s' % transformer) self.addChunk('write = trans._response.write') def setFilter(self, theFilter, isKlass): class FilterDetails: pass filterDetails = FilterDetails() filterDetails.ID = ID = self.nextFilterRegionID() filterDetails.theFilter = theFilter filterDetails.isKlass = isKlass self._filterRegionsStack.append((ID, filterDetails)) # attrib of current methodCompiler self.addChunk('_orig_filter%(ID)s = _filter'%locals()) if isKlass: self.addChunk('_filter = self._CHEETAH__currentFilter = ' + theFilter.strip() + '(self).filter') else: if theFilter.lower() == 'none': self.addChunk('_filter = self._CHEETAH__initialFilter') else: # is string representing the name of a builtin filter self.addChunk('filterName = ' + repr(theFilter)) self.addChunk('if self._CHEETAH__filters.has_key("' + theFilter + '"):') self.indent() self.addChunk('_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]') self.dedent() self.addChunk('else:') self.indent() self.addChunk('_filter = self._CHEETAH__currentFilter' +' = \\\n\t\t\tself._CHEETAH__filters[filterName] = ' + 'getattr(self._CHEETAH__filtersLib, filterName)(self).filter') self.dedent() def closeFilterBlock(self): ID, filterDetails = self._filterRegionsStack.pop() #self.addChunk('_filter = self._CHEETAH__initialFilter') #self.addChunk('_filter = _orig_filter%(ID)s'%locals()) self.addChunk('_filter = self._CHEETAH__currentFilter = _orig_filter%(ID)s'%locals()) class AutoMethodCompiler(MethodCompiler): def _setupState(self): MethodCompiler._setupState(self) self._argStringList = [ ("self", None) ] self._streamingEnabled = True self._isClassMethod = None self._isStaticMethod = None def _useKWsDictArgForPassingTrans(self): 
alreadyHasTransArg = [argname for argname, defval in self._argStringList if argname=='trans'] return (self.methodName()!='respond' and not alreadyHasTransArg and self.setting('useKWsDictArgForPassingTrans')) def isClassMethod(self): if self._isClassMethod is None: self._isClassMethod = '@classmethod' in self._decorators return self._isClassMethod def isStaticMethod(self): if self._isStaticMethod is None: self._isStaticMethod = '@staticmethod' in self._decorators return self._isStaticMethod def cleanupState(self): MethodCompiler.cleanupState(self) self.commitStrConst() if self._cacheRegionsStack: self.endCacheRegion() if self._callRegionsStack: self.endCallRegion() if self._streamingEnabled: kwargsName = None positionalArgsListName = None for argname, defval in self._argStringList: if argname.strip().startswith('**'): kwargsName = argname.strip().replace('**', '') break elif argname.strip().startswith('*'): positionalArgsListName = argname.strip().replace('*', '') if not kwargsName and self._useKWsDictArgForPassingTrans(): kwargsName = 'KWS' self.addMethArg('**KWS', None) self._kwargsName = kwargsName if not self._useKWsDictArgForPassingTrans(): if not kwargsName and not positionalArgsListName: self.addMethArg('trans', 'None') else: self._streamingEnabled = False self._indentLev = self.setting('initialMethIndentLevel') mainBodyChunks = self._methodBodyChunks self._methodBodyChunks = [] self._addAutoSetupCode() self._methodBodyChunks.extend(mainBodyChunks) self._addAutoCleanupCode() def _addAutoSetupCode(self): if self._initialMethodComment: self.addChunk(self._initialMethodComment) if self._streamingEnabled and not self.isClassMethod() and not self.isStaticMethod(): if self._useKWsDictArgForPassingTrans() and self._kwargsName: self.addChunk('trans = %s.get("trans")'%self._kwargsName) self.addChunk('if (not trans and not self._CHEETAH__isBuffering' ' and not callable(self.transaction)):') self.indent() self.addChunk('trans = self.transaction' ' # is None unless 
self.awake() was called') self.dedent() self.addChunk('if not trans:') self.indent() self.addChunk('trans = DummyTransaction()') if self.setting('autoAssignDummyTransactionToSelf'): self.addChunk('self.transaction = trans') self.addChunk('_dummyTrans = True') self.dedent() self.addChunk('else: _dummyTrans = False') else: self.addChunk('trans = DummyTransaction()') self.addChunk('_dummyTrans = True') self.addChunk('write = trans.response().write') if self.setting('useNameMapper'): argNames = [arg[0] for arg in self._argStringList] allowSearchListAsMethArg = self.setting('allowSearchListAsMethArg') if allowSearchListAsMethArg and 'SL' in argNames: pass elif allowSearchListAsMethArg and 'searchList' in argNames: self.addChunk('SL = searchList') elif not self.isClassMethod() and not self.isStaticMethod(): self.addChunk('SL = self._CHEETAH__searchList') else: self.addChunk('SL = [KWS]') if self.setting('useFilters'): if self.isClassMethod() or self.isStaticMethod(): self.addChunk('_filter = lambda x, **kwargs: unicode(x)') else: self.addChunk('_filter = self._CHEETAH__currentFilter') self.addChunk('') self.addChunk("#" *40) self.addChunk('## START - generated method body') self.addChunk('') def _addAutoCleanupCode(self): self.addChunk('') self.addChunk("#" *40) self.addChunk('## END - generated method body') self.addChunk('') if not self._isGenerator: self.addStop() self.addChunk('') def addStop(self, expr=None): self.addChunk('return _dummyTrans and trans.response().getvalue() or ""') def addMethArg(self, name, defVal=None): self._argStringList.append( (name, defVal) ) def methodSignature(self): argStringChunks = [] for arg in self._argStringList: chunk = arg[0] if chunk == 'self' and self.isClassMethod(): chunk = 'cls' if chunk == 'self' and self.isStaticMethod(): # Skip the "self" method for @staticmethod decorators continue if not arg[1] == None: chunk += '=' + arg[1] argStringChunks.append(chunk) argString = (', ').join(argStringChunks) output = [] if 
self._decorators: output.append(''.join([self._indent + decorator + '\n' for decorator in self._decorators])) output.append(self._indent + "def " + self.methodName() + "(" + argString + "):\n\n") return ''.join(output) ################################################## ## CLASS COMPILERS _initMethod_initCheetah = """\ if not self._CHEETAH__instanceInitialized: cheetahKWArgs = {} allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split() for k,v in KWs.items(): if k in allowedKWs: cheetahKWArgs[k] = v self._initCheetahInstance(**cheetahKWArgs) """.replace('\n', '\n'+' '*8) class ClassCompiler(GenUtils): methodCompilerClass = AutoMethodCompiler methodCompilerClassForInit = MethodCompiler def __init__(self, className, mainMethodName='respond', moduleCompiler=None, fileName=None, settingsManager=None): self._settingsManager = settingsManager self._fileName = fileName self._className = className self._moduleCompiler = moduleCompiler self._mainMethodName = mainMethodName self._setupState() methodCompiler = self._spawnMethodCompiler( mainMethodName, initialMethodComment='## CHEETAH: main method generated for this template') self._setActiveMethodCompiler(methodCompiler) if fileName and self.setting('monitorSrcFile'): self._addSourceFileMonitoring(fileName) def setting(self, key): return self._settingsManager.setting(key) def __getattr__(self, name): """Provide access to the methods and attributes of the MethodCompiler at the top of the activeMethods stack: one-way namespace sharing WARNING: Use .setMethods to assign the attributes of the MethodCompiler from the methods of this class!!! 
or you will be assigning to attributes of this object instead.""" if name in self.__dict__: return self.__dict__[name] elif hasattr(self.__class__, name): return getattr(self.__class__, name) elif self._activeMethodsList and hasattr(self._activeMethodsList[-1], name): return getattr(self._activeMethodsList[-1], name) else: raise AttributeError(name) def _setupState(self): self._classDef = None self._decoratorsForNextMethod = [] self._activeMethodsList = [] # stack while parsing/generating self._finishedMethodsList = [] # store by order self._methodsIndex = {} # store by name self._baseClass = 'Template' self._classDocStringLines = [] # printed after methods in the gen class def: self._generatedAttribs = ['_CHEETAH__instanceInitialized = False'] self._generatedAttribs.append('_CHEETAH_version = __CHEETAH_version__') self._generatedAttribs.append( '_CHEETAH_versionTuple = __CHEETAH_versionTuple__') if self.setting('addTimestampsToCompilerOutput'): self._generatedAttribs.append('_CHEETAH_genTime = __CHEETAH_genTime__') self._generatedAttribs.append('_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__') self._generatedAttribs.append('_CHEETAH_src = __CHEETAH_src__') self._generatedAttribs.append( '_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__') if self.setting('templateMetaclass'): self._generatedAttribs.append('__metaclass__ = '+self.setting('templateMetaclass')) self._initMethChunks = [] self._blockMetaData = {} self._errorCatcherCount = 0 self._placeholderToErrorCatcherMap = {} def cleanupState(self): while self._activeMethodsList: methCompiler = self._popActiveMethodCompiler() self._swallowMethodCompiler(methCompiler) self._setupInitMethod() if self._mainMethodName == 'respond': if self.setting('setup__str__method'): self._generatedAttribs.append('def __str__(self): return self.respond()') self.addAttribute('_mainCheetahMethod_for_' + self._className + '= ' + repr(self._mainMethodName) ) def _setupInitMethod(self): __init__ = 
self._spawnMethodCompiler('__init__', klass=self.methodCompilerClassForInit) __init__.setMethodSignature("def __init__(self, *args, **KWs)") __init__.addChunk('super(%s, self).__init__(*args, **KWs)' % self._className) __init__.addChunk(_initMethod_initCheetah % {'className' : self._className}) for chunk in self._initMethChunks: __init__.addChunk(chunk) __init__.cleanupState() self._swallowMethodCompiler(__init__, pos=0) def _addSourceFileMonitoring(self, fileName): # @@TR: this stuff needs auditing for Cheetah 2.0 # the first bit is added to init self.addChunkToInit('self._filePath = ' + repr(fileName)) self.addChunkToInit('self._fileMtime = ' + str(getmtime(fileName)) ) # the rest is added to the main output method of the class ('mainMethod') self.addChunk('if exists(self._filePath) and ' + 'getmtime(self._filePath) > self._fileMtime:') self.indent() self.addChunk('self._compile(file=self._filePath, moduleName='+self._className + ')') self.addChunk( 'write(getattr(self, self._mainCheetahMethod_for_' + self._className + ')(trans=trans))') self.addStop() self.dedent() def setClassName(self, name): self._className = name def className(self): return self._className def setBaseClass(self, baseClassName): self._baseClass = baseClassName def setMainMethodName(self, methodName): if methodName == self._mainMethodName: return ## change the name in the methodCompiler and add new reference mainMethod = self._methodsIndex[self._mainMethodName] mainMethod.setMethodName(methodName) self._methodsIndex[methodName] = mainMethod ## make sure that fileUpdate code still works properly: chunkToChange = ('write(self.' + self._mainMethodName + '(trans=trans))') chunks = mainMethod._methodBodyChunks if chunkToChange in chunks: for i in range(len(chunks)): if chunks[i] == chunkToChange: chunks[i] = ('write(self.' 
+ methodName + '(trans=trans))') ## get rid of the old reference and update self._mainMethodName del self._methodsIndex[self._mainMethodName] self._mainMethodName = methodName def setMainMethodArgs(self, argsList): mainMethodCompiler = self._methodsIndex[self._mainMethodName] for argName, defVal in argsList: mainMethodCompiler.addMethArg(argName, defVal) def _spawnMethodCompiler(self, methodName, klass=None, initialMethodComment=None): if klass is None: klass = self.methodCompilerClass decorators = self._decoratorsForNextMethod or [] self._decoratorsForNextMethod = [] methodCompiler = klass(methodName, classCompiler=self, decorators=decorators, initialMethodComment=initialMethodComment) self._methodsIndex[methodName] = methodCompiler return methodCompiler def _setActiveMethodCompiler(self, methodCompiler): self._activeMethodsList.append(methodCompiler) def _getActiveMethodCompiler(self): return self._activeMethodsList[-1] def _popActiveMethodCompiler(self): return self._activeMethodsList.pop() def _swallowMethodCompiler(self, methodCompiler, pos=None): methodCompiler.cleanupState() if pos==None: self._finishedMethodsList.append( methodCompiler ) else: self._finishedMethodsList.insert(pos, methodCompiler) return methodCompiler def startMethodDef(self, methodName, argsList, parserComment): methodCompiler = self._spawnMethodCompiler( methodName, initialMethodComment=parserComment) self._setActiveMethodCompiler(methodCompiler) for argName, defVal in argsList: methodCompiler.addMethArg(argName, defVal) def _finishedMethods(self): return self._finishedMethodsList def addDecorator(self, decoratorExpr): """Set the decorator to be used with the next method in the source. See _spawnMethodCompiler() and MethodCompiler for the details of how this is used. 
""" self._decoratorsForNextMethod.append(decoratorExpr) def addClassDocString(self, line): self._classDocStringLines.append( line.replace('%', '%%')) def addChunkToInit(self, chunk): self._initMethChunks.append(chunk) def addAttribute(self, attribExpr): ## first test to make sure that the user hasn't used any fancy Cheetah syntax # (placeholders, directives, etc.) inside the expression if attribExpr.find('VFN(') != -1 or attribExpr.find('VFFSL(') != -1: raise ParseError(self, 'Invalid #attr directive.' + ' It should only contain simple Python literals.') ## now add the attribute self._generatedAttribs.append(attribExpr) def addSuper(self, argsList, parserComment=None): className = self._className #self._baseClass methodName = self._getActiveMethodCompiler().methodName() argStringChunks = [] for arg in argsList: chunk = arg[0] if not arg[1] == None: chunk += '=' + arg[1] argStringChunks.append(chunk) argString = ','.join(argStringChunks) self.addFilteredChunk( 'super(%(className)s, self).%(methodName)s(%(argString)s)'%locals()) def addErrorCatcherCall(self, codeChunk, rawCode='', lineCol=''): if rawCode in self._placeholderToErrorCatcherMap: methodName = self._placeholderToErrorCatcherMap[rawCode] if not self.setting('outputRowColComments'): self._methodsIndex[methodName].addMethDocString( 'plus at line %s, col %s'%lineCol) return methodName self._errorCatcherCount += 1 methodName = '__errorCatcher' + str(self._errorCatcherCount) self._placeholderToErrorCatcherMap[rawCode] = methodName catcherMeth = self._spawnMethodCompiler( methodName, klass=MethodCompiler, initialMethodComment=('## CHEETAH: Generated from ' + rawCode + ' at line %s, col %s'%lineCol + '.') ) catcherMeth.setMethodSignature('def ' + methodName + '(self, localsDict={})') # is this use of localsDict right? 
catcherMeth.addChunk('try:') catcherMeth.indent() catcherMeth.addChunk("return eval('''" + codeChunk + "''', globals(), localsDict)") catcherMeth.dedent() catcherMeth.addChunk('except self._CHEETAH__errorCatcher.exceptions(), e:') catcherMeth.indent() catcherMeth.addChunk("return self._CHEETAH__errorCatcher.warn(exc_val=e, code= " + repr(codeChunk) + " , rawCode= " + repr(rawCode) + " , lineCol=" + str(lineCol) +")") catcherMeth.cleanupState() self._swallowMethodCompiler(catcherMeth) return methodName def closeDef(self): self.commitStrConst() methCompiler = self._popActiveMethodCompiler() self._swallowMethodCompiler(methCompiler) def closeBlock(self): self.commitStrConst() methCompiler = self._popActiveMethodCompiler() methodName = methCompiler.methodName() if self.setting('includeBlockMarkers'): endMarker = self.setting('blockMarkerEnd') methCompiler.addStrConst(endMarker[0] + methodName + endMarker[1]) self._swallowMethodCompiler(methCompiler) #metaData = self._blockMetaData[methodName] #rawDirective = metaData['raw'] #lineCol = metaData['lineCol'] ## insert the code to call the block, caching if #cache directive is on codeChunk = 'self.' 
+ methodName + '(trans=trans)' self.addChunk(codeChunk) #self.appendToPrevChunk(' # generated from ' + repr(rawDirective) ) #if self.setting('outputRowColComments'): # self.appendToPrevChunk(' at line %s, col %s' % lineCol + '.') ## code wrapping methods def classDef(self): if self._classDef: return self._classDef else: return self.wrapClassDef() __str__ = classDef __unicode__ = classDef def wrapClassDef(self): ind = self.setting('indentationStep') classDefChunks = [self.classSignature(), self.classDocstring(), ] def addMethods(): classDefChunks.extend([ ind + '#'*50, ind + '## CHEETAH GENERATED METHODS', '\n', self.methodDefs(), ]) def addAttributes(): classDefChunks.extend([ ind + '#'*50, ind + '## CHEETAH GENERATED ATTRIBUTES', '\n', self.attributes(), ]) if self.setting('outputMethodsBeforeAttributes'): addMethods() addAttributes() else: addAttributes() addMethods() classDef = '\n'.join(classDefChunks) self._classDef = classDef return classDef def classSignature(self): return "class %s(%s):" % (self.className(), self._baseClass) def classDocstring(self): if not self._classDocStringLines: return '' ind = self.setting('indentationStep') docStr = ('%(ind)s"""\n%(ind)s' + '\n%(ind)s'.join(self._classDocStringLines) + '\n%(ind)s"""\n' ) % {'ind':ind} return docStr def methodDefs(self): methodDefs = [methGen.methodDef() for methGen in self._finishedMethods()] return '\n\n'.join(methodDefs) def attributes(self): attribs = [self.setting('indentationStep') + str(attrib) for attrib in self._generatedAttribs ] return '\n\n'.join(attribs) class AutoClassCompiler(ClassCompiler): pass ################################################## ## MODULE COMPILERS class ModuleCompiler(SettingsManager, GenUtils): parserClass = Parser classCompilerClass = AutoClassCompiler def __init__(self, source=None, file=None, moduleName='DynamicallyCompiledCheetahTemplate', mainClassName=None, # string mainMethodName=None, # string baseclassName=None, # string extraImportStatements=None, # list of 
strings settings=None # dict ): super(ModuleCompiler, self).__init__() if settings: self.updateSettings(settings) # disable useStackFrames if the C version of NameMapper isn't compiled # it's painfully slow in the Python version and bites Windows users all # the time: if not NameMapper.C_VERSION: if not sys.platform.startswith('java'): warnings.warn( "\nYou don't have the C version of NameMapper installed! " "I'm disabling Cheetah's useStackFrames option as it is " "painfully slow with the Python version of NameMapper. " "You should get a copy of Cheetah with the compiled C version of NameMapper." ) self.setSetting('useStackFrames', False) self._compiled = False self._moduleName = moduleName if not mainClassName: self._mainClassName = moduleName else: self._mainClassName = mainClassName self._mainMethodNameArg = mainMethodName if mainMethodName: self.setSetting('mainMethodName', mainMethodName) self._baseclassName = baseclassName self._filePath = None self._fileMtime = None if source and file: raise TypeError("Cannot compile from a source string AND file.") elif isinstance(file, basestring): # it's a filename. f = open(file) # Raises IOError. source = f.read() f.close() self._filePath = file self._fileMtime = os.path.getmtime(file) elif hasattr(file, 'read'): source = file.read() # Can't set filename or mtime--they're not accessible. elif file: raise TypeError("'file' argument must be a filename string or file-like object") if self._filePath: self._fileDirName, self._fileBaseName = os.path.split(self._filePath) self._fileBaseNameRoot, self._fileBaseNameExt = os.path.splitext(self._fileBaseName) if not isinstance(source, basestring): source = unicode(source) # by converting to string here we allow objects such as other Templates # to be passed in # Handle the #indent directive by converting it to other directives. # (Over the long term we'll make it a real directive.) 
if source == "": warnings.warn("You supplied an empty string for the source!", ) else: unicodeMatch = unicodeDirectiveRE.search(source) encodingMatch = encodingDirectiveRE.match(source) if unicodeMatch: if encodingMatch: raise ParseError( self, "#encoding and #unicode are mutually exclusive! " "Use one or the other.") source = unicodeDirectiveRE.sub('', source) if isinstance(source, str): encoding = unicodeMatch.group(1) or 'ascii' source = unicode(source, encoding) elif encodingMatch: encodings = encodingMatch.groups() if len(encodings): encoding = encodings[0] source = source.decode(encoding) else: source = unicode(source) if source.find('#indent') != -1: #@@TR: undocumented hack source = indentize(source) self._parser = self.parserClass(source, filename=self._filePath, compiler=self) self._setupCompilerState() def __getattr__(self, name): """Provide one-way access to the methods and attributes of the ClassCompiler, and thereby the MethodCompilers as well. WARNING: Use .setMethods to assign the attributes of the ClassCompiler from the methods of this class!!! or you will be assigning to attributes of this object instead. 
""" if name in self.__dict__: return self.__dict__[name] elif hasattr(self.__class__, name): return getattr(self.__class__, name) elif self._activeClassesList and hasattr(self._activeClassesList[-1], name): return getattr(self._activeClassesList[-1], name) else: raise AttributeError(name) def _initializeSettings(self): self.updateSettings(copy.deepcopy(DEFAULT_COMPILER_SETTINGS)) def _setupCompilerState(self): self._activeClassesList = [] self._finishedClassesList = [] # listed by ordered self._finishedClassIndex = {} # listed by name self._moduleDef = None self._moduleShBang = '#!/usr/bin/env python' self._moduleEncoding = 'ascii' self._moduleEncodingStr = '' self._moduleHeaderLines = [] self._moduleDocStringLines = [] self._specialVars = {} self._importStatements = [ "import sys", "import os", "import os.path", "import __builtin__", "from os.path import getmtime, exists", "import time", "import types", "from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion", "from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple", "from Cheetah.Template import Template", "from Cheetah.DummyTransaction import *", "from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList", "from Cheetah.CacheRegion import CacheRegion", "import Cheetah.Filters as Filters", "import Cheetah.ErrorCatchers as ErrorCatchers", ] self._importedVarNames = ['sys', 'os', 'os.path', 'time', 'types', 'Template', 'DummyTransaction', 'NotFound', 'Filters', 'ErrorCatchers', 'CacheRegion', ] self._moduleConstants = [ "VFFSL=valueFromFrameOrSearchList", "VFSL=valueFromSearchList", "VFN=valueForName", "currentTime=time.time", ] def compile(self): classCompiler = self._spawnClassCompiler(self._mainClassName) if self._baseclassName: classCompiler.setBaseClass(self._baseclassName) self._addActiveClassCompiler(classCompiler) self._parser.parse() self._swallowClassCompiler(self._popActiveClassCompiler()) self._compiled = True 
self._parser.cleanup() def _spawnClassCompiler(self, className, klass=None): if klass is None: klass = self.classCompilerClass classCompiler = klass(className, moduleCompiler=self, mainMethodName=self.setting('mainMethodName'), fileName=self._filePath, settingsManager=self, ) return classCompiler def _addActiveClassCompiler(self, classCompiler): self._activeClassesList.append(classCompiler) def _getActiveClassCompiler(self): return self._activeClassesList[-1] def _popActiveClassCompiler(self): return self._activeClassesList.pop() def _swallowClassCompiler(self, classCompiler): classCompiler.cleanupState() self._finishedClassesList.append( classCompiler ) self._finishedClassIndex[classCompiler.className()] = classCompiler return classCompiler def _finishedClasses(self): return self._finishedClassesList def importedVarNames(self): return self._importedVarNames def addImportedVarNames(self, varNames, raw_statement=None): settings = self.settings() if not varNames: return if not settings.get('useLegacyImportMode'): if raw_statement and getattr(self, '_methodBodyChunks'): self.addChunk(raw_statement) else: self._importedVarNames.extend(varNames) ## methods for adding stuff to the module and class definitions def setBaseClass(self, baseClassName): if self._mainMethodNameArg: self.setMainMethodName(self._mainMethodNameArg) else: self.setMainMethodName(self.setting('mainMethodNameForSubclasses')) if self.setting('handlerForExtendsDirective'): handler = self.setting('handlerForExtendsDirective') baseClassName = handler(compiler=self, baseClassName=baseClassName) self._getActiveClassCompiler().setBaseClass(baseClassName) elif (not self.setting('autoImportForExtendsDirective') or baseClassName=='object' or baseClassName in self.importedVarNames()): self._getActiveClassCompiler().setBaseClass(baseClassName) # no need to import else: ################################################## ## If the #extends directive contains a classname or modulename that isn't # in 
self.importedVarNames() already, we assume that we need to add # an implied 'from ModName import ClassName' where ModName == ClassName. # - This is the case in WebKit servlet modules. # - We also assume that the final . separates the classname from the # module name. This might break if people do something really fancy # with their dots and namespaces. baseclasses = baseClassName.split(',') for klass in baseclasses: chunks = klass.split('.') if len(chunks)==1: self._getActiveClassCompiler().setBaseClass(klass) if klass not in self.importedVarNames(): modName = klass # we assume the class name to be the module name # and that it's not a builtin: importStatement = "from %s import %s" % (modName, klass) self.addImportStatement(importStatement) self.addImportedVarNames((klass,)) else: needToAddImport = True modName = chunks[0] #print chunks, ':', self.importedVarNames() for chunk in chunks[1:-1]: if modName in self.importedVarNames(): needToAddImport = False finalBaseClassName = klass.replace(modName+'.', '') self._getActiveClassCompiler().setBaseClass(finalBaseClassName) break else: modName += '.'+chunk if needToAddImport: modName, finalClassName = '.'.join(chunks[:-1]), chunks[-1] #if finalClassName != chunks[:-1][-1]: if finalClassName != chunks[-2]: # we assume the class name to be the module name modName = '.'.join(chunks) self._getActiveClassCompiler().setBaseClass(finalClassName) importStatement = "from %s import %s" % (modName, finalClassName) self.addImportStatement(importStatement) self.addImportedVarNames( [finalClassName,] ) def setCompilerSetting(self, key, valueExpr): self.setSetting(key, eval(valueExpr) ) self._parser.configureParser() def setCompilerSettings(self, keywords, settingsStr): KWs = keywords merge = True if 'nomerge' in KWs: merge = False if 'reset' in KWs: # @@TR: this is actually caught by the parser at the moment. 
# subject to change in the future self._initializeSettings() self._parser.configureParser() return elif 'python' in KWs: settingsReader = self.updateSettingsFromPySrcStr # this comes from SettingsManager else: # this comes from SettingsManager settingsReader = self.updateSettingsFromConfigStr settingsReader(settingsStr) self._parser.configureParser() def setShBang(self, shBang): self._moduleShBang = shBang def setModuleEncoding(self, encoding): self._moduleEncoding = encoding def getModuleEncoding(self): return self._moduleEncoding def addModuleHeader(self, line): """Adds a header comment to the top of the generated module. """ self._moduleHeaderLines.append(line) def addModuleDocString(self, line): """Adds a line to the generated module docstring. """ self._moduleDocStringLines.append(line) def addModuleGlobal(self, line): """Adds a line of global module code. It is inserted after the import statements and Cheetah default module constants. """ self._moduleConstants.append(line) def addSpecialVar(self, basename, contents, includeUnderscores=True): """Adds module __specialConstant__ to the module globals. """ name = includeUnderscores and '__'+basename+'__' or basename self._specialVars[name] = contents.strip() def addImportStatement(self, impStatement): settings = self.settings() if not self._methodBodyChunks or settings.get('useLegacyImportMode'): # In the case where we are importing inline in the middle of a source block # we don't want to inadvertantly import the module at the top of the file either self._importStatements.append(impStatement) #@@TR 2005-01-01: there's almost certainly a cleaner way to do this! 
importVarNames = impStatement[impStatement.find('import') + len('import'):].split(',') importVarNames = [var.split()[-1] for var in importVarNames] # handles aliases importVarNames = [var for var in importVarNames if not var == '*'] self.addImportedVarNames(importVarNames, raw_statement=impStatement) #used by #extend for auto-imports def addAttribute(self, attribName, expr): self._getActiveClassCompiler().addAttribute(attribName + ' =' + expr) def addComment(self, comm): if re.match(r'#+$', comm): # skip bar comments return specialVarMatch = specialVarRE.match(comm) if specialVarMatch: # @@TR: this is a bit hackish and is being replaced with # #set module varName = ... return self.addSpecialVar(specialVarMatch.group(1), comm[specialVarMatch.end():]) elif comm.startswith('doc:'): addLine = self.addMethDocString comm = comm[len('doc:'):].strip() elif comm.startswith('doc-method:'): addLine = self.addMethDocString comm = comm[len('doc-method:'):].strip() elif comm.startswith('doc-module:'): addLine = self.addModuleDocString comm = comm[len('doc-module:'):].strip() elif comm.startswith('doc-class:'): addLine = self.addClassDocString comm = comm[len('doc-class:'):].strip() elif comm.startswith('header:'): addLine = self.addModuleHeader comm = comm[len('header:'):].strip() else: addLine = self.addMethComment for line in comm.splitlines(): addLine(line) ## methods for module code wrapping def getModuleCode(self): if not self._compiled: self.compile() if self._moduleDef: return self._moduleDef else: return self.wrapModuleDef() __str__ = getModuleCode def wrapModuleDef(self): self.addSpecialVar('CHEETAH_docstring', self.setting('defDocStrMsg')) self.addModuleGlobal('__CHEETAH_version__ = %r'%Version) self.addModuleGlobal('__CHEETAH_versionTuple__ = %r'%(VersionTuple,)) if self.setting('addTimestampsToCompilerOutput'): self.addModuleGlobal('__CHEETAH_genTime__ = %r'%time.time()) self.addModuleGlobal('__CHEETAH_genTimestamp__ = %r'%self.timestamp()) if self._filePath: 
timestamp = self.timestamp(self._fileMtime) self.addModuleGlobal('__CHEETAH_src__ = %r'%self._filePath) self.addModuleGlobal('__CHEETAH_srcLastModified__ = %r'%timestamp) else: self.addModuleGlobal('__CHEETAH_src__ = None') self.addModuleGlobal('__CHEETAH_srcLastModified__ = None') moduleDef = """%(header)s %(docstring)s ################################################## ## DEPENDENCIES %(imports)s ################################################## ## MODULE CONSTANTS %(constants)s %(specialVars)s if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple: raise AssertionError( 'This template was compiled with Cheetah version' ' %%s. Templates compiled before version %%s must be recompiled.'%%( __CHEETAH_version__, RequiredCheetahVersion)) ################################################## ## CLASSES %(classes)s ## END CLASS DEFINITION if not hasattr(%(mainClassName)s, '_initCheetahAttributes'): templateAPIClass = getattr(%(mainClassName)s, '_CHEETAH_templateClass', Template) templateAPIClass._addCheetahPlumbingCodeToClass(%(mainClassName)s) %(footer)s """ % {'header': self.moduleHeader(), 'docstring': self.moduleDocstring(), 'specialVars': self.specialVars(), 'imports': self.importStatements(), 'constants': self.moduleConstants(), 'classes': self.classDefs(), 'footer': self.moduleFooter(), 'mainClassName': self._mainClassName, } self._moduleDef = moduleDef return moduleDef def timestamp(self, theTime=None): if not theTime: theTime = time.time() return time.asctime(time.localtime(theTime)) def moduleHeader(self): header = self._moduleShBang + '\n' header += self._moduleEncodingStr + '\n' if self._moduleHeaderLines: offSet = self.setting('commentOffset') header += ( '#' + ' '*offSet + ('\n#'+ ' '*offSet).join(self._moduleHeaderLines) + '\n') return header def moduleDocstring(self): if not self._moduleDocStringLines: return '' return ('"""' + '\n'.join(self._moduleDocStringLines) + '\n"""\n') def specialVars(self): chunks = [] theVars = self._specialVars keys = 
sorted(theVars.keys()) for key in keys: chunks.append(key + ' = ' + repr(theVars[key]) ) return '\n'.join(chunks) def importStatements(self): return '\n'.join(self._importStatements) def moduleConstants(self): return '\n'.join(self._moduleConstants) def classDefs(self): classDefs = [klass.classDef() for klass in self._finishedClasses()] return '\n\n'.join(classDefs) def moduleFooter(self): return """ # CHEETAH was developed by Tavis Rudd and Mike Orr # with code, advice and input from many other volunteers. # For more information visit http://www.CheetahTemplate.org/ ################################################## ## if run from command line: if __name__ == '__main__': from Cheetah.TemplateCmdLineIface import CmdLineIface CmdLineIface(templateObj=%(className)s()).run() """ % {'className':self._mainClassName} ################################################## ## Make Compiler an alias for ModuleCompiler Compiler = ModuleCompiler
[ [ 8, 0, 0.0025, 0.0045, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0055, 0.0005, 0, 0.66, 0.0312, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.006, 0.0005, 0, 0.66,...
[ "'''\n Compiler classes for Cheetah:\n ModuleCompiler aka 'Compiler'\n ClassCompiler\n MethodCompiler\n\n If you are trying to grok this code start with ModuleCompiler.__init__,\n ModuleCompiler.compile, and ModuleCompiler.__getattr__.", "import sys", "import os", "import os.path", "from o...
"""SourceReader class for Cheetah's Parser and CodeGenerator
"""
import re
import sys

# One end-of-line sequence (\r\n, \r or \n) plus any blanks/tabs/formfeeds
# immediately before it.
EOLre = re.compile(r'[ \f\t]*(?:\r\n|\r|\n)')
# An end-of-line sequence OR the very end of the string (\Z), so the final
# line is found even when the source does not end with a newline.
EOLZre = re.compile(r'(?:\r\n|\r|\n|\Z)')
# PEP 263-style encoding declaration, e.g. "# -*- coding: utf-8 -*-".
ENCODINGsearch = re.compile("coding[=:]\s*([-\w.]+)").search

class Error(Exception):
    pass

class SourceReader(object):
    """A stateful, bookmark-able cursor over a source string.

    Tracks a current position, pre-computes the start/end offset of every
    line so (row, col) lookups are cheap, and supports an artificial
    end-of-stream (the "break point") short of the real end of the string.
    """

    def __init__(self, src, filename=None, breakPoint=None, encoding=None):
        ## @@TR 2005-01-17: `encoding` is currently unused.  A disabled patch
        ## from Terrel Shumway used it to decode `src` to unicode here; that
        ## decision is better made by the caller that passes `src` in.
        self._src = src
        self._filename = filename
        self._srcLen = len(src)
        if breakPoint == None:
            self._breakPoint = self._srcLen
        else:
            self.setBreakPoint(breakPoint)
        self._pos = 0
        self._bookmarks = {}
        self._posTobookmarkMap = {}

        ## Pre-compute every end-of-line and beginning-of-line offset so that
        ## lineNum()/getRowCol() are simple scans over small lists.
        self._EOLs = []
        pos = 0
        while pos < len(self):
            EOLmatch = EOLZre.search(src, pos)
            self._EOLs.append(EOLmatch.start())
            pos = EOLmatch.end()

        self._BOLs = []
        for pos in self._EOLs:
            BOLpos = self.findBOL(pos)
            self._BOLs.append(BOLpos)

    def src(self):
        return self._src

    def filename(self):
        return self._filename

    def __len__(self):
        # The logical length is the break point, not the raw string length.
        return self._breakPoint

    def __getitem__(self, i):
        self.checkPos(i)
        return self._src[i]

    def __getslice__(self, i, j):
        # Python 2 slice protocol; clamps negative bounds to 0.
        i = max(i, 0)
        j = max(j, 0)
        return self._src[i:j]

    def splitlines(self):
        # Cached: the source is not expected to change, so split only once.
        if not hasattr(self, '_srcLines'):
            self._srcLines = self._src.splitlines()
        return self._srcLines

    def lineNum(self, pos=None):
        """Return the 0-based line number containing `pos` (default: the
        current position), or None if `pos` lies outside every line."""
        if pos == None:
            pos = self._pos
        for i in range(len(self._BOLs)):
            if pos >= self._BOLs[i] and pos <= self._EOLs[i]:
                return i

    def getRowCol(self, pos=None):
        """Return (row, col), both 1-based, for `pos`."""
        if pos == None:
            pos = self._pos
        lineNum = self.lineNum(pos)
        BOL, EOL = self._BOLs[lineNum], self._EOLs[lineNum]
        return lineNum + 1, pos - BOL + 1

    def getRowColLine(self, pos=None):
        """Return (row, col, lineText) for `pos`."""
        if pos == None:
            pos = self._pos
        row, col = self.getRowCol(pos)
        return row, col, self.splitlines()[row - 1]

    def getLine(self, pos=None):
        """Return the text of the line containing `pos`.

        `pos` now defaults to None (current position) like the other
        position-taking methods; the body already handled pos=None but
        callers previously had to pass it explicitly.
        """
        if pos == None:
            pos = self._pos
        lineNum = self.lineNum(pos)
        return self.splitlines()[lineNum]

    def pos(self):
        return self._pos

    def setPos(self, pos):
        self.checkPos(pos)
        self._pos = pos

    def validPos(self, pos):
        return pos <= self._breakPoint and pos >= 0

    def checkPos(self, pos):
        if not pos <= self._breakPoint:
            raise Error("pos (" + str(pos) +
                        ") is invalid: beyond the stream's end (" +
                        str(self._breakPoint - 1) + ")")
        elif not pos >= 0:
            raise Error("pos (" + str(pos) + ") is invalid: less than 0")

    def breakPoint(self):
        return self._breakPoint

    def setBreakPoint(self, pos):
        """Move the artificial end-of-stream; must lie inside the source."""
        if pos > self._srcLen:
            raise Error("New breakpoint (" + str(pos) +
                        ") is invalid: beyond the end of stream's source string (" +
                        str(self._srcLen) + ")")
        elif not pos >= 0:
            raise Error("New breakpoint (" + str(pos) +
                        ") is invalid: less than 0")
        self._breakPoint = pos

    def setBookmark(self, name):
        self._bookmarks[name] = self._pos
        self._posTobookmarkMap[self._pos] = name

    def hasBookmark(self, name):
        return name in self._bookmarks

    def gotoBookmark(self, name):
        if not self.hasBookmark(name):
            raise Error("Invalid bookmark (" + name +
                        ") is invalid: does not exist")
        pos = self._bookmarks[name]
        if not self.validPos(pos):
            raise Error("Invalid bookmark (" + name + ', ' + str(pos) +
                        ") is invalid: pos is out of range")
        self._pos = pos

    def atEnd(self):
        return self._pos >= self._breakPoint

    def atStart(self):
        return self._pos == 0

    def peek(self, offset=0):
        self.checkPos(self._pos + offset)
        pos = self._pos + offset
        return self._src[pos]

    def getc(self):
        pos = self._pos
        if self.validPos(pos + 1):
            self._pos += 1
        return self._src[pos]

    def ungetc(self, c=None):
        """Step back one character, optionally replacing the character
        stepped over with `c`.

        BUGFIX: the guard was inverted (it raised whenever the reader was
        NOT at the start), and the replacement assigned into an immutable
        str.  NOTE: replacement does not refresh the cached line-offset
        tables or the splitlines() cache, so only replace like-for-like
        characters (no newlines).
        """
        if self.atStart():
            raise Error('Already at beginning of stream')
        self._pos -= 1
        if not c == None:
            # str is immutable -- splice the replacement character in.
            self._src = self._src[:self._pos] + c + self._src[self._pos + 1:]

    def advance(self, offset=1):
        self.checkPos(self._pos + offset)
        self._pos += offset

    def rev(self, offset=1):
        self.checkPos(self._pos - offset)
        self._pos -= offset

    def read(self, offset):
        self.checkPos(self._pos + offset)
        start = self._pos
        self._pos += offset
        return self._src[start:self._pos]

    def readTo(self, to, start=None):
        self.checkPos(to)
        if start == None:
            start = self._pos
        self._pos = to
        return self._src[start:to]

    def readToEOL(self, start=None, gobble=True):
        """Read up to the next end-of-line; include the EOL chars iff
        `gobble` is true."""
        EOLmatch = EOLZre.search(self.src(), self.pos())
        if gobble:
            pos = EOLmatch.end()
        else:
            pos = EOLmatch.start()
        return self.readTo(to=pos, start=start)

    def find(self, it, pos=None):
        if pos == None:
            pos = self._pos
        return self._src.find(it, pos)

    def startswith(self, it, pos=None):
        return self.find(it, pos) == self.pos()

    def rfind(self, it, pos=None):
        """`pos` now defaults to None (current position), consistent with
        find(); the body already handled pos=None but the parameter had no
        default."""
        if pos == None:
            pos = self._pos
        return self._src.rfind(it, pos)

    def findBOL(self, pos=None):
        if pos == None:
            pos = self._pos
        src = self.src()
        # Latest line break before `pos`, whichever EOL convention is used.
        return max(src.rfind('\n', 0, pos) + 1, src.rfind('\r', 0, pos) + 1, 0)

    def findEOL(self, pos=None, gobble=False):
        if pos == None:
            pos = self._pos
        match = EOLZre.search(self.src(), pos)
        if gobble:
            return match.end()
        else:
            return match.start()

    def isLineClearToPos(self, pos=None):
        """True if only whitespace precedes `pos` on its line."""
        if pos == None:
            pos = self.pos()
        self.checkPos(pos)
        src = self.src()
        BOL = self.findBOL()
        return BOL == pos or src[BOL:pos].isspace()

    def matches(self, strOrRE):
        if isinstance(strOrRE, (str, unicode)):
            return self.startswith(strOrRE, pos=self.pos())
        else:  # assume a compiled regex object
            return strOrRE.match(self.src(), self.pos())

    def matchWhiteSpace(self, WSchars=' \f\t'):
        return (not self.atEnd()) and self.peek() in WSchars

    def getWhiteSpace(self, max=None, WSchars=' \f\t'):
        """Consume and return a run of horizontal whitespace, at most `max`
        characters long when `max` is given."""
        if not self.matchWhiteSpace(WSchars):
            return ''
        start = self.pos()
        breakPoint = self.breakPoint()
        if max is not None:
            breakPoint = min(breakPoint, self.pos() + max)
        while self.pos() < breakPoint:
            self.advance()
            if not self.matchWhiteSpace(WSchars):
                break
        return self.src()[start:self.pos()]

    def matchNonWhiteSpace(self, WSchars=' \f\t\n\r'):
        # NOTE: deliberately True at end-of-stream; callers guard the loop
        # with pos() < breakPoint().
        return self.atEnd() or not self.peek() in WSchars

    def getNonWhiteSpace(self, WSchars=' \f\t\n\r'):
        """Consume and return a run of non-whitespace characters."""
        if not self.matchNonWhiteSpace(WSchars):
            return ''
        start = self.pos()
        while self.pos() < self.breakPoint():
            self.advance()
            if not self.matchNonWhiteSpace(WSchars):
                break
        return self.src()[start:self.pos()]
[ [ 8, 0, 0.0052, 0.0069, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0103, 0.0034, 0, 0.66, 0.1429, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.0138, 0.0034, 0, 0.66...
[ "\"\"\"SourceReader class for Cheetah's Parser and CodeGenerator\n\"\"\"", "import re", "import sys", "EOLre = re.compile(r'[ \\f\\t]*(?:\\r\\n|\\r|\\n)')", "EOLZre = re.compile(r'(?:\\r\\n|\\r|\\n|\\Z)')", "ENCODINGsearch = re.compile(\"coding[=:]\\s*([-\\w.]+)\").search", "class Error(Exception):\n ...
"""
Indentation maker.
@@TR: this code is unsupported and largely undocumented ...

This version is based directly on code by Robert Kuzelj
<robert_kuzelj@yahoo.com> and uses his directive syntax. Some classes and
attributes have been renamed. Indentation is output via
$self._CHEETAH__indenter.indent() to prevent '_indenter' being looked up on
the searchList and another one being found. The directive syntax will soon
be changed somewhat.
"""
import re
import sys


def indentize(source):
    """Rewrite every #indent directive in `source` into the equivalent
    #silent call on the template's indenter."""
    return IndentProcessor().process(source)


class IndentProcessor(object):
    """Preprocess #indent tags."""
    LINE_SEP = '\n'
    ARGS = "args"
    INDENT_DIR = re.compile(r'[ \t]*#indent[ \t]*(?P<args>.*)')
    DIRECTIVE = re.compile(r"[ \t]*#")
    WS = "ws"
    WHITESPACES = re.compile(r"(?P<ws>[ \t]*)")
    INC = "++"
    DEC = "--"
    SET = "="
    CHAR = "char"
    ON = "on"
    OFF = "off"
    PUSH = "push"
    POP = "pop"

    def process(self, _txt):
        """Return `_txt` with each #indent directive translated and every
        plain (non-directive) line prefixed with an explicit indent() call
        sized from its leading whitespace."""
        result = []
        for line in _txt.splitlines():
            match = self.INDENT_DIR.match(line)
            if match:  # this is an #indent directive
                args = match.group(self.ARGS).strip()
                if args == self.ON:
                    line = "#silent $self._CHEETAH__indenter.on()"
                elif args == self.OFF:
                    line = "#silent $self._CHEETAH__indenter.off()"
                elif args == self.INC:
                    line = "#silent $self._CHEETAH__indenter.inc()"
                elif args == self.DEC:
                    line = "#silent $self._CHEETAH__indenter.dec()"
                elif args.startswith(self.SET):
                    level = int(args[1:])
                    line = "#silent $self._CHEETAH__indenter.setLevel(%(level)d)" % {"level": level}
                elif args.startswith('chars'):
                    # BUGFIX: this branch used to format the (usually
                    # unbound) name `level` with %d -- a NameError unless a
                    # previous "=N" directive had run -- and emitted a call
                    # to the nonexistent setChars().  Pass the parsed chars
                    # value to Indenter.setChar() instead.
                    # NOTE(review): eval() of template-author input -- fine
                    # for trusted templates, unsafe for untrusted ones.
                    self.indentChars = eval(args.split('=')[1])
                    line = "#silent $self._CHEETAH__indenter.setChar(%(chars)r)" % {"chars": self.indentChars}
                elif args.startswith(self.PUSH):
                    line = "#silent $self._CHEETAH__indenter.push()"
                elif args.startswith(self.POP):
                    line = "#silent $self._CHEETAH__indenter.pop()"
            else:
                match = self.DIRECTIVE.match(line)
                if not match:  # not another directive
                    match = self.WHITESPACES.match(line)
                    if match:
                        size = len(match.group("ws").expandtabs(4))
                        line = ("${self._CHEETAH__indenter.indent(%(size)d)}" %
                                {"size": size}) + line.lstrip()
                    else:
                        line = "${self._CHEETAH__indenter.indent(0)}" + line
            result.append(line)
        return self.LINE_SEP.join(result)


class Indenter(object):
    """
    A class that keeps track of the current indentation level.
    .indent() returns the appropriate amount of indentation.

    NOTE(review): Level/Chars/LevelStack are class attributes.  Augmented
    assignment rebinds Level/Chars per instance, but push()/pop() mutate
    the class-level LevelStack shared by ALL instances -- presumably
    tolerable because Cheetah uses one indenter per template; verify
    before reusing this class elsewhere.
    """
    On = 1
    Level = 0
    Chars = '    '  # four spaces: the default indent unit
    LevelStack = []

    def on(self):
        self.On = 1

    def off(self):
        self.On = 0

    def inc(self):
        self.Level += 1

    def dec(self):
        """Decrement the level; levels below zero make no sense, so zero is
        the floor."""
        if self.Level > 0:
            self.Level -= 1

    def push(self):
        self.LevelStack.append(self.Level)

    def pop(self):
        """Restore the previously pushed level; popping an empty stack
        resets the level to 0 rather than going negative."""
        if len(self.LevelStack) > 0:
            self.Level = self.LevelStack.pop()
        else:
            self.Level = 0

    def setLevel(self, _level):
        """Set the level directly; negative requests are clamped to 0."""
        if _level < 0:
            self.Level = 0
        else:
            self.Level = _level

    def setChar(self, _chars):
        self.Chars = _chars

    def indent(self, _default=0):
        """Return the current indent string, or `_default` spaces when the
        indenter is switched off."""
        if self.On:
            return self.Chars * self.Level
        return " " * _default
[ [ 8, 0, 0.0488, 0.0894, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1057, 0.0081, 0, 0.66, 0.2, 540, 0, 1, 0, 0, 540, 0, 0 ], [ 1, 0, 0.1138, 0.0081, 0, 0.66, ...
[ "\"\"\"\nIndentation maker.\n@@TR: this code is unsupported and largely undocumented ...\n\nThis version is based directly on code by Robert Kuzelj\n<robert_kuzelj@yahoo.com> and uses his directive syntax. Some classes and\nattributes have been renamed. Indentation is output via\n$self._CHEETAH__indenter.indent()...
## statprof.py
## Copyright (C) 2004,2005 Andy Wingo <wingo at pobox dot com>
## Copyright (C) 2001 Rob Browning <rlb at defaultvalue dot org>
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this program; if not, contact:
##
## Free Software Foundation           Voice:  +1-617-542-5942
## 59 Temple Place - Suite 330        Fax:    +1-617-542-2652
## Boston, MA  02111-1307,  USA       gnu@gnu.org
"""
statprof -- a simple statistical (sampling) profiler for Python, ported
directly from the guile-lib profiler of the same name
(http://wingolog.org/software/guile-lib/statprof/).

Basic usage:

    >>> start()
    >>> ...                 # run the code to be profiled
    >>> stop()
    >>> display()           # table of %-time / cumulative / self seconds

All figures are statistical approximations of CPU (user + system) time,
not wall-clock time.  Data accumulates across runs; reset() clears it and
may also change the sampling frequency, e.g. reset(50) samples 50 times
per CPU-second.  Because the clock only advances while the process runs
on the CPU, the profiler is not suited to io-bound workloads.

Implementation sketch: an ITIMER_PROF timer delivers SIGPROF after each
sampling interval of CPU time; the handler credits the active frame,
walks up the stack crediting each distinct code object once, then re-arms
the timer.  Code objects (not names) identify procedures, so same-named
functions are reported separately.  Signals are delivered to the main
thread only, so only the main thread is profiled.
"""
try:
    import itimer
except ImportError:
    raise ImportError('''statprof requires the itimer python extension.
To install it, enter the following commands from a terminal:

wget http://www.cute.fi/~torppa/py-itimer/py-itimer.tar.gz
tar zxvf py-itimer.tar.gz
cd py-itimer
sudo python setup.py install
''')

import signal
import os


__all__ = ['start', 'stop', 'reset', 'display']


###########################################################################
## Utils

def clock():
    """User + system CPU time consumed so far by this process."""
    user, system = os.times()[:2]
    return user + system


###########################################################################
## Collection data structures

class ProfileState(object):
    """Mutable module-wide profiler state (one global instance: `state`)."""

    def __init__(self, frequency=None):
        self.reset(frequency)

    def reset(self, frequency=None):
        # CPU seconds accumulated over all start()/stop() spans so far.
        self.accumulated_time = 0.0
        # clock() reading at the most recent start(); None while stopped.
        self.last_start_time = None
        # How many times the SIGPROF sampler has fired.
        self.sample_count = 0
        # Sampling interval, a float of seconds.
        if frequency:
            self.sample_interval = 1.0 / frequency
        elif not hasattr(self, 'sample_interval'):
            # First-ever reset: default to 100 Hz.
            self.sample_interval = 1.0 / 100.0
        # else: keep whatever rate was already configured.
        self.remaining_prof_time = None
        # Nesting depth of user start()/stop() calls.
        self.profile_level = 0
        # Whether to catch apply-frame (call counting) -- currently unused.
        self.count_calls = False
        # GC time between start() and stop().
        self.gc_time_taken = 0

    def accumulate_time(self, stop_time):
        self.accumulated_time += stop_time - self.last_start_time


state = ProfileState()

## call_data := { code object: CallData }
call_data = {}


class CallData(object):
    """Per-code-object sample counters; registers itself in `call_data`."""

    def __init__(self, code):
        self.name = code.co_name
        self.filename = code.co_filename
        self.lineno = code.co_firstlineno
        self.call_count = 0
        self.cum_sample_count = 0
        self.self_sample_count = 0
        call_data[code] = self


def get_call_data(code):
    """Fetch (or lazily create) the CallData record for `code`."""
    return call_data.get(code, None) or CallData(code)


###########################################################################
## SIGPROF handler

def sample_stack_procs(frame):
    """Credit one self-sample to the active frame and one cumulative sample
    to every distinct code object on the current stack."""
    state.sample_count += 1
    get_call_data(frame.f_code).self_sample_count += 1

    # A code object that appears several times on the stack (recursion) is
    # still only counted once per sample.
    seen = set()
    while frame:
        seen.add(frame.f_code)
        frame = frame.f_back
    for code in seen:
        get_call_data(code).cum_sample_count += 1


def profile_signal_handler(signum, frame):
    if state.profile_level > 0:
        state.accumulate_time(clock())
        sample_stack_procs(frame)
        itimer.setitimer(itimer.ITIMER_PROF, state.sample_interval, 0.0)
        state.last_start_time = clock()


###########################################################################
## Profiling API

def is_active():
    return state.profile_level > 0


def start():
    """Begin (or nest) profiling; the timer is armed at the first level."""
    state.profile_level += 1
    if state.profile_level == 1:
        state.last_start_time = clock()
        rpt = state.remaining_prof_time
        state.remaining_prof_time = None
        signal.signal(signal.SIGPROF, profile_signal_handler)
        itimer.setitimer(itimer.ITIMER_PROF, rpt or state.sample_interval, 0.0)
        state.gc_time_taken = 0  # dunno


def stop():
    """Leave one profiling level; the timer is disarmed at the last."""
    state.profile_level -= 1
    if state.profile_level == 0:
        state.accumulate_time(clock())
        state.last_start_time = None
        rpt = itimer.setitimer(itimer.ITIMER_PROF, 0.0, 0.0)
        signal.signal(signal.SIGPROF, signal.SIG_IGN)
        state.remaining_prof_time = rpt[0]
        state.gc_time_taken = 0  # dunno


def reset(frequency=None):
    """Discard collected data; optionally change the sampling frequency."""
    assert state.profile_level == 0, "Can't reset() while statprof is running"
    call_data.clear()
    state.reset(frequency)
[ [ 8, 0, 0.2336, 0.3257, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 7, 0, 0.4276, 0.0362, 0, 0.66, 0.0556, 0, 0, 1, 0, 0, 0, 0, 1 ], [ 1, 1, 0.4145, 0.0033, 1, 0.04, ...
[ "\"\"\"\nstatprof is intended to be a fairly simple statistical profiler for\npython. It was ported directly from a statistical profiler for guile,\nalso named statprof, available from guile-lib [0].\n\n[0] http://wingolog.org/software/guile-lib/statprof/\n\nTo start profiling, call statprof.start():", "try:\n ...
"""This is a copy of the htmlDecode function in Webware. @@TR: It implemented more efficiently. """ from Cheetah.Utils.htmlEncode import htmlCodesReversed def htmlDecode(s, codes=htmlCodesReversed): """ Returns the ASCII decoded version of the given HTML string. This does NOT remove normal HTML tags like <p>. It is the inverse of htmlEncode().""" for code in codes: s = s.replace(code[1], code[0]) return s
[ [ 8, 0, 0.2143, 0.3571, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.5, 0.0714, 0, 0.66, 0.5, 756, 0, 1, 0, 0, 756, 0, 0 ], [ 2, 0, 0.8214, 0.4286, 0, 0.66, ...
[ "\"\"\"This is a copy of the htmlDecode function in Webware.\n\n@@TR: It implemented more efficiently.\n\n\"\"\"", "from Cheetah.Utils.htmlEncode import htmlCodesReversed", "def htmlDecode(s, codes=htmlCodesReversed):\n \"\"\" Returns the ASCII decoded version of the given HTML string. This does\n NOT rem...
#!/usr/bin/env python """ Miscellaneous functions/objects used by Cheetah but also useful standalone. """ import os # Used in mkdirsWithPyInitFile. import sys # Used in die. ################################################## ## MISCELLANEOUS FUNCTIONS def die(reason): sys.stderr.write(reason + '\n') sys.exit(1) def useOrRaise(thing, errmsg=''): """Raise 'thing' if it's a subclass of Exception. Otherwise return it. Called by: Cheetah.Servlet.cgiImport() """ if isinstance(thing, type) and issubclass(thing, Exception): raise thing(errmsg) return thing def checkKeywords(dic, legalKeywords, what='argument'): """Verify no illegal keyword arguments were passed to a function. in : dic, dictionary (**kw in the calling routine). legalKeywords, list of strings, the keywords that are allowed. what, string, suffix for error message (see function source). out: None. exc: TypeError if 'dic' contains a key not in 'legalKeywords'. called by: Cheetah.Template.__init__() """ # XXX legalKeywords could be a set when sets get added to Python. for k in dic.keys(): # Can be dic.iterkeys() if Python >= 2.2. if k not in legalKeywords: raise TypeError("'%s' is not a valid %s" % (k, what)) def removeFromList(list_, *elements): """Save as list_.remove(each element) but don't raise an error if element is missing. Modifies 'list_' in place! Returns None. """ for elm in elements: try: list_.remove(elm) except ValueError: pass def mkdirsWithPyInitFiles(path): """Same as os.makedirs (mkdir 'path' and all missing parent directories) but also puts a Python '__init__.py' file in every directory it creates. Does nothing (without creating an '__init__.py' file) if the directory already exists. """ dir, fil = os.path.split(path) if dir and not os.path.exists(dir): mkdirsWithPyInitFiles(dir) if not os.path.exists(path): os.mkdir(path) init = os.path.join(path, "__init__.py") f = open(init, 'w') # Open and close to produce empty file. f.close() # vim: shiftwidth=4 tabstop=4 expandtab
[ [ 8, 0, 0.0448, 0.0448, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0746, 0.0149, 0, 0.66, 0.1429, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0896, 0.0149, 0, 0.66...
[ "\"\"\"\n Miscellaneous functions/objects used by Cheetah but also useful standalone.\n\"\"\"", "import os # Used in mkdirsWithPyInitFile.", "import sys # Used in die.", "def die(reason):\n sys.stderr.write(reason + '\\n')\n sys.exit(1)", " sys.stderr.write(reason + '\\n')", "...
# $Id: WebInputMixin.py,v 1.10 2006/01/06 21:56:54 tavis_rudd Exp $ """Provides helpers for Template.webInput(), a method for importing web transaction variables in bulk. See the docstring of webInput for full details. Meta-Data ================================================================================ Author: Mike Orr <iron@mso.oz.net> License: This software is released for unlimited distribution under the terms of the MIT license. See the LICENSE file. Version: $Revision: 1.10 $ Start Date: 2002/03/17 Last Revision Date: $Date: 2006/01/06 21:56:54 $ """ __author__ = "Mike Orr <iron@mso.oz.net>" __revision__ = "$Revision: 1.10 $"[11:-2] from Cheetah.Utils.Misc import useOrRaise class NonNumericInputError(ValueError): pass ################################################## ## PRIVATE FUNCTIONS AND CLASSES class _Converter: """A container object for info about type converters. .name, string, name of this converter (for error messages). .func, function, factory function. .default, value to use or raise if the real value is missing. .error, value to use or raise if .func() raises an exception. """ def __init__(self, name, func, default, error): self.name = name self.func = func self.default = default self.error = error def _lookup(name, func, multi, converters): """Look up a Webware field/cookie/value/session value. Return '(realName, value)' where 'realName' is like 'name' but with any conversion suffix strips off. Applies numeric conversion and single vs multi values according to the comments in the source. """ # Step 1 -- split off the conversion suffix from 'name'; e.g. "height:int". # If there's no colon, the suffix is "". 'longName' is the name with the # suffix, 'shortName' is without. # XXX This implementation assumes "height:" means "height". colon = name.find(':') if colon != -1: longName = name shortName, ext = name[:colon], name[colon+1:] else: longName = shortName = name ext = '' # Step 2 -- look up the values by calling 'func'. 
if longName != shortName: values = func(longName, None) or func(shortName, None) else: values = func(shortName, None) # 'values' is a list of strings, a string or None. # Step 3 -- Coerce 'values' to a list of zero, one or more strings. if values is None: values = [] elif isinstance(values, str): values = [values] # Step 4 -- Find a _Converter object or raise TypeError. try: converter = converters[ext] except KeyError: fmt = "'%s' is not a valid converter name in '%s'" tup = (ext, longName) raise TypeError(fmt % tup) # Step 5 -- if there's a converter func, run it on each element. # If the converter raises an exception, use or raise 'converter.error'. if converter.func is not None: tmp = values[:] values = [] for elm in tmp: try: elm = converter.func(elm) except (TypeError, ValueError): tup = converter.name, elm errmsg = "%s '%s' contains invalid characters" % tup elm = useOrRaise(converter.error, errmsg) values.append(elm) # 'values' is now a list of strings, ints or floats. # Step 6 -- If we're supposed to return a multi value, return the list # as is. If we're supposed to return a single value and the list is # empty, return or raise 'converter.default'. Otherwise, return the # first element in the list and ignore any additional values. if multi: return shortName, values if len(values) == 0: return shortName, useOrRaise(converter.default) return shortName, values[0] # vim: sw=4 ts=4 expandtab
[ [ 8, 0, 0.0735, 0.1176, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.1373, 0.0098, 0, 0.66, 0.1667, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.1471, 0.0098, 0, 0.66...
[ "\"\"\"Provides helpers for Template.webInput(), a method for importing web\ntransaction variables in bulk. See the docstring of webInput for full details.\n\nMeta-Data\n================================================================================\nAuthor: Mike Orr <iron@mso.oz.net>\nLicense: This software is r...
#
[]
[]
"""This is a copy of the htmlEncode function in Webware. @@TR: It implemented more efficiently. """ htmlCodes = [ ['&', '&amp;'], ['<', '&lt;'], ['>', '&gt;'], ['"', '&quot;'], ] htmlCodesReversed = htmlCodes[:] htmlCodesReversed.reverse() def htmlEncode(s, codes=htmlCodes): """ Returns the HTML encoded version of the given string. This is useful to display a plain ASCII text string on a web page.""" for code in codes: s = s.replace(code[0], code[1]) return s
[ [ 8, 0, 0.1667, 0.2857, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.4524, 0.2857, 0, 0.66, 0.25, 350, 0, 0, 0, 0, 0, 5, 0 ], [ 14, 0, 0.619, 0.0476, 0, 0.66, ...
[ "\"\"\"This is a copy of the htmlEncode function in Webware.\n\n\n@@TR: It implemented more efficiently.\n\n\"\"\"", "htmlCodes = [\n ['&', '&amp;'],\n ['<', '&lt;'],\n ['>', '&gt;'],\n ['\"', '&quot;'],\n]", "htmlCodesReversed = htmlCodes[:]", "htmlCodesReversed.reverse()", "def htmlEncode(s, c...
import sys
import os.path
import copy as copyModule
from ConfigParser import ConfigParser  # Python 2 name (configparser in py3)
import re
from tokenize import Intnumber, Floatnumber, Number
from types import *
import types
import new
import time
from StringIO import StringIO # not cStringIO because of unicode support
import imp                    # used by SettingsManager.updateSettingsFromPySrcFile()

# Regexes used by stringIsNumber(): a plain Python number, and a complex
# number written as "<num> + <num>" with optional parentheses.
numberRE = re.compile(Number)
complexNumberRE = re.compile('[\(]*' + Number + r'[ \t]*\+[ \t]*' + Number + '[\)]*')

# Types that can be safely rendered with str() (Python 2 type objects).
convertableToStrTypes = (StringType, IntType, FloatType, LongType,
                         ComplexType, NoneType, UnicodeType)

##################################################
## FUNCTIONS ##

def mergeNestedDictionaries(dict1, dict2, copy=False, deepcopy=False):
    """Recursively merge the values of dict2 into dict1.

    This little function is very handy for selectively overriding settings in a
    settings dictionary that has a nested structure.

    NOTE: dict1 is mutated in place unless copy/deepcopy is requested; the
    recursive call does not propagate the copy flags (only the top level is
    copied).
    """
    if copy:
        dict1 = copyModule.copy(dict1)
    elif deepcopy:
        dict1 = copyModule.deepcopy(dict1)
    for key, val in dict2.iteritems():
        # Only recurse when BOTH sides hold a dict; otherwise dict2 wins.
        if key in dict1 and isinstance(val, dict) and isinstance(dict1[key], dict):
            dict1[key] = mergeNestedDictionaries(dict1[key], val)
        else:
            dict1[key] = val
    return dict1

def stringIsNumber(S):
    """Return True if theString represents a Python number, False otherwise.

    This also works for complex numbers and numbers with +/- in front."""
    S = S.strip()
    # Strip a single leading sign before matching.
    if S[0] in '-+' and len(S) > 1:
        S = S[1:].strip()
    match = complexNumberRE.match(S)
    if not match:
        match = numberRE.match(S)
    # The whole (remaining) string must be consumed by the match.
    if not match or (match.end() != len(S)):
        return False
    else:
        return True

def convStringToNum(theString):
    """Convert a string representation of a Python number to the Python version"""
    if not stringIsNumber(theString):
        raise Error(theString + ' cannot be converted to a Python number')
    # eval with empty globals/locals; safe only because stringIsNumber()
    # restricted the input to numeric literals.
    return eval(theString, {}, {})


class Error(Exception):
    # Module-level error type; referenced by convStringToNum() above
    # (defined later in the file, which is fine at call time).
    pass

class NoDefault(object):
    # Sentinel used by SettingsManager.setting() to mean "no default given".
    pass


class ConfigParserCaseSensitive(ConfigParser):
    """A case sensitive version of the standard Python ConfigParser."""
    def optionxform(self, optionstr):
        """Don't change the case as is done in the default implemenation."""
        return optionstr


class _SettingsCollector(object):
    """An abstract base class that provides the methods SettingsManager uses
    to collect settings from config files and strings.

    This class only collects settings it doesn't modify the _settings
    dictionary of SettingsManager instances in any way.
    """

    _ConfigParserClass = ConfigParserCaseSensitive

    def readSettingsFromModule(self, mod, ignoreUnderscored=True):
        """Returns all settings from a Python module.
        """
        S = {}
        attrs = vars(mod)
        for k, v in attrs.iteritems():
            if (ignoreUnderscored and k.startswith('_')):
                continue
            else:
                S[k] = v
        return S

    def readSettingsFromPySrcStr(self, theString):
        """Return a dictionary of the settings in a Python src string."""
        # SECURITY: exec's arbitrary Python source; callers must trust input.
        globalsDict = {'True': (1==1),
                       'False': (0==1),
                       }
        newSettings = {'self': self}
        exec((theString+os.linesep), globalsDict, newSettings)
        del newSettings['self']
        # Wrap the resulting names in a throwaway module so the module-based
        # reader can filter them uniformly.
        module = new.module('temp_settings_module')
        module.__dict__.update(newSettings)
        return self.readSettingsFromModule(module)

    def readSettingsFromConfigFileObj(self, inFile, convert=True):
        """Return the settings from a config file that uses the syntax accepted by
        Python's standard ConfigParser module (like Windows .ini files).

        NOTE: this method maintains case unlike the ConfigParser module, unless
        this class was initialized with the 'caseSensitive' keyword set to False.

        All setting values are initially parsed as strings. However, If the
        'convert' arg is True this method will do the following value
        conversions:

        * all Python numeric literals will be coverted from string to number
        * The string 'None' will be converted to the Python value None
        * The string 'True' will be converted to a Python truth value
        * The string 'False' will be converted to a Python false value
        * Any string starting with 'python:' will be treated as a Python literal
          or expression that needs to be eval'd. This approach is useful for
          declaring lists and dictionaries.

        If a config section titled 'Globals' is present the options defined
        under it will be treated as top-level settings.
        """
        p = self._ConfigParserClass()
        p.readfp(inFile)
        sects = p.sections()
        newSettings = {}
        # NOTE(review): the two statements below duplicate the two above
        # verbatim; harmless but redundant.
        sects = p.sections()
        newSettings = {}
        for s in sects:
            newSettings[s] = {}
            for o in p.options(s):
                if o != '__name__':  # ConfigParser's implicit bookkeeping key
                    newSettings[s][o] = p.get(s, o)

        ## loop through new settings -> deal with global settings, numbers,
        ## booleans and None ++ also deal with 'importSettings' commands

        for sect, subDict in newSettings.items():
            for key, val in subDict.items():
                if convert:
                    if val.lower().startswith('python:'):
                        # SECURITY: eval of config-supplied source.
                        subDict[key] = eval(val[7:], {}, {})
                    if val.lower() == 'none':
                        subDict[key] = None
                    if val.lower() == 'true':
                        subDict[key] = True
                    if val.lower() == 'false':
                        subDict[key] = False
                    if stringIsNumber(val):
                        subDict[key] = convStringToNum(val)

                ## now deal with any 'importSettings' commands
                if key.lower() == 'importsettings':
                    # value is "path" or "path; nested.key.path"
                    # NOTE(review): readSettingsFromPySrcFile is referenced
                    # here but not defined in this chunk — presumably provided
                    # elsewhere in the full class; verify.
                    if val.find(';') < 0:
                        importedSettings = self.readSettingsFromPySrcFile(val)
                    else:
                        path = val.split(';')[0]
                        rest = ''.join(val.split(';')[1:]).strip()
                        parentDict = self.readSettingsFromPySrcFile(path)
                        importedSettings = eval('parentDict["' + rest + '"]')

                    subDict.update(mergeNestedDictionaries(subDict,
                                                           importedSettings))

            # Hoist a [Globals] section's options to the top level.
            if sect.lower() == 'globals':
                newSettings.update(newSettings[sect])
                del newSettings[sect]

        return newSettings


class SettingsManager(_SettingsCollector):
    """A mixin class that provides facilities for managing application settings.

    SettingsManager is designed to work well with nested settings dictionaries
    of any depth.
    """

    def __init__(self):
        super(SettingsManager, self).__init__()
        self._settings = {}
        self._initializeSettings()

    def _defaultSettings(self):
        return {}

    def _initializeSettings(self):
        """A hook that allows for complex setting initialization sequences that
        involve references to 'self' or other settings.  For example:
              self._settings['myCalcVal'] = self._settings['someVal'] * 15
        This method should be called by the class' __init__() method when needed.
        The dummy implementation should be reimplemented by subclasses.
        """
        pass

    ## core post startup methods

    def setting(self, name, default=NoDefault):
        """Get a setting from self._settings, with or without a default value."""
        if default is NoDefault:
            return self._settings[name]
        else:
            return self._settings.get(name, default)

    def hasSetting(self, key):
        """True/False"""
        return key in self._settings

    def setSetting(self, name, value):
        """Set a setting in self._settings."""
        self._settings[name] = value

    def settings(self):
        """Return a reference to the settings dictionary"""
        return self._settings

    def copySettings(self):
        """Returns a shallow copy of the settings dictionary"""
        return copyModule.copy(self._settings)

    def deepcopySettings(self):
        """Returns a deep copy of the settings dictionary"""
        return copyModule.deepcopy(self._settings)

    def updateSettings(self, newSettings, merge=True):
        """Update the settings with a selective merge or a complete overwrite."""
        if merge:
            mergeNestedDictionaries(self._settings, newSettings)
        else:
            self._settings.update(newSettings)

    ## source specific update methods

    def updateSettingsFromPySrcStr(self, theString, merge=True):
        """Update the settings from a code in a Python src string."""
        newSettings = self.readSettingsFromPySrcStr(theString)
        # A 'mergeSettings' key inside the new settings overrides the arg.
        self.updateSettings(newSettings,
                            merge=newSettings.get('mergeSettings', merge) )

    def updateSettingsFromConfigFileObj(self, inFile, convert=True, merge=True):
        """See the docstring for .updateSettingsFromConfigFile()

        The caller of this method is responsible for closing the inFile file
        object."""
        newSettings = self.readSettingsFromConfigFileObj(inFile, convert=convert)
        self.updateSettings(newSettings,
                            merge=newSettings.get('mergeSettings', merge))

    def updateSettingsFromConfigStr(self, configStr, convert=True, merge=True):
        """See the docstring for .updateSettingsFromConfigFile()
        """
        # Prepend a [globals] header so bare options parse as top-level.
        configStr = '[globals]\n' + configStr
        inFile = StringIO(configStr)
        newSettings = self.readSettingsFromConfigFileObj(inFile, convert=convert)
        self.updateSettings(newSettings,
                            merge=newSettings.get('mergeSettings', merge))
[ [ 1, 0, 0.0034, 0.0034, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.0069, 0.0034, 0, 0.66, 0.0455, 79, 0, 1, 0, 0, 79, 0, 0 ], [ 1, 0, 0.0103, 0.0034, 0, 0....
[ "import sys", "import os.path", "import copy as copyModule", "from ConfigParser import ConfigParser", "import re", "from tokenize import Intnumber, Floatnumber, Number", "from types import *", "import types", "import new", "import time", "from StringIO import StringIO # not cStringIO because of ...
Version = '2.4.1'
VersionTuple = (2, 4, 1, 'final', 0)

MinCompatibleVersion = '2.0rc6'
MinCompatibleVersionTuple = (2, 0, 0, 'candidate', 6)

####
def convertVersionStringToTuple(s):
    """Convert a version string such as '2.0', '2.0.2', '2.0a1', '2.0b1' or
    '2.0rc1' into a comparable 5-tuple:

        (major, minor, micro, releaseType, releaseTypeSubNum)

    where releaseType is one of 'alpha', 'beta', 'candidate' or 'final'.
    Tuples compare in the expected release order because
    'alpha' < 'beta' < 'candidate' < 'final' lexicographically.
    """
    versionNum = [0, 0, 0]          # missing micro/minor parts default to 0
    releaseType = 'final'
    releaseTypeSubNum = 0
    # Detect the pre-release marker; the numeric part before it is kept.
    if s.find('a') != -1:
        num, releaseTypeSubNum = s.split('a')
        releaseType = 'alpha'
    elif s.find('b') != -1:
        num, releaseTypeSubNum = s.split('b')
        releaseType = 'beta'
    elif s.find('rc') != -1:
        num, releaseTypeSubNum = s.split('rc')
        releaseType = 'candidate'
    else:
        num = s
    num = num.split('.')
    for i in range(len(num)):
        versionNum[i] = int(num[i])
    # (A previous `if len(versionNum) < 3` pad here was dead code:
    # versionNum always starts with exactly three slots.)
    releaseTypeSubNum = int(releaseTypeSubNum)

    return tuple(versionNum + [releaseType, releaseTypeSubNum])

if __name__ == '__main__':
    c = convertVersionStringToTuple
    print(c('2.0a1'))
    print(c('2.0b1'))
    print(c('2.0rc1'))
    print(c('2.0'))
    print(c('2.0.2'))

    assert c('0.9.19b1') < c('0.9.19')
    assert c('0.9b1') < c('0.9.19')
    assert c('2.0a2') > c('2.0a1')
    assert c('2.0b1') > c('2.0a2')
    assert c('2.0b2') > c('2.0b1')
    assert c('2.0b2') == c('2.0b2')
    assert c('2.0rc1') > c('2.0b1')
    assert c('2.0rc2') > c('2.0rc1')
    assert c('2.0rc2') > c('2.0b1')
    assert c('2.0') > c('2.0a1')
    assert c('2.0') > c('2.0b1')
    assert c('2.0') > c('2.0rc1')
    assert c('2.0.1') > c('2.0')
[ [ 14, 0, 0.0172, 0.0172, 0, 0.66, 0, 444, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.0345, 0.0172, 0, 0.66, 0.2, 292, 0, 0, 0, 0, 0, 8, 0 ], [ 14, 0, 0.069, 0.0172, 0, 0.66,...
[ "Version = '2.4.1'", "VersionTuple = (2, 4, 1, 'final', 0)", "MinCompatibleVersion = '2.0rc6'", "MinCompatibleVersionTuple = (2, 0, 0, 'candidate', 6)", "def convertVersionStringToTuple(s):\n versionNum = [0, 0, 0]\n releaseType = 'final'\n releaseTypeSubNum = 0\n if s.find('a')!=-1:\n nu...
'''
Cheetah is an open source template engine and code generation tool.

It can be used standalone or combined with other tools and frameworks. Web
development is its principle use, but Cheetah is very flexible and is also being
used to generate C++ game code, Java, sql, form emails and even Python code.

Homepage
http://www.cheetahtemplate.org/

Documentation
http://cheetahtemplate.org/learn.html

Mailing list
cheetahtemplate-discuss@lists.sourceforge.net
Subscribe at http://lists.sourceforge.net/lists/listinfo/cheetahtemplate-discuss

'''
# Python 2 implicit-relative import: re-exports the sibling Version module's
# names (Version, VersionTuple, convertVersionStringToTuple, ...) at package
# level.
from Version import *
[ [ 8, 0, 0.475, 0.9, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 1, 0.05, 0, 0.66, 1, 444, 0, 1, 0, 0, 444, 0, 0 ] ]
[ "'''\nCheetah is an open source template engine and code generation tool.\n\nIt can be used standalone or combined with other tools and frameworks. Web\ndevelopment is its principle use, but Cheetah is very flexible and is also being\nused to generate C++ game code, Java, sql, form emails and even Python code.\n\nH...
import os.path
import string

# Build a 256-entry translation table: ASCII letters and digits map to
# themselves, every other byte collapses to '_', so translated paths are
# legal Python identifiers.
_keep = string.digits + string.letters
_cells = []
for _i in range(256):
    _ch = chr(_i)
    if _ch in _keep:
        _cells.append(_ch)
    else:
        _cells.append('_')
_pathNameTransChars = ''.join(_cells)
del _keep, _cells, _i, _ch

def convertTmplPathToModuleName(tmplPath,
                                _pathNameTransChars=_pathNameTransChars,
                                splitdrive=os.path.splitdrive,
                                translate=string.translate,
                                ):
    """Map a template file path to a usable module name: drop any drive
    prefix, then replace every non-alphanumeric byte with '_'.

    The default arguments pre-bind the helpers for speed and to freeze the
    translation table at definition time.
    """
    return translate(splitdrive(tmplPath)[1], _pathNameTransChars)
[ [ 1, 0, 0.0667, 0.0667, 0, 0.66, 0, 79, 0, 1, 0, 0, 79, 0, 0 ], [ 1, 0, 0.1333, 0.0667, 0, 0.66, 0.2, 890, 0, 1, 0, 0, 890, 0, 0 ], [ 14, 0, 0.2667, 0.0667, 0, 0.66...
[ "import os.path", "import string", "l = ['_'] * 256", "for c in string.digits + string.letters:\n l[ord(c)] = c", " l[ord(c)] = c", "_pathNameTransChars = string.join(l, '')", "def convertTmplPathToModuleName(tmplPath,\n _pathNameTransChars=_pathNameTransChars,\n ...
#!/usr/bin/env python
# Count how often each Cheetah directive is used in one or more templates.
import os
import pprint

try:
    from functools import reduce
except ImportError:
    # Assume we have reduce
    pass

from Cheetah import Parser
from Cheetah import Compiler
from Cheetah import Template


class Analyzer(Parser.Parser):
    # A Parser subclass that tallies every directive it eats into self.calls.
    def __init__(self, *args, **kwargs):
        self.calls = {}  # directive name -> occurrence count
        super(Analyzer, self).__init__(*args, **kwargs)

    def eatDirective(self):
        # Peek at the directive about to be consumed, bump its count, then
        # let the real parser handle it.
        directive = self.matchDirective()
        try:
            self.calls[directive] += 1
        except KeyError:
            self.calls[directive] = 1
        super(Analyzer, self).eatDirective()


class AnalysisCompiler(Compiler.ModuleCompiler):
    # Module compiler wired to use the counting parser above.
    parserClass = Analyzer


def analyze(source):
    """Compile *source* with the counting parser and return its directive
    tally (dict of directive name -> count)."""
    klass = Template.Template.compile(source, compilerClass=AnalysisCompiler)
    return klass._CHEETAH_compilerInstance._parser.calls


def main_file(f):
    """Analyze a single template file; returns its directive tally."""
    fd = open(f, 'r')
    try:
        print u'>>> Analyzing %s' % f
        calls = analyze(fd.read())
        return calls
    finally:
        fd.close()


def _find_templates(directory, suffix):
    # Yield every file under *directory* whose name ends with *suffix*.
    for root, dirs, files in os.walk(directory):
        for f in files:
            if not f.endswith(suffix):
                continue
            yield root + os.path.sep + f


def _analyze_templates(iterable):
    # Lazily analyze each template path from *iterable*.
    for template in iterable:
        yield main_file(template)


def main_dir(opts):
    """Analyze every template in opts.dir and sum the per-file tallies."""
    results = _analyze_templates(_find_templates(opts.dir, opts.suffix))
    totals = {}
    for series in results:
        if not series:
            continue
        for k, v in series.iteritems():
            try:
                totals[k] += v
            except KeyError:
                totals[k] = v
    return totals


def main():
    """Command-line entry point: -f FILE for one template, -d DIR for a
    directory tree (with optional --suffix filter)."""
    from optparse import OptionParser
    op = OptionParser()
    op.add_option('-f', '--file', dest='file', default=None,
        help='Specify a single file to analyze')
    op.add_option('-d', '--dir', dest='dir', default=None,
        help='Specify a directory of templates to analyze')
    op.add_option('--suffix', default='tmpl', dest='suffix',
        help='Specify a custom template file suffix for the -d option (default: "tmpl")')

    opts, args = op.parse_args()

    if not opts.file and not opts.dir:
        op.print_help()
        return

    results = None
    if opts.file:
        results = main_file(opts.file)
    if opts.dir:
        results = main_dir(opts)

    pprint.pprint(results)

if __name__ == '__main__':
    main()
[ [ 1, 0, 0.0306, 0.0102, 0, 0.66, 0, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0408, 0.0102, 0, 0.66, 0.0714, 276, 0, 1, 0, 0, 276, 0, 0 ], [ 7, 0, 0.0816, 0.051, 0, 0...
[ "import os", "import pprint", "try:\n from functools import reduce\nexcept ImportError:\n # Assume we have reduce\n pass", " from functools import reduce", "from Cheetah import Parser", "from Cheetah import Compiler", "from Cheetah import Template", "class Analyzer(Parser.Parser):\n def...
# $Id: ImportHooks.py,v 1.27 2007/11/16 18:28:47 tavis_rudd Exp $
"""Provides some import hooks to allow Cheetah's .tmpl files to be imported
directly like Python .py modules.

To use these:
  import Cheetah.ImportHooks
  Cheetah.ImportHooks.install()

Meta-Data
================================================================================
Author: Tavis Rudd <tavis@damnsimple.com>
License: This software is released for unlimited distribution under the
         terms of the MIT license.  See the LICENSE file.
Version: $Revision: 1.27 $
Start Date: 2001/03/30
Last Revision Date: $Date: 2007/11/16 18:28:47 $
"""
__author__ = "Tavis Rudd <tavis@damnsimple.com>"
__revision__ = "$Revision: 1.27 $"[11:-2]

import sys
import os.path
import types
import __builtin__
import new
import imp
from threading import RLock
import string
import traceback

from Cheetah import ImportManager
from Cheetah.ImportManager import DirOwner
from Cheetah.Compiler import Compiler
from Cheetah.convertTmplPathToModuleName import convertTmplPathToModuleName

# True while the hooks are active; guards double-install/uninstall.
_installed = False

##################################################
## HELPER FUNCS

# Optional directory where compiled .py versions of templates are written.
_cacheDir = []
def setCacheDir(cacheDir):
    """Register a directory in which compiled template modules are cached."""
    global _cacheDir
    _cacheDir.append(cacheDir)

##################################################
## CLASSES

class CheetahDirOwner(DirOwner):
    """A DirOwner that, when a plain .py module is not found, looks for a
    Cheetah template with one of `templateFileExtensions` and compiles it
    into a module on the fly."""
    _lock = RLock()
    _acquireLock = _lock.acquire
    _releaseLock = _lock.release

    templateFileExtensions = ('.tmpl',)

    def getmod(self, name):
        """Return the module object for *name*, compiling a template if
        necessary; raises ImportError if compilation fails."""
        self._acquireLock()
        try:
            mod = DirOwner.getmod(self, name)
            if mod:
                return mod

            for ext in self.templateFileExtensions:
                tmplPath = os.path.join(self.path, name + ext)
                if os.path.exists(tmplPath):
                    try:
                        return self._compile(name, tmplPath)
                    except:
                        # @@TR: log the error
                        # Wrap the original traceback in an ImportError so the
                        # import machinery surfaces the compile failure.
                        exc_txt = traceback.format_exc()
                        exc_txt = '  ' + ('  \n'.join(exc_txt.splitlines()))
                        raise ImportError(
                            'Error while compiling Cheetah module'
                            ' %(name)s, original traceback follows:\n%(exc_txt)s' % locals())
            ##
            return None
        finally:
            self._releaseLock()

    def _compile(self, name, tmplPath):
        """Compile the template at *tmplPath* into a fresh module named
        *name*; if a cache dir is configured, also write the generated .py
        source there (best-effort)."""
        ## @@ consider adding an ImportError raiser here
        code = str(Compiler(file=tmplPath, moduleName=name,
                            mainClassName=name))
        if _cacheDir:
            __file__ = os.path.join(
                _cacheDir[0],
                convertTmplPathToModuleName(tmplPath)) + '.py'
            try:
                open(__file__, 'w').write(code)
            except (IOError, OSError):
                # FIX: file writes raise IOError on Python 2; the original
                # `except OSError` missed it.  Fall back to the template path.
                traceback.print_exc(file=sys.stderr)
                __file__ = tmplPath
        else:
            __file__ = tmplPath
        co = compile(code + '\n', __file__, 'exec')

        mod = imp.new_module(name)
        mod.__file__ = co.co_filename
        if _cacheDir:
            mod.__orig_file__ = tmplPath # @@TR: this is used in the WebKit
                                         # filemonitoring code
        mod.__co__ = co
        return mod


##################################################
## FUNCTIONS

def install(templateFileExtensions=('.tmpl',)):
    """Install the Cheetah Import Hooks (idempotent)."""
    global _installed
    if not _installed:
        CheetahDirOwner.templateFileExtensions = templateFileExtensions
        import __builtin__
        if isinstance(__builtin__.__import__, types.BuiltinFunctionType):
            global __oldimport__
            __oldimport__ = __builtin__.__import__
            ImportManager._globalOwnerTypes.insert(0, CheetahDirOwner)
            #ImportManager._globalOwnerTypes.append(CheetahDirOwner)
            global _manager
            _manager = ImportManager.ImportManager()
            _manager.setThreaded()
            _manager.install()
        # FIX: record that the hooks are active; the original never set this,
        # so repeated install() calls stacked managers.
        _installed = True

def uninstall():
    """Uninstall the Cheetah Import Hooks (no-op if not installed)."""
    global _installed
    # FIX: the original guard was inverted (`if not _installed:`), making
    # uninstall run only when the hooks were NOT installed.
    if _installed:
        import __builtin__
        if isinstance(__builtin__.__import__, types.MethodType):
            __builtin__.__import__ = __oldimport__
            global _manager
            del _manager
        _installed = False

if __name__ == '__main__':
    install()
[ [ 8, 0, 0.0761, 0.1159, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.1377, 0.0072, 0, 0.66, 0.0455, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.1449, 0.0072, 0, 0.66...
[ "\"\"\"Provides some import hooks to allow Cheetah's .tmpl files to be imported\ndirectly like Python .py modules.\n\nTo use these:\n import Cheetah.ImportHooks\n Cheetah.ImportHooks.install()\n\nMeta-Data", "__author__ = \"Tavis Rudd <tavis@damnsimple.com>\"", "__revision__ = \"$Revision: 1.27 $\"[11:-2]", ...
# $Id: _SkeletonPage.py,v 1.13 2002/10/01 17:52:02 tavis_rudd Exp $ """A baseclass for the SkeletonPage template Meta-Data ========== Author: Tavis Rudd <tavis@damnsimple.com>, Version: $Revision: 1.13 $ Start Date: 2001/04/05 Last Revision Date: $Date: 2002/10/01 17:52:02 $ """ __author__ = "Tavis Rudd <tavis@damnsimple.com>" __revision__ = "$Revision: 1.13 $"[11:-2] ################################################## ## DEPENDENCIES ## import time, types, os, sys # intra-package imports ... from Cheetah.Template import Template ################################################## ## GLOBALS AND CONSTANTS ## True = (1==1) False = (0==1) ################################################## ## CLASSES ## class _SkeletonPage(Template): """A baseclass for the SkeletonPage template""" docType = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" ' + \ '"http://www.w3.org/TR/html4/loose.dtd">' # docType = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" ' + \ #'"http://www.w3.org/TR/xhtml1l/DTD/transitional.dtd">' title = '' siteDomainName = 'www.example.com' siteCredits = 'Designed & Implemented by Tavis Rudd' siteCopyrightName = "Tavis Rudd" htmlTag = '<html>' def __init__(self, *args, **KWs): Template.__init__(self, *args, **KWs) self._metaTags = {'HTTP-EQUIV':{'keywords': 'Cheetah', 'Content-Type': 'text/html; charset=iso-8859-1', }, 'NAME':{'generator':'Cheetah: The Python-Powered Template Engine'} } # metaTags = {'HTTP_EQUIV':{'test':1234}, 'NAME':{'test':1234,'test2':1234} } self._stylesheets = {} # stylesheets = {'.cssClassName':'stylesheetCode'} self._stylesheetsOrder = [] # stylesheetsOrder = ['.cssClassName',] self._stylesheetLibs = {} # stylesheetLibs = {'libName':'libSrcPath'} self._javascriptLibs = {} self._javascriptTags = {} # self._javascriptLibs = {'libName':'libSrcPath'} self._bodyTagAttribs = {} def metaTags(self): """Return a formatted vesion of the self._metaTags dictionary, using the formatMetaTags function from 
Cheetah.Macros.HTML""" return self.formatMetaTags(self._metaTags) def stylesheetTags(self): """Return a formatted version of the self._stylesheetLibs and self._stylesheets dictionaries. The keys in self._stylesheets must be listed in the order that they should appear in the list self._stylesheetsOrder, to ensure that the style rules are defined in the correct order.""" stylesheetTagsTxt = '' for title, src in self._stylesheetLibs.items(): stylesheetTagsTxt += '<link rel="stylesheet" type="text/css" href="' + str(src) + '" />\n' if not self._stylesheetsOrder: return stylesheetTagsTxt stylesheetTagsTxt += '<style type="text/css"><!--\n' for identifier in self._stylesheetsOrder: if identifier not in self._stylesheets: warning = '# the identifier ' + identifier + \ 'was in stylesheetsOrder, but not in stylesheets' print(warning) stylesheetTagsTxt += warning continue attribsDict = self._stylesheets[identifier] cssCode = '' attribCode = '' for k, v in attribsDict.items(): attribCode += str(k) + ': ' + str(v) + '; ' attribCode = attribCode[:-2] # get rid of the last semicolon cssCode = '\n' + identifier + ' {' + attribCode + '}' stylesheetTagsTxt += cssCode stylesheetTagsTxt += '\n//--></style>\n' return stylesheetTagsTxt def javascriptTags(self): """Return a formatted version of the javascriptTags and javascriptLibs dictionaries. Each value in javascriptTags should be a either a code string to include, or a list containing the JavaScript version number and the code string. The keys can be anything. 
The same applies for javascriptLibs, but the string should be the SRC filename rather than a code string.""" javascriptTagsTxt = [] for key, details in self._javascriptTags.iteritems(): if not isinstance(details, (list, tuple)): details = ['', details] javascriptTagsTxt += ['<script language="JavaScript', str(details[0]), '" type="text/javascript"><!--\n', str(details[0]), '\n//--></script>\n'] for key, details in self._javascriptLibs.iteritems(): if not isinstance(details, (list, tuple)): details = ['', details] javascriptTagsTxt += ['<script language="JavaScript', str(details[0]), '" type="text/javascript" src="', str(details[1]), '" />\n'] return ''.join(javascriptTagsTxt) def bodyTag(self): """Create a body tag from the entries in the dict bodyTagAttribs.""" return self.formHTMLTag('body', self._bodyTagAttribs) def imgTag(self, src, alt='', width=None, height=None, border=0): """Dynamically generate an image tag. Cheetah will try to convert the src argument to a WebKit serverSidePath relative to the servlet's location. 
If width and height aren't specified they are calculated using PIL or ImageMagick if available.""" src = self.normalizePath(src) if not width or not height: try: # see if the dimensions can be calc'd with PIL import Image im = Image.open(src) calcWidth, calcHeight = im.size del im if not width: width = calcWidth if not height: height = calcHeight except: try: # try imageMagick instead calcWidth, calcHeight = os.popen( 'identify -format "%w,%h" ' + src).read().split(',') if not width: width = calcWidth if not height: height = calcHeight except: pass if width and height: return ''.join(['<img src="', src, '" width="', str(width), '" height="', str(height), '" alt="', alt, '" border="', str(border), '" />']) elif width: return ''.join(['<img src="', src, '" width="', str(width), '" alt="', alt, '" border="', str(border), '" />']) elif height: return ''.join(['<img src="', src, '" height="', str(height), '" alt="', alt, '" border="', str(border), '" />']) else: return ''.join(['<img src="', src, '" alt="', alt, '" border="', str(border), '" />']) def currentYr(self): """Return a string representing the current yr.""" return time.strftime("%Y", time.localtime(time.time())) def currentDate(self, formatString="%b %d, %Y"): """Return a string representing the current localtime.""" return time.strftime(formatString, time.localtime(time.time())) def spacer(self, width=1,height=1): return '<img src="spacer.gif" width="%s" height="%s" alt="" />'% (str(width), str(height)) def formHTMLTag(self, tagName, attributes={}): """returns a string containing an HTML <tag> """ tagTxt = ['<', tagName.lower()] for name, val in attributes.items(): tagTxt += [' ', name.lower(), '="', str(val), '"'] tagTxt.append('>') return ''.join(tagTxt) def formatMetaTags(self, metaTags): """format a dict of metaTag definitions into an HTML version""" metaTagsTxt = [] if 'HTTP-EQUIV' in metaTags: for http_equiv, contents in metaTags['HTTP-EQUIV'].items(): metaTagsTxt += ['<meta http-equiv="', 
str(http_equiv), '" content="', str(contents), '" />\n'] if 'NAME' in metaTags: for name, contents in metaTags['NAME'].items(): metaTagsTxt += ['<meta name="', str(name), '" content="', str(contents), '" />\n'] return ''.join(metaTagsTxt)
[ [ 8, 0, 0.0282, 0.0423, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 14, 0, 0.0516, 0.0047, 0, 0.66, 0.2, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 14, 0, 0.0563, 0.0047, 0, 0.66, ...
[ "\"\"\"A baseclass for the SkeletonPage template\n\nMeta-Data\n==========\nAuthor: Tavis Rudd <tavis@damnsimple.com>,\nVersion: $Revision: 1.13 $\nStart Date: 2001/04/05\nLast Revision Date: $Date: 2002/10/01 17:52:02 $", "__author__ = \"Tavis Rudd <tavis@damnsimple.com>\"", "__revision__ = \"$Revision: 1.13 $\...
""" Parser classes for Cheetah's Compiler Classes: ParseError( Exception ) _LowLevelParser( Cheetah.SourceReader.SourceReader ), basically a lexer _HighLevelParser( _LowLevelParser ) Parser === _HighLevelParser (an alias) """ import os import sys import re from re import DOTALL, MULTILINE from types import StringType, ListType, TupleType, ClassType, TypeType import time from tokenize import pseudoprog import inspect import new import traceback from Cheetah.SourceReader import SourceReader from Cheetah import Filters from Cheetah import ErrorCatchers from Cheetah.Unspecified import Unspecified from Cheetah.Macros.I18n import I18n # re tools _regexCache = {} def cachedRegex(pattern): if pattern not in _regexCache: _regexCache[pattern] = re.compile(pattern) return _regexCache[pattern] def escapeRegexChars(txt, escapeRE=re.compile(r'([\$\^\*\+\.\?\{\}\[\]\(\)\|\\])')): """Return a txt with all special regular expressions chars escaped.""" return escapeRE.sub(r'\\\1', txt) def group(*choices): return '(' + '|'.join(choices) + ')' def nongroup(*choices): return '(?:' + '|'.join(choices) + ')' def namedGroup(name, *choices): return '(P:<' + name +'>' + '|'.join(choices) + ')' def any(*choices): return group(*choices) + '*' def maybe(*choices): return group(*choices) + '?' 
################################################## ## CONSTANTS & GLOBALS ## NO_CACHE = 0 STATIC_CACHE = 1 REFRESH_CACHE = 2 SET_LOCAL = 0 SET_GLOBAL = 1 SET_MODULE = 2 ################################################## ## Tokens for the parser ## #generic identchars = "abcdefghijklmnopqrstuvwxyz" \ "ABCDEFGHIJKLMNOPQRSTUVWXYZ_" namechars = identchars + "0123456789" #operators powerOp = '**' unaryArithOps = ('+', '-', '~') binaryArithOps = ('+', '-', '/', '//', '%') shiftOps = ('>>', '<<') bitwiseOps = ('&', '|', '^') assignOp = '=' augAssignOps = ('+=', '-=', '/=', '*=', '**=', '^=', '%=', '>>=', '<<=', '&=', '|=', ) assignmentOps = (assignOp,) + augAssignOps compOps = ('<', '>', '==', '!=', '<=', '>=', '<>', 'is', 'in',) booleanOps = ('and', 'or', 'not') operators = (powerOp,) + unaryArithOps + binaryArithOps \ + shiftOps + bitwiseOps + assignmentOps \ + compOps + booleanOps delimeters = ('(', ')', '{', '}', '[', ']', ',', '.', ':', ';', '=', '`') + augAssignOps keywords = ('and', 'del', 'for', 'is', 'raise', 'assert', 'elif', 'from', 'lambda', 'return', 'break', 'else', 'global', 'not', 'try', 'class', 'except', 'if', 'or', 'while', 'continue', 'exec', 'import', 'pass', 'def', 'finally', 'in', 'print', ) single3 = "'''" double3 = '"""' tripleQuotedStringStarts = ("'''", '"""', "r'''", 'r"""', "R'''", 'R"""', "u'''", 'u"""', "U'''", 'U"""', "ur'''", 'ur"""', "Ur'''", 'Ur"""', "uR'''", 'uR"""', "UR'''", 'UR"""') tripleQuotedStringPairs = {"'''": single3, '"""': double3, "r'''": single3, 'r"""': double3, "u'''": single3, 'u"""': double3, "ur'''": single3, 'ur"""': double3, "R'''": single3, 'R"""': double3, "U'''": single3, 'U"""': double3, "uR'''": single3, 'uR"""': double3, "Ur'''": single3, 'Ur"""': double3, "UR'''": single3, 'UR"""': double3, } closurePairs= {')':'(',']':'[','}':'{'} closurePairsRev= {'(':')','[':']','{':'}'} ################################################## ## Regex chunks for the parser ## tripleQuotedStringREs = {} def 
makeTripleQuoteRe(start, end): start = escapeRegexChars(start) end = escapeRegexChars(end) return re.compile(r'(?:' + start + r').*?' + r'(?:' + end + r')', re.DOTALL) for start, end in tripleQuotedStringPairs.items(): tripleQuotedStringREs[start] = makeTripleQuoteRe(start, end) WS = r'[ \f\t]*' EOL = r'\r\n|\n|\r' EOLZ = EOL + r'|\Z' escCharLookBehind = nongroup(r'(?<=\A)', r'(?<!\\)') nameCharLookAhead = r'(?=[A-Za-z_])' identRE=re.compile(r'[a-zA-Z_][a-zA-Z_0-9]*') EOLre=re.compile(r'(?:\r\n|\r|\n)') specialVarRE=re.compile(r'([a-zA-z_]+)@') # for matching specialVar comments # e.g. ##author@ Tavis Rudd unicodeDirectiveRE = re.compile( r'(?:^|\r\n|\r|\n)\s*#\s{0,5}unicode[:\s]*([-\w.]*)\s*(?:\r\n|\r|\n)', re.MULTILINE) encodingDirectiveRE = re.compile( r'(?:^|\r\n|\r|\n)\s*#\s{0,5}encoding[:\s]*([-\w.]*)\s*(?:\r\n|\r|\n)', re.MULTILINE) escapedNewlineRE = re.compile(r'(?<!\\)((\\\\)*)\\(n|012)') directiveNamesAndParsers = { # importing and inheritance 'import': None, 'from': None, 'extends': 'eatExtends', 'implements': 'eatImplements', 'super': 'eatSuper', # output, filtering, and caching 'slurp': 'eatSlurp', 'raw': 'eatRaw', 'include': 'eatInclude', 'cache': 'eatCache', 'filter': 'eatFilter', 'echo': None, 'silent': None, 'transform': 'eatTransform', 'call': 'eatCall', 'arg': 'eatCallArg', 'capture': 'eatCapture', # declaration, assignment, and deletion 'attr': 'eatAttr', 'def': 'eatDef', 'block': 'eatBlock', '@': 'eatDecorator', 'defmacro': 'eatDefMacro', 'closure': 'eatClosure', 'set': 'eatSet', 'del': None, # flow control 'if': 'eatIf', 'while': None, 'for': None, 'else': None, 'elif': None, 'pass': None, 'break': None, 'continue': None, 'stop': None, 'return': None, 'yield': None, # little wrappers 'repeat': None, 'unless': None, # error handling 'assert': None, 'raise': None, 'try': None, 'except': None, 'finally': None, 'errorCatcher': 'eatErrorCatcher', # intructions to the parser and compiler 'breakpoint': 'eatBreakPoint', 'compiler': 'eatCompiler', 
'compiler-settings': 'eatCompilerSettings',

    # misc
    'shBang': 'eatShbang',
    'encoding': 'eatEncoding',

    'end': 'eatEndDirective',
}

# Maps directive names to the handler used when their explicit "#end <name>"
# form is parsed.  None means no special handling is needed.  "has short-form"
# marks directives that also support the single-line "#if x: y" style, which
# is closed implicitly rather than by an #end directive.
endDirectiveNamesAndHandlers = {
    'def': 'handleEndDef',      # has short-form
    'block': None,              # has short-form
    'closure': None,            # has short-form
    'cache': None,              # has short-form
    'call': None,               # has short-form
    'capture': None,            # has short-form
    'filter': None,
    'errorCatcher': None,
    'while': None,              # has short-form
    'for': None,                # has short-form
    'if': None,                 # has short-form
    'try': None,                # has short-form
    'repeat': None,             # has short-form
    'unless': None,             # has short-form
}

##################################################
## CLASSES

# @@TR: SyntaxError doesn't call exception.__str__ for some reason!
#class ParseError(SyntaxError):
class ParseError(ValueError):
    """Error raised when Cheetah template source cannot be parsed.

    str() of the exception produces a report showing the offending source
    line in context, with a caret marking the error column.
    """

    def __init__(self, stream, msg='Invalid Syntax', extMsg='',
                 lineno=None, col=None):
        # stream: the SourceReader being parsed; its current position is used
        # to locate the error unless an explicit lineno/col is supplied.
        self.stream = stream
        # clamp the stream position so getRowColLine() stays in bounds
        if stream.pos() >= len(stream):
            stream.setPos(len(stream) -1)
        self.msg = msg
        self.extMsg = extMsg        # extra free-form text appended to the report
        self.lineno = lineno
        self.col = col

    def __str__(self):
        return self.report()

    def report(self):
        """Return a multi-line, human-readable description of the error."""
        stream = self.stream
        if stream.filename():
            f = " in file %s" % stream.filename()
        else:
            f = ''
        report = ''
        if self.lineno:
            lineno = self.lineno
            # explicit position was given; pull the matching source line
            row, col, line = (lineno, (self.col or 0),
                              self.stream.splitlines()[lineno-1])
        else:
            row, col, line = self.stream.getRowColLine()

        ## get the surrounding lines (up to 3 before and 3 after)
        lines = stream.splitlines()
        prevLines = []                  # (rowNum, content)
        for i in range(1, 4):
            if row-1-i <=0:
                break
            prevLines.append( (row-i, lines[row-1-i]) )

        nextLines = []                  # (rowNum, content)
        for i in range(1, 4):
            if not row-1+i < len(lines):
                break
            nextLines.append( (row+i, lines[row-1+i]) )
        nextLines.reverse()

        ## print the main message
        report += "\n\n%s\n" %self.msg
        report += "Line %i, column %i%s\n\n" % (row, col, f)
        report += 'Line|Cheetah Code\n'
        report += '----|-------------------------------------------------------------\n'
        while prevLines:
            lineInfo = prevLines.pop()
            report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
        report += "%(row)-4d|%(line)s\n"% {'row':row, 'line':line}
        # caret under the error column (5 = width of the "Line|" gutter)
        report += ' '*5 +' '*(col-1) + "^\n"

        while nextLines:
            lineInfo = nextLines.pop()
            report += "%(row)-4d|%(line)s\n"% {'row':lineInfo[0], 'line':lineInfo[1]}
        ## add the extra msg
        if self.extMsg:
            report += self.extMsg + '\n'
        return report

# base class for syntax rejected by parser-level filter hooks
class ForbiddenSyntax(ParseError):
    pass
class ForbiddenExpression(ForbiddenSyntax):
    pass
class ForbiddenDirective(ForbiddenSyntax):
    pass

class CheetahVariable(object):
    """Parsed representation of a Cheetah $variable."""

    def __init__(self, nameChunks, useNameMapper=True, cacheToken=None,
                 rawSource=None):
        self.nameChunks = nameChunks        # list of (name, autoCall, rest) tuples
        self.useNameMapper = useNameMapper
        self.cacheToken = cacheToken
        self.rawSource = rawSource          # original template source text

class Placeholder(CheetahVariable):
    # a CheetahVariable appearing as a top-level $placeholder
    pass

class ArgList(object):
    """Used by _LowLevelParser.getArgList()"""

    def __init__(self):
        self.arguments = []     # argument names, in declaration order
        self.defaults = []      # parallel list: default-value source text or None
        self.count = 0          # index of the argument currently being parsed

    def add_argument(self, name):
        self.arguments.append(name)
        self.defaults.append(None)

    def next(self):
        # advance to the next argument slot (called when ',' is seen)
        self.count += 1

    def add_default(self, token):
        # append a source token to the current argument's default value
        count = self.count
        if self.defaults[count] is None:
            self.defaults[count] = ''
        self.defaults[count] += token

    def merge(self):
        # Python 2 idioms: basestring test, and map(None, ...) acting as
        # zip_longest to pair stripped names with stripped defaults (or None).
        defaults = (isinstance(d, basestring) and d.strip() or None for d in self.defaults)
        return list(map(None, (a.strip() for a in self.arguments), defaults))

    def __str__(self):
        return str(self.merge())

class _LowLevelParser(SourceReader):
    """This class implements the methods to match or extract ('get*') the
    basic elements of Cheetah's grammar.  It does NOT handle any code
    generation or state management.
""" _settingsManager = None def setSettingsManager(self, settingsManager): self._settingsManager = settingsManager def setting(self, key, default=Unspecified): if default is Unspecified: return self._settingsManager.setting(key) else: return self._settingsManager.setting(key, default=default) def setSetting(self, key, val): self._settingsManager.setSetting(key, val) def settings(self): return self._settingsManager.settings() def updateSettings(self, settings): self._settingsManager.updateSettings(settings) def _initializeSettings(self): self._settingsManager._initializeSettings() def configureParser(self): """Is called by the Compiler instance after the parser has had a settingsManager assigned with self.setSettingsManager() """ self._makeCheetahVarREs() self._makeCommentREs() self._makeDirectiveREs() self._makePspREs() self._possibleNonStrConstantChars = ( self.setting('commentStartToken')[0] + self.setting('multiLineCommentStartToken')[0] + self.setting('cheetahVarStartToken')[0] + self.setting('directiveStartToken')[0] + self.setting('PSPStartToken')[0]) self._nonStrConstMatchers = [ self.matchCommentStartToken, self.matchMultiLineCommentStartToken, self.matchVariablePlaceholderStart, self.matchExpressionPlaceholderStart, self.matchDirective, self.matchPSPStartToken, self.matchEOLSlurpToken, ] ## regex setup ## def _makeCheetahVarREs(self): """Setup the regexs for Cheetah $var parsing.""" num = r'[0-9\.]+' interval = (r'(?P<interval>' + num + r's|' + num + r'm|' + num + r'h|' + num + r'd|' + num + r'w|' + num + ')' ) cacheToken = (r'(?:' + r'(?P<REFRESH_CACHE>\*' + interval + '\*)'+ '|' + r'(?P<STATIC_CACHE>\*)' + '|' + r'(?P<NO_CACHE>)' + ')') self.cacheTokenRE = cachedRegex(cacheToken) silentPlaceholderToken = (r'(?:' + r'(?P<SILENT>' +escapeRegexChars('!')+')'+ '|' + r'(?P<NOT_SILENT>)' + ')') self.silentPlaceholderTokenRE = cachedRegex(silentPlaceholderToken) self.cheetahVarStartRE = cachedRegex( escCharLookBehind + 
r'(?P<startToken>'+escapeRegexChars(self.setting('cheetahVarStartToken'))+')'+ r'(?P<silenceToken>'+silentPlaceholderToken+')'+ r'(?P<cacheToken>'+cacheToken+')'+ r'(?P<enclosure>|(?:(?:\{|\(|\[)[ \t\f]*))' + # allow WS after enclosure r'(?=[A-Za-z_])') validCharsLookAhead = r'(?=[A-Za-z_\*!\{\(\[])' self.cheetahVarStartToken = self.setting('cheetahVarStartToken') self.cheetahVarStartTokenRE = cachedRegex( escCharLookBehind + escapeRegexChars(self.setting('cheetahVarStartToken')) +validCharsLookAhead ) self.cheetahVarInExpressionStartTokenRE = cachedRegex( escapeRegexChars(self.setting('cheetahVarStartToken')) +r'(?=[A-Za-z_])' ) self.expressionPlaceholderStartRE = cachedRegex( escCharLookBehind + r'(?P<startToken>' + escapeRegexChars(self.setting('cheetahVarStartToken')) + ')' + r'(?P<cacheToken>' + cacheToken + ')' + #r'\[[ \t\f]*' r'(?:\{|\(|\[)[ \t\f]*' + r'(?=[^\)\}\]])' ) if self.setting('EOLSlurpToken'): self.EOLSlurpRE = cachedRegex( escapeRegexChars(self.setting('EOLSlurpToken')) + r'[ \t\f]*' + r'(?:'+EOL+')' ) else: self.EOLSlurpRE = None def _makeCommentREs(self): """Construct the regex bits that are used in comment parsing.""" startTokenEsc = escapeRegexChars(self.setting('commentStartToken')) self.commentStartTokenRE = cachedRegex(escCharLookBehind + startTokenEsc) del startTokenEsc startTokenEsc = escapeRegexChars( self.setting('multiLineCommentStartToken')) endTokenEsc = escapeRegexChars( self.setting('multiLineCommentEndToken')) self.multiLineCommentTokenStartRE = cachedRegex(escCharLookBehind + startTokenEsc) self.multiLineCommentEndTokenRE = cachedRegex(escCharLookBehind + endTokenEsc) def _makeDirectiveREs(self): """Construct the regexs that are used in directive parsing.""" startToken = self.setting('directiveStartToken') endToken = self.setting('directiveEndToken') startTokenEsc = escapeRegexChars(startToken) endTokenEsc = escapeRegexChars(endToken) validSecondCharsLookAhead = r'(?=[A-Za-z_@])' reParts = [escCharLookBehind, startTokenEsc] if 
self.setting('allowWhitespaceAfterDirectiveStartToken'):
            reParts.append('[ \t]*')
        reParts.append(validSecondCharsLookAhead)
        self.directiveStartTokenRE = cachedRegex(''.join(reParts))
        self.directiveEndTokenRE = cachedRegex(escCharLookBehind + endTokenEsc)

    def _makePspREs(self):
        """Setup the regexs for PSP parsing."""
        # Start/end tokens come from the compiler settings;
        # escCharLookBehind keeps backslash-escaped tokens literal.
        startToken = self.setting('PSPStartToken')
        startTokenEsc = escapeRegexChars(startToken)
        self.PSPStartTokenRE = cachedRegex(escCharLookBehind + startTokenEsc)
        endToken = self.setting('PSPEndToken')
        endTokenEsc = escapeRegexChars(endToken)
        self.PSPEndTokenRE = cachedRegex(escCharLookBehind + endTokenEsc)

    def _unescapeCheetahVars(self, theString):
        """Unescape any escaped Cheetah \$vars in the string.

        Turns each backslash-escaped start token into the literal token.
        """
        token = self.setting('cheetahVarStartToken')
        return theString.replace('\\' + token, token)

    def _unescapeDirectives(self, theString):
        """Unescape any escaped Cheetah directives in the string.

        Turns each backslash-escaped directive start token into the literal
        token.
        """
        token = self.setting('directiveStartToken')
        return theString.replace('\\' + token, token)

    def isLineClearToStartToken(self, pos=None):
        # Delegates to SourceReader.isLineClearToPos — presumably True when
        # only whitespace precedes pos on the current line (TODO confirm in
        # SourceReader); used to decide if a directive owns the whole line.
        return self.isLineClearToPos(pos)

    def matchTopLevelToken(self):
        """Returns the first match found from the following methods:
            self.matchCommentStartToken
            self.matchMultiLineCommentStartToken
            self.matchVariablePlaceholderStart
            self.matchExpressionPlaceholderStart
            self.matchDirective
            self.matchPSPStartToken
            self.matchEOLSlurpToken

        Returns None if no match.
""" match = None if self.peek() in self._possibleNonStrConstantChars: for matcher in self._nonStrConstMatchers: match = matcher() if match: break return match def matchPyToken(self): match = pseudoprog.match(self.src(), self.pos()) if match and match.group() in tripleQuotedStringStarts: TQSmatch = tripleQuotedStringREs[match.group()].match(self.src(), self.pos()) if TQSmatch: return TQSmatch return match def getPyToken(self): match = self.matchPyToken() if match is None: raise ParseError(self) elif match.group() in tripleQuotedStringStarts: raise ParseError(self, msg='Malformed triple-quoted string') return self.readTo(match.end()) def matchEOLSlurpToken(self): if self.EOLSlurpRE: return self.EOLSlurpRE.match(self.src(), self.pos()) def getEOLSlurpToken(self): match = self.matchEOLSlurpToken() if not match: raise ParseError(self, msg='Invalid EOL slurp token') return self.readTo(match.end()) def matchCommentStartToken(self): return self.commentStartTokenRE.match(self.src(), self.pos()) def getCommentStartToken(self): match = self.matchCommentStartToken() if not match: raise ParseError(self, msg='Invalid single-line comment start token') return self.readTo(match.end()) def matchMultiLineCommentStartToken(self): return self.multiLineCommentTokenStartRE.match(self.src(), self.pos()) def getMultiLineCommentStartToken(self): match = self.matchMultiLineCommentStartToken() if not match: raise ParseError(self, msg='Invalid multi-line comment start token') return self.readTo(match.end()) def matchMultiLineCommentEndToken(self): return self.multiLineCommentEndTokenRE.match(self.src(), self.pos()) def getMultiLineCommentEndToken(self): match = self.matchMultiLineCommentEndToken() if not match: raise ParseError(self, msg='Invalid multi-line comment end token') return self.readTo(match.end()) def getCommaSeparatedSymbols(self): """ Loosely based on getDottedName to pull out comma separated named chunks """ srcLen = len(self) pieces = [] nameChunks = [] if not self.peek() in 
identchars:
            raise ParseError(self)

        while self.pos() < srcLen:
            c = self.peek()
            if c in namechars:
                nameChunk = self.getIdentifier()
                nameChunks.append(nameChunk)
            elif c == '.':
                # keep the dot only when an identifier character follows it
                if self.pos()+1 <srcLen and self.peek(1) in identchars:
                    nameChunks.append(self.getc())
                else:
                    break
            elif c == ',':
                # comma ends the current symbol and starts the next
                self.getc()
                pieces.append(''.join(nameChunks))
                nameChunks = []
            elif c in (' ', '\t'):
                self.getc()
            else:
                break
        if nameChunks:
            pieces.append(''.join(nameChunks))
        return pieces

    def getDottedName(self):
        """Read a dotted name such as 'a.b.c' from the current position.

        Stops (without error) at the first character that cannot continue the
        name; raises ParseError if the name does not start with an identifier
        character.
        """
        srcLen = len(self)
        nameChunks = []

        if not self.peek() in identchars:
            raise ParseError(self)

        while self.pos() < srcLen:
            c = self.peek()
            if c in namechars:
                nameChunk = self.getIdentifier()
                nameChunks.append(nameChunk)
            elif c == '.':
                # only consume the dot if an identifier character follows
                if self.pos()+1 <srcLen and self.peek(1) in identchars:
                    nameChunks.append(self.getc())
                else:
                    break
            else:
                break

        return ''.join(nameChunks)

    def matchIdentifier(self):
        # match (without consuming) a Python identifier at the current pos
        return identRE.match(self.src(), self.pos())

    def getIdentifier(self):
        # consume and return an identifier, or raise ParseError
        match = self.matchIdentifier()
        if not match:
            raise ParseError(self, msg='Invalid identifier')
        return self.readTo(match.end())

    def matchOperator(self):
        # a Python token that is in the module-level `operators` set
        match = self.matchPyToken()
        if match and match.group() not in operators:
            match = None
        return match

    def getOperator(self):
        match = self.matchOperator()
        if not match:
            raise ParseError(self, msg='Expected operator')
        return self.readTo( match.end() )

    def matchAssignmentOperator(self):
        # a Python token that is in the module-level `assignmentOps` set
        match = self.matchPyToken()
        if match and match.group() not in assignmentOps:
            match = None
        return match

    def getAssignmentOperator(self):
        match = self.matchAssignmentOperator()
        if not match:
            raise ParseError(self, msg='Expected assignment operator')
        return self.readTo( match.end() )

    def matchDirective(self):
        """Returns False or the name of the directive matched.
""" startPos = self.pos() if not self.matchDirectiveStartToken(): return False self.getDirectiveStartToken() directiveName = self.matchDirectiveName() self.setPos(startPos) return directiveName def matchDirectiveName(self, directiveNameChars=identchars+'0123456789-@'): startPos = self.pos() possibleMatches = self._directiveNamesAndParsers.keys() name = '' match = None while not self.atEnd(): c = self.getc() if not c in directiveNameChars: break name += c if name == '@': if not self.atEnd() and self.peek() in identchars: match = '@' break possibleMatches = [dn for dn in possibleMatches if dn.startswith(name)] if not possibleMatches: break elif (name in possibleMatches and (self.atEnd() or self.peek() not in directiveNameChars)): match = name break self.setPos(startPos) return match def matchDirectiveStartToken(self): return self.directiveStartTokenRE.match(self.src(), self.pos()) def getDirectiveStartToken(self): match = self.matchDirectiveStartToken() if not match: raise ParseError(self, msg='Invalid directive start token') return self.readTo(match.end()) def matchDirectiveEndToken(self): return self.directiveEndTokenRE.match(self.src(), self.pos()) def getDirectiveEndToken(self): match = self.matchDirectiveEndToken() if not match: raise ParseError(self, msg='Invalid directive end token') return self.readTo(match.end()) def matchColonForSingleLineShortFormDirective(self): if not self.atEnd() and self.peek()==':': restOfLine = self[self.pos()+1:self.findEOL()] restOfLine = restOfLine.strip() if not restOfLine: return False elif self.commentStartTokenRE.match(restOfLine): return False else: # non-whitespace, non-commment chars found return True return False def matchPSPStartToken(self): return self.PSPStartTokenRE.match(self.src(), self.pos()) def matchPSPEndToken(self): return self.PSPEndTokenRE.match(self.src(), self.pos()) def getPSPStartToken(self): match = self.matchPSPStartToken() if not match: raise ParseError(self, msg='Invalid psp start token') return 
self.readTo(match.end()) def getPSPEndToken(self): match = self.matchPSPEndToken() if not match: raise ParseError(self, msg='Invalid psp end token') return self.readTo(match.end()) def matchCheetahVarStart(self): """includes the enclosure and cache token""" return self.cheetahVarStartRE.match(self.src(), self.pos()) def matchCheetahVarStartToken(self): """includes the enclosure and cache token""" return self.cheetahVarStartTokenRE.match(self.src(), self.pos()) def matchCheetahVarInExpressionStartToken(self): """no enclosures or cache tokens allowed""" return self.cheetahVarInExpressionStartTokenRE.match(self.src(), self.pos()) def matchVariablePlaceholderStart(self): """includes the enclosure and cache token""" return self.cheetahVarStartRE.match(self.src(), self.pos()) def matchExpressionPlaceholderStart(self): """includes the enclosure and cache token""" return self.expressionPlaceholderStartRE.match(self.src(), self.pos()) def getCheetahVarStartToken(self): """just the start token, not the enclosure or cache token""" match = self.matchCheetahVarStartToken() if not match: raise ParseError(self, msg='Expected Cheetah $var start token') return self.readTo( match.end() ) def getCacheToken(self): try: token = self.cacheTokenRE.match(self.src(), self.pos()) self.setPos( token.end() ) return token.group() except: raise ParseError(self, msg='Expected cache token') def getSilentPlaceholderToken(self): try: token = self.silentPlaceholderTokenRE.match(self.src(), self.pos()) self.setPos( token.end() ) return token.group() except: raise ParseError(self, msg='Expected silent placeholder token') def getTargetVarsList(self): varnames = [] while not self.atEnd(): if self.peek() in ' \t\f': self.getWhiteSpace() elif self.peek() in '\r\n': break elif self.startswith(','): self.advance() elif self.startswith('in ') or self.startswith('in\t'): break #elif self.matchCheetahVarStart(): elif self.matchCheetahVarInExpressionStartToken(): self.getCheetahVarStartToken() 
self.getSilentPlaceholderToken() self.getCacheToken() varnames.append( self.getDottedName() ) elif self.matchIdentifier(): varnames.append( self.getDottedName() ) else: break return varnames def getCheetahVar(self, plain=False, skipStartToken=False): """This is called when parsing inside expressions. Cache tokens are only valid in placeholders so this method discards any cache tokens found. """ if not skipStartToken: self.getCheetahVarStartToken() self.getSilentPlaceholderToken() self.getCacheToken() return self.getCheetahVarBody(plain=plain) def getCheetahVarBody(self, plain=False): # @@TR: this should be in the compiler return self._compiler.genCheetahVar(self.getCheetahVarNameChunks(), plain=plain) def getCheetahVarNameChunks(self): """ nameChunks = list of Cheetah $var subcomponents represented as tuples [ (namemapperPart,autoCall,restOfName), ] where: namemapperPart = the dottedName base autocall = where NameMapper should use autocalling on namemapperPart restOfName = any arglist, index, or slice If restOfName contains a call arglist (e.g. '(1234)') then autocall is False, otherwise it defaults to True. 
EXAMPLE ------------------------------------------------------------------------ if the raw CheetahVar is $a.b.c[1].d().x.y.z nameChunks is the list [ ('a.b.c',True,'[1]'), ('d',False,'()'), ('x.y.z',True,''), ] """ chunks = [] while self.pos() < len(self): rest = '' autoCall = True if not self.peek() in identchars + '.': break elif self.peek() == '.': if self.pos()+1 < len(self) and self.peek(1) in identchars: self.advance() # discard the period as it isn't needed with NameMapper else: break dottedName = self.getDottedName() if not self.atEnd() and self.peek() in '([': if self.peek() == '(': rest = self.getCallArgString() else: rest = self.getExpression(enclosed=True) period = max(dottedName.rfind('.'), 0) if period: chunks.append( (dottedName[:period], autoCall, '') ) dottedName = dottedName[period+1:] if rest and rest[0]=='(': autoCall = False chunks.append( (dottedName, autoCall, rest) ) return chunks def getCallArgString(self, enclosures=[], # list of tuples (char, pos), where char is ({ or [ useNameMapper=Unspecified): """ Get a method/function call argument string. 
This method understands *arg, and **kw """ # @@TR: this settings mangling should be removed if useNameMapper is not Unspecified: useNameMapper_orig = self.setting('useNameMapper') self.setSetting('useNameMapper', useNameMapper) if enclosures: pass else: if not self.peek() == '(': raise ParseError(self, msg="Expected '('") startPos = self.pos() self.getc() enclosures = [('(', startPos), ] argStringBits = ['('] addBit = argStringBits.append while True: if self.atEnd(): open = enclosures[-1][0] close = closurePairsRev[open] self.setPos(enclosures[-1][1]) raise ParseError( self, msg="EOF was reached before a matching '" + close + "' was found for the '" + open + "'") c = self.peek() if c in ")}]": # get the ending enclosure and break if not enclosures: raise ParseError(self) c = self.getc() open = closurePairs[c] if enclosures[-1][0] == open: enclosures.pop() addBit(')') break else: raise ParseError(self) elif c in " \t\f\r\n": addBit(self.getc()) elif self.matchCheetahVarInExpressionStartToken(): startPos = self.pos() codeFor1stToken = self.getCheetahVar() WS = self.getWhiteSpace() if not self.atEnd() and self.peek() == '=': nextToken = self.getPyToken() if nextToken == '=': endPos = self.pos() self.setPos(startPos) codeFor1stToken = self.getCheetahVar(plain=True) self.setPos(endPos) ## finally addBit( codeFor1stToken + WS + nextToken ) else: addBit( codeFor1stToken + WS) elif self.matchCheetahVarStart(): # it has syntax that is only valid at the top level self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr() else: beforeTokenPos = self.pos() token = self.getPyToken() if token in ('{', '(', '['): self.rev() token = self.getExpression(enclosed=True) token = self.transformToken(token, beforeTokenPos) addBit(token) if useNameMapper is not Unspecified: self.setSetting('useNameMapper', useNameMapper_orig) # @@TR: see comment above return ''.join(argStringBits) def getDefArgList(self, exitPos=None, useNameMapper=False): """ Get an argument list. 
Can be used for method/function definition argument lists or for #directive argument lists. Returns a list of tuples in the form (argName, defVal=None) with one tuple for each arg name. These defVals are always strings, so (argName, defVal=None) is safe even with a case like (arg1, arg2=None, arg3=1234*2), which would be returned as [('arg1', None), ('arg2', 'None'), ('arg3', '1234*2'), ] This method understands *arg, and **kw """ if self.peek() == '(': self.advance() else: exitPos = self.findEOL() # it's a directive so break at the EOL argList = ArgList() onDefVal = False # @@TR: this settings mangling should be removed useNameMapper_orig = self.setting('useNameMapper') self.setSetting('useNameMapper', useNameMapper) while True: if self.atEnd(): raise ParseError( self, msg="EOF was reached before a matching ')'"+ " was found for the '('") if self.pos() == exitPos: break c = self.peek() if c == ")" or self.matchDirectiveEndToken(): break elif c == ":": break elif c in " \t\f\r\n": if onDefVal: argList.add_default(c) self.advance() elif c == '=': onDefVal = True self.advance() elif c == ",": argList.next() onDefVal = False self.advance() elif self.startswith(self.cheetahVarStartToken) and not onDefVal: self.advance(len(self.cheetahVarStartToken)) elif self.matchIdentifier() and not onDefVal: argList.add_argument( self.getIdentifier() ) elif onDefVal: if self.matchCheetahVarInExpressionStartToken(): token = self.getCheetahVar() elif self.matchCheetahVarStart(): # it has syntax that is only valid at the top level self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr() else: beforeTokenPos = self.pos() token = self.getPyToken() if token in ('{', '(', '['): self.rev() token = self.getExpression(enclosed=True) token = self.transformToken(token, beforeTokenPos) argList.add_default(token) elif c == '*' and not onDefVal: varName = self.getc() if self.peek() == '*': varName += self.getc() if not self.matchIdentifier(): raise ParseError(self) varName += self.getIdentifier() 
argList.add_argument(varName) else: raise ParseError(self) self.setSetting('useNameMapper', useNameMapper_orig) # @@TR: see comment above return argList.merge() def getExpressionParts(self, enclosed=False, enclosures=None, # list of tuples (char, pos), where char is ({ or [ pyTokensToBreakAt=None, # only works if not enclosed useNameMapper=Unspecified, ): """ Get a Cheetah expression that includes $CheetahVars and break at directive end tokens, the end of an enclosure, or at a specified pyToken. """ if useNameMapper is not Unspecified: useNameMapper_orig = self.setting('useNameMapper') self.setSetting('useNameMapper', useNameMapper) if enclosures is None: enclosures = [] srcLen = len(self) exprBits = [] while True: if self.atEnd(): if enclosures: open = enclosures[-1][0] close = closurePairsRev[open] self.setPos(enclosures[-1][1]) raise ParseError( self, msg="EOF was reached before a matching '" + close + "' was found for the '" + open + "'") else: break c = self.peek() if c in "{([": exprBits.append(c) enclosures.append( (c, self.pos()) ) self.advance() elif enclosed and not enclosures: break elif c in "])}": if not enclosures: raise ParseError(self) open = closurePairs[c] if enclosures[-1][0] == open: enclosures.pop() exprBits.append(c) else: open = enclosures[-1][0] close = closurePairsRev[open] row, col = self.getRowCol() self.setPos(enclosures[-1][1]) raise ParseError( self, msg= "A '" + c + "' was found at line " + str(row) + ", col " + str(col) + " before a matching '" + close + "' was found\nfor the '" + open + "'") self.advance() elif c in " \f\t": exprBits.append(self.getWhiteSpace()) elif self.matchDirectiveEndToken() and not enclosures: break elif c == "\\" and self.pos()+1 < srcLen: eolMatch = EOLre.match(self.src(), self.pos()+1) if not eolMatch: self.advance() raise ParseError(self, msg='Line ending expected') self.setPos( eolMatch.end() ) elif c in '\r\n': if enclosures: self.advance() else: break elif self.matchCheetahVarInExpressionStartToken(): 
expr = self.getCheetahVar() exprBits.append(expr) elif self.matchCheetahVarStart(): # it has syntax that is only valid at the top level self._raiseErrorAboutInvalidCheetahVarSyntaxInExpr() else: beforeTokenPos = self.pos() token = self.getPyToken() if (not enclosures and pyTokensToBreakAt and token in pyTokensToBreakAt): self.setPos(beforeTokenPos) break token = self.transformToken(token, beforeTokenPos) exprBits.append(token) if identRE.match(token): if token == 'for': expr = self.getExpression(useNameMapper=False, pyTokensToBreakAt=['in']) exprBits.append(expr) else: exprBits.append(self.getWhiteSpace()) if not self.atEnd() and self.peek() == '(': exprBits.append(self.getCallArgString()) ## if useNameMapper is not Unspecified: self.setSetting('useNameMapper', useNameMapper_orig) # @@TR: see comment above return exprBits def getExpression(self, enclosed=False, enclosures=None, # list of tuples (char, pos), where # char is ({ or [ pyTokensToBreakAt=None, useNameMapper=Unspecified, ): """Returns the output of self.getExpressionParts() as a concatenated string rather than as a list. """ return ''.join(self.getExpressionParts( enclosed=enclosed, enclosures=enclosures, pyTokensToBreakAt=pyTokensToBreakAt, useNameMapper=useNameMapper)) def transformToken(self, token, beforeTokenPos): """Takes a token from the expression being parsed and performs and special transformations required by Cheetah. At the moment only Cheetah's c'$placeholder strings' are transformed. 
""" if token=='c' and not self.atEnd() and self.peek() in '\'"': nextToken = self.getPyToken() token = nextToken.upper() theStr = eval(token) endPos = self.pos() if not theStr: return if token.startswith(single3) or token.startswith(double3): startPosIdx = 3 else: startPosIdx = 1 self.setPos(beforeTokenPos+startPosIdx+1) outputExprs = [] strConst = '' while self.pos() < (endPos-startPosIdx): if self.matchCheetahVarStart() or self.matchExpressionPlaceholderStart(): if strConst: outputExprs.append(repr(strConst)) strConst = '' placeholderExpr = self.getPlaceholder() outputExprs.append('str('+placeholderExpr+')') else: strConst += self.getc() self.setPos(endPos) if strConst: outputExprs.append(repr(strConst)) token = "''.join(["+','.join(outputExprs)+"])" return token def _raiseErrorAboutInvalidCheetahVarSyntaxInExpr(self): match = self.matchCheetahVarStart() groupdict = match.groupdict() if groupdict.get('cacheToken'): raise ParseError( self, msg='Cache tokens are not valid inside expressions. ' 'Use them in top-level $placeholders only.') elif groupdict.get('enclosure'): raise ParseError( self, msg='Long-form placeholders - ${}, $(), $[], etc. are not valid inside expressions. 
' 'Use them in top-level $placeholders only.') else: raise ParseError( self, msg='This form of $placeholder syntax is not valid here.') def getPlaceholder(self, allowCacheTokens=False, plain=False, returnEverything=False): # filtered for callback in self.setting('preparsePlaceholderHooks'): callback(parser=self) startPos = self.pos() lineCol = self.getRowCol(startPos) startToken = self.getCheetahVarStartToken() silentPlaceholderToken = self.getSilentPlaceholderToken() if silentPlaceholderToken: isSilentPlaceholder = True else: isSilentPlaceholder = False if allowCacheTokens: cacheToken = self.getCacheToken() cacheTokenParts = self.cacheTokenRE.match(cacheToken).groupdict() else: cacheTokenParts = {} if self.peek() in '({[': pos = self.pos() enclosureOpenChar = self.getc() enclosures = [ (enclosureOpenChar, pos) ] self.getWhiteSpace() else: enclosures = [] filterArgs = None if self.matchIdentifier(): nameChunks = self.getCheetahVarNameChunks() expr = self._compiler.genCheetahVar(nameChunks[:], plain=plain) restOfExpr = None if enclosures: WS = self.getWhiteSpace() expr += WS if self.setting('allowPlaceholderFilterArgs') and self.peek()==',': filterArgs = self.getCallArgString(enclosures=enclosures)[1:-1] else: if self.peek()==closurePairsRev[enclosureOpenChar]: self.getc() else: restOfExpr = self.getExpression(enclosed=True, enclosures=enclosures) if restOfExpr[-1] == closurePairsRev[enclosureOpenChar]: restOfExpr = restOfExpr[:-1] expr += restOfExpr rawPlaceholder = self[startPos: self.pos()] else: expr = self.getExpression(enclosed=True, enclosures=enclosures) if expr[-1] == closurePairsRev[enclosureOpenChar]: expr = expr[:-1] rawPlaceholder=self[startPos: self.pos()] expr = self._applyExpressionFilters(expr, 'placeholder', rawExpr=rawPlaceholder, startPos=startPos) for callback in self.setting('postparsePlaceholderHooks'): callback(parser=self) if returnEverything: return (expr, rawPlaceholder, lineCol, cacheTokenParts, filterArgs, isSilentPlaceholder) else: 
return expr class _HighLevelParser(_LowLevelParser): """This class is a StateMachine for parsing Cheetah source and sending state dependent code generation commands to Cheetah.Compiler.Compiler. """ def __init__(self, src, filename=None, breakPoint=None, compiler=None): super(_HighLevelParser, self).__init__(src, filename=filename, breakPoint=breakPoint) self.setSettingsManager(compiler) self._compiler = compiler self.setupState() self.configureParser() def setupState(self): self._macros = {} self._macroDetails = {} self._openDirectivesStack = [] def cleanup(self): """Cleanup to remove any possible reference cycles """ self._macros.clear() for macroname, macroDetails in self._macroDetails.items(): macroDetails.template.shutdown() del macroDetails.template self._macroDetails.clear() def configureParser(self): super(_HighLevelParser, self).configureParser() self._initDirectives() def _initDirectives(self): def normalizeParserVal(val): if isinstance(val, (str, unicode)): handler = getattr(self, val) elif type(val) in (ClassType, TypeType): handler = val(self) elif hasattr(val, '__call__'): handler = val elif val is None: handler = val else: raise Exception('Invalid parser/handler value %r for %s'%(val, name)) return handler normalizeHandlerVal = normalizeParserVal _directiveNamesAndParsers = directiveNamesAndParsers.copy() customNamesAndParsers = self.setting('directiveNamesAndParsers', {}) _directiveNamesAndParsers.update(customNamesAndParsers) _endDirectiveNamesAndHandlers = endDirectiveNamesAndHandlers.copy() customNamesAndHandlers = self.setting('endDirectiveNamesAndHandlers', {}) _endDirectiveNamesAndHandlers.update(customNamesAndHandlers) self._directiveNamesAndParsers = {} for name, val in _directiveNamesAndParsers.items(): if val in (False, 0): continue self._directiveNamesAndParsers[name] = normalizeParserVal(val) self._endDirectiveNamesAndHandlers = {} for name, val in _endDirectiveNamesAndHandlers.items(): if val in (False, 0): continue 
self._endDirectiveNamesAndHandlers[name] = normalizeHandlerVal(val) self._closeableDirectives = ['def', 'block', 'closure', 'defmacro', 'call', 'capture', 'cache', 'filter', 'if', 'unless', 'for', 'while', 'repeat', 'try', ] for directiveName in self.setting('closeableDirectives', []): self._closeableDirectives.append(directiveName) macroDirectives = self.setting('macroDirectives', {}) macroDirectives['i18n'] = I18n for macroName, callback in macroDirectives.items(): if type(callback) in (ClassType, TypeType): callback = callback(parser=self) assert callback self._macros[macroName] = callback self._directiveNamesAndParsers[macroName] = self.eatMacroCall def _applyExpressionFilters(self, expr, exprType, rawExpr=None, startPos=None): """Pipes cheetah expressions through a set of optional filter hooks. The filters are functions which may modify the expressions or raise a ForbiddenExpression exception if the expression is not allowed. They are defined in the compiler setting 'expressionFilterHooks'. Some intended use cases: - to implement 'restricted execution' safeguards in cases where you can't trust the author of the template. - to enforce style guidelines filter call signature: (parser, expr, exprType, rawExpr=None, startPos=None) - parser is the Cheetah parser - expr is the expression to filter. In some cases the parser will have already modified it from the original source code form. For example, placeholders will have been translated into namemapper calls. If you need to work with the original source, see rawExpr. - exprType is the name of the directive, 'psp', or 'placeholder'. All lowercase. @@TR: These will eventually be replaced with a set of constants. - rawExpr is the original source string that Cheetah parsed. This might be None in some cases. - startPos is the character position in the source string/file where the parser started parsing the current expression. 
        @@TR: I realize this use of the term 'expression' is a bit wonky as
        many of the 'expressions' are actually statements, but I haven't
        thought of a better name yet.  Suggestions?
        """
        for callback in self.setting('expressionFilterHooks'):
            expr = callback(parser=self, expr=expr, exprType=exprType,
                            rawExpr=rawExpr, startPos=startPos)
        return expr

    def _filterDisabledDirectives(self, directiveName):
        """Raise ForbiddenDirective if this directive is disabled by the
        'disabledDirectives' setting, or not present in a non-empty
        'enabledDirectives' whitelist.
        """
        directiveName = directiveName.lower()
        if (directiveName in self.setting('disabledDirectives')
            or (self.setting('enabledDirectives')
                and directiveName not in self.setting('enabledDirectives'))):
            for callback in self.setting('disabledDirectiveHooks'):
                callback(parser=self, directiveName=directiveName)
            raise ForbiddenDirective(self, msg='This %r directive is disabled'%directiveName)

    ## main parse loop

    def parse(self, breakPoint=None, assertEmptyStack=True):
        """Main parse loop: dispatch on the token at the current position
        until the end of the source (or the breakPoint) is reached.
        NOTE: the branch order below is significant — do not reorder.
        """
        if breakPoint:
            origBP = self.breakPoint()
            self.setBreakPoint(breakPoint)
            # a bounded sub-parse may legitimately leave directives open
            assertEmptyStack = False

        while not self.atEnd():
            if self.matchCommentStartToken():
                self.eatComment()
            elif self.matchMultiLineCommentStartToken():
                self.eatMultiLineComment()
            elif self.matchVariablePlaceholderStart():
                self.eatPlaceholder()
            elif self.matchExpressionPlaceholderStart():
                self.eatPlaceholder()
            elif self.matchDirective():
                self.eatDirective()
            elif self.matchPSPStartToken():
                self.eatPSP()
            elif self.matchEOLSlurpToken():
                self.eatEOLSlurpToken()
            else:
                self.eatPlainText()
        if assertEmptyStack:
            self.assertEmptyOpenDirectivesStack()
        if breakPoint:
            self.setBreakPoint(origBP)

    ## non-directive eat methods

    def eatPlainText(self):
        """Consume literal text up to the next top-level token and emit it
        as a string constant.  Returns the match that stopped the scan
        (None if the end of input was reached).
        """
        startPos = self.pos()
        match = None
        while not self.atEnd():
            match = self.matchTopLevelToken()
            if match:
                break
            else:
                self.advance()
        strConst = self.readTo(self.pos(), start=startPos)
        strConst = self._unescapeCheetahVars(strConst)
        strConst = self._unescapeDirectives(strConst)
        self._compiler.addStrConst(strConst)
        return match

    def eatComment(self):
        """Consume a single-line ## comment through to EOL."""
        isLineClearToStartToken = self.isLineClearToStartToken()
        if isLineClearToStartToken:
            self._compiler.handleWSBeforeDirective()
        self.getCommentStartToken()
        comm = self.readToEOL(gobble=isLineClearToStartToken)
        self._compiler.addComment(comm)

    def eatMultiLineComment(self):
        """Consume a (possibly nested) #* ... *# multi-line comment."""
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLine = self.findEOL()

        self.getMultiLineCommentStartToken()
        endPos = startPos = self.pos()
        level = 1          # nesting depth of open comment tokens
        while True:
            endPos = self.pos()
            if self.atEnd():
                break
            if self.matchMultiLineCommentStartToken():
                self.getMultiLineCommentStartToken()
                level += 1
            elif self.matchMultiLineCommentEndToken():
                self.getMultiLineCommentEndToken()
                level -= 1
            if not level:
                break
            self.advance()
        comm = self.readTo(endPos, start=startPos)

        if not self.atEnd():
            self.getMultiLineCommentEndToken()

        if (not self.atEnd()) and self.setting('gobbleWhitespaceAroundMultiLineComments'):
            restOfLine = self[self.pos():self.findEOL()]
            if not restOfLine.strip():  # WS only to EOL
                self.readToEOL(gobble=isLineClearToStartToken)

        if isLineClearToStartToken and (self.atEnd() or self.pos() > endOfFirstLine):
            self._compiler.handleWSBeforeDirective()

        self._compiler.addComment(comm)

    def eatPlaceholder(self):
        """Consume a $placeholder / ${placeholder} and hand it to the compiler."""
        (expr, rawPlaceholder, lineCol, cacheTokenParts,
         filterArgs, isSilentPlaceholder) = self.getPlaceholder(
            allowCacheTokens=True, returnEverything=True)

        self._compiler.addPlaceholder(
            expr,
            filterArgs=filterArgs,
            rawPlaceholder=rawPlaceholder,
            cacheTokenParts=cacheTokenParts,
            lineCol=lineCol,
            silentMode=isSilentPlaceholder)
        return

    def eatPSP(self):
        """Consume a PSP (<% ... %>-style) block up to the PSPEndToken."""
        # filtered
        self._filterDisabledDirectives(directiveName='psp')
        self.getPSPStartToken()
        endToken = self.setting('PSPEndToken')
        startPos = self.pos()
        while not self.atEnd():
            # cheap first-char check before the full end-token match
            if self.peek() == endToken[0]:
                if self.matchPSPEndToken():
                    break
            self.advance()
        pspString = self.readTo(self.pos(), start=startPos).strip()
        pspString = self._applyExpressionFilters(pspString, 'psp', startPos=startPos)
        self._compiler.addPSP(pspString)
        self.getPSPEndToken()

    ## generic directive eat methods

    # directives compiled as indenting statements (continues on next line)
    _simpleIndentingDirectives = '''
    else elif for while
    repeat unless try except finally'''.split()

    # directives compiled as simple one-line expressions/statements
    _simpleExprDirectives = '''
    pass continue stop return yield break
    del assert raise
    silent echo
    import from'''.split()

    # directives whose compiler handler name isn't 'add' + Capitalized name
    _directiveHandlerNames = {'import': 'addImportStatement',
                              'from': 'addImportStatement',
                              }

    def eatDirective(self):
        """Dispatch the directive at the current position to its parser or,
        for the simple directive families, to the matching compiler handler.
        """
        directiveName = self.matchDirective()
        self._filterDisabledDirectives(directiveName)
        for callback in self.setting('preparseDirectiveHooks'):
            callback(parser=self, directiveName=directiveName)

        # subclasses can override the default behaviours here by providing an
        # eater method in self._directiveNamesAndParsers[directiveName]
        directiveParser = self._directiveNamesAndParsers.get(directiveName)
        if directiveParser:
            directiveParser()
        elif directiveName in self._simpleIndentingDirectives:
            handlerName = self._directiveHandlerNames.get(directiveName)
            if not handlerName:
                handlerName = 'add'+directiveName.capitalize()
            handler = getattr(self._compiler, handlerName)
            self.eatSimpleIndentingDirective(directiveName, callback=handler)
        elif directiveName in self._simpleExprDirectives:
            handlerName = self._directiveHandlerNames.get(directiveName)
            if not handlerName:
                handlerName = 'add'+directiveName.capitalize()
            handler = getattr(self._compiler, handlerName)
            # #silent / #echo don't include the directive word in the expr
            if directiveName in ('silent', 'echo'):
                includeDirectiveNameInExpr = False
            else:
                includeDirectiveNameInExpr = True
            expr = self.eatSimpleExprDirective(
                directiveName,
                includeDirectiveNameInExpr=includeDirectiveNameInExpr)
            handler(expr)
        ##
        for callback in self.setting('postparseDirectiveHooks'):
            callback(parser=self, directiveName=directiveName)

    def _eatRestOfDirectiveTag(self, isLineClearToStartToken, endOfFirstLinePos):
        """Consume the trailing end-token / comment / EOL after a directive."""
        foundComment = False
        if self.matchCommentStartToken():
            pos = self.pos()
            self.advance()
            # only treat it as a trailing comment if it isn't itself a directive
            if not self.matchDirective():
                self.setPos(pos)
                foundComment = True
                self.eatComment()  # this won't gobble the EOL
            else:
                self.setPos(pos)

        if not foundComment and self.matchDirectiveEndToken():
            self.getDirectiveEndToken()
        elif isLineClearToStartToken and (not
                                          self.atEnd()) and self.peek() in '\r\n':
            # still gobble the EOL if a comment was found.
            self.readToEOL(gobble=True)

        if isLineClearToStartToken and (self.atEnd() or self.pos() > endOfFirstLinePos):
            self._compiler.handleWSBeforeDirective()

    def _eatToThisEndDirective(self, directiveName):
        """Consume raw source up to '#end <directiveName>' and return the
        text eaten (the end directive itself is consumed, not returned).
        """
        finalPos = endRawPos = startPos = self.pos()
        directiveChar = self.setting('directiveStartToken')[0]
        isLineClearToStartToken = False
        while not self.atEnd():
            if self.peek() == directiveChar:
                if self.matchDirective() == 'end':
                    endRawPos = self.pos()
                    self.getDirectiveStartToken()
                    self.advance(len('end'))
                    self.getWhiteSpace()
                    if self.startswith(directiveName):
                        if self.isLineClearToStartToken(endRawPos):
                            isLineClearToStartToken = True
                            endRawPos = self.findBOL(endRawPos)
                        self.advance(len(directiveName))  # to end of directiveName
                        self.getWhiteSpace()
                        finalPos = self.pos()
                        break
            self.advance()
            finalPos = endRawPos = self.pos()

        textEaten = self.readTo(endRawPos, start=startPos)
        self.setPos(finalPos)

        endOfFirstLinePos = self.findEOL()

        if self.matchDirectiveEndToken():
            self.getDirectiveEndToken()
        elif isLineClearToStartToken and (not self.atEnd()) and self.peek() in '\r\n':
            self.readToEOL(gobble=True)

        if isLineClearToStartToken and self.pos() > endOfFirstLinePos:
            self._compiler.handleWSBeforeDirective()
        return textEaten

    def eatSimpleExprDirective(self, directiveName, includeDirectiveNameInExpr=True):
        """Consume a simple expression directive and return its expression
        string (which starts with the directive word unless
        includeDirectiveNameInExpr is False).
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLine = self.findEOL()
        self.getDirectiveStartToken()
        if not includeDirectiveNameInExpr:
            self.advance(len(directiveName))
        startPos = self.pos()
        expr = self.getExpression().strip()
        # re-derive the name from the expr; caller may have passed a variant
        directiveName = expr.split()[0]
        expr = self._applyExpressionFilters(expr, directiveName, startPos=startPos)
        if directiveName in self._closeableDirectives:
            self.pushToOpenDirectivesStack(directiveName)
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine)
        return expr

    def eatSimpleIndentingDirective(self, directiveName, callback,
                                    includeDirectiveNameInExpr=False):
        """Consume an indenting directive (#if, #for, ...), supporting both
        the single-line short form ('#if x: y') and the block form.
        `callback` is the compiler handler that emits the statement.
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLinePos = self.findEOL()
        lineCol = self.getRowCol()
        self.getDirectiveStartToken()
        # these directive words are part of the Python statement itself,
        # so they are left in the expression rather than skipped
        if directiveName not in 'else elif for while try except finally'.split():
            self.advance(len(directiveName))
        startPos = self.pos()

        self.getWhiteSpace()

        expr = self.getExpression(pyTokensToBreakAt=[':'])
        expr = self._applyExpressionFilters(expr, directiveName, startPos=startPos)
        if self.matchColonForSingleLineShortFormDirective():
            self.advance()  # skip over :
            if directiveName in 'else elif except finally'.split():
                callback(expr, dedent=False, lineCol=lineCol)
            else:
                callback(expr, lineCol=lineCol)

            self.getWhiteSpace(max=1)
            # parse just the rest of this line as the directive body
            self.parse(breakPoint=self.findEOL(gobble=True))
            self._compiler.commitStrConst()
            self._compiler.dedent()
        else:
            if self.peek() == ':':
                self.advance()
            self.getWhiteSpace()
            self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
            if directiveName in self._closeableDirectives:
                self.pushToOpenDirectivesStack(directiveName)
            callback(expr, lineCol=lineCol)

    def eatEndDirective(self):
        """Consume an '#end <name>' directive and close the matching region."""
        isLineClearToStartToken = self.isLineClearToStartToken()
        self.getDirectiveStartToken()
        self.advance(3)                 # to end of 'end'
        self.getWhiteSpace()
        pos = self.pos()
        directiveName = False
        for key in self._endDirectiveNamesAndHandlers.keys():
            if self.find(key, pos) == pos:
                directiveName = key
                break
        if not directiveName:
            raise ParseError(self, msg='Invalid end directive')

        endOfFirstLinePos = self.findEOL()
        self.getExpression()  # eat in any extra comment-like crap
        self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos)
        if directiveName in self._closeableDirectives:
            self.popFromOpenDirectivesStack(directiveName)

        # subclasses can override the default behaviours here by providing an
        # end-directive handler in self._endDirectiveNamesAndHandlers[directiveName]
        if self._endDirectiveNamesAndHandlers.get(directiveName):
            handler = self._endDirectiveNamesAndHandlers[directiveName]
            handler()
        elif directiveName in 'block capture cache call filter errorCatcher'.split():
            # NOTE(review): `key` below is the leftover loop variable from the
            # lookup above; it equals directiveName on this path, but comparing
            # `directiveName` directly would be clearer and less fragile.
            if key == 'block':
                self._compiler.closeBlock()
            elif key == 'capture':
                self._compiler.endCaptureRegion()
            elif key == 'cache':
                self._compiler.endCacheRegion()
            elif key == 'call':
                self._compiler.endCallRegion()
            elif key == 'filter':
                self._compiler.closeFilterBlock()
            elif key == 'errorCatcher':
                self._compiler.turnErrorCatcherOff()
        elif directiveName in 'while for if try repeat unless'.split():
            self._compiler.commitStrConst()
            self._compiler.dedent()
        elif directiveName == 'closure':
            self._compiler.commitStrConst()
            self._compiler.dedent()
            # @@TR: temporary hack of useSearchList
            self.setSetting('useSearchList', self._useSearchList_orig)

    ## specific directive eat methods

    def eatBreakPoint(self):
        """Tells the parser to stop parsing at this point and completely
        ignore everything else.

        This is a debugging tool.
        """
        self.setBreakPoint(self.pos())

    def eatShbang(self):
        """Consume a #shBang directive and record the shebang line."""
        # filtered
        self.getDirectiveStartToken()
        self.advance(len('shBang'))
        self.getWhiteSpace()
        startPos = self.pos()
        shBang = self.readToEOL()
        shBang = self._applyExpressionFilters(shBang, 'shbang', startPos=startPos)
        self._compiler.setShBang(shBang.strip())

    def eatEncoding(self):
        """Consume an #encoding directive and record the module encoding."""
        # filtered
        self.getDirectiveStartToken()
        self.advance(len('encoding'))
        self.getWhiteSpace()
        startPos = self.pos()
        encoding = self.readToEOL()
        encoding = self._applyExpressionFilters(encoding, 'encoding', startPos=startPos)
        self._compiler.setModuleEncoding(encoding.strip())

    def eatCompiler(self):
        """Consume a '#compiler setting = value' directive (or
        '#compiler reset') and apply it to the compiler.
        """
        # filtered
        isLineClearToStartToken = self.isLineClearToStartToken()
        endOfFirstLine = self.findEOL()
        startPos = self.pos()
        self.getDirectiveStartToken()
        self.advance(len('compiler'))   # to end of 'compiler'
        self.getWhiteSpace()

        startPos = self.pos()
        settingName = self.getIdentifier()

        if settingName.lower() == 'reset':
            self.getExpression()  # gobble whitespace & junk
            self._eatRestOfDirectiveTag(isLineClearToStartToken,
endOfFirstLine) self._initializeSettings() self.configureParser() return self.getWhiteSpace() if self.peek() == '=': self.advance() else: raise ParseError(self) valueExpr = self.getExpression() endPos = self.pos() # @@TR: it's unlikely that anyone apply filters would have left this # directive enabled: # @@TR: fix up filtering, regardless self._applyExpressionFilters('%s=%r'%(settingName, valueExpr), 'compiler', startPos=startPos) self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine) try: self._compiler.setCompilerSetting(settingName, valueExpr) except: sys.stderr.write('An error occurred while processing the following #compiler directive.\n') sys.stderr.write('----------------------------------------------------------------------\n') sys.stderr.write('%s\n' % self[startPos:endPos]) sys.stderr.write('----------------------------------------------------------------------\n') sys.stderr.write('Please check the syntax of these settings.\n\n') raise def eatCompilerSettings(self): # filtered isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLine = self.findEOL() self.getDirectiveStartToken() self.advance(len('compiler-settings')) # to end of 'settings' keywords = self.getTargetVarsList() self.getExpression() # gobble any garbage self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine) if 'reset' in keywords: self._compiler._initializeSettings() self.configureParser() # @@TR: this implies a single-line #compiler-settings directive, and # thus we should parse forward for an end directive. 
# Subject to change in the future return startPos = self.pos() settingsStr = self._eatToThisEndDirective('compiler-settings') settingsStr = self._applyExpressionFilters(settingsStr, 'compilerSettings', startPos=startPos) try: self._compiler.setCompilerSettings(keywords=keywords, settingsStr=settingsStr) except: sys.stderr.write('An error occurred while processing the following compiler settings.\n') sys.stderr.write('----------------------------------------------------------------------\n') sys.stderr.write('%s\n' % settingsStr.strip()) sys.stderr.write('----------------------------------------------------------------------\n') sys.stderr.write('Please check the syntax of these settings.\n\n') raise def eatAttr(self): # filtered isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() startPos = self.pos() self.getDirectiveStartToken() self.advance(len('attr')) self.getWhiteSpace() startPos = self.pos() if self.matchCheetahVarStart(): self.getCheetahVarStartToken() attribName = self.getIdentifier() self.getWhiteSpace() self.getAssignmentOperator() expr = self.getExpression() expr = self._applyExpressionFilters(expr, 'attr', startPos=startPos) self._compiler.addAttribute(attribName, expr) self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) def eatDecorator(self): isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() startPos = self.pos() self.getDirectiveStartToken() #self.advance() # eat @ startPos = self.pos() decoratorExpr = self.getExpression() decoratorExpr = self._applyExpressionFilters(decoratorExpr, 'decorator', startPos=startPos) self._compiler.addDecorator(decoratorExpr) self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) self.getWhiteSpace() directiveName = self.matchDirective() if not directiveName or directiveName not in ('def', 'block', 'closure', '@'): raise ParseError( self, msg='Expected #def, #block, #closure or another @decorator') 
self.eatDirective() def eatDef(self): # filtered self._eatDefOrBlock('def') def eatBlock(self): # filtered startPos = self.pos() methodName, rawSignature = self._eatDefOrBlock('block') self._compiler._blockMetaData[methodName] = { 'raw': rawSignature, 'lineCol': self.getRowCol(startPos), } def eatClosure(self): # filtered self._eatDefOrBlock('closure') def _eatDefOrBlock(self, directiveName): # filtered assert directiveName in ('def', 'block', 'closure') isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() startPos = self.pos() self.getDirectiveStartToken() self.advance(len(directiveName)) self.getWhiteSpace() if self.matchCheetahVarStart(): self.getCheetahVarStartToken() methodName = self.getIdentifier() self.getWhiteSpace() if self.peek() == '(': argsList = self.getDefArgList() self.advance() # past the closing ')' if argsList and argsList[0][0] == 'self': del argsList[0] else: argsList=[] def includeBlockMarkers(): if self.setting('includeBlockMarkers'): startMarker = self.setting('blockMarkerStart') self._compiler.addStrConst(startMarker[0] + methodName + startMarker[1]) # @@TR: fix up filtering self._applyExpressionFilters(self[startPos:self.pos()], 'def', startPos=startPos) if self.matchColonForSingleLineShortFormDirective(): isNestedDef = (self.setting('allowNestedDefScopes') and [name for name in self._openDirectivesStack if name=='def']) self.getc() rawSignature = self[startPos:endOfFirstLinePos] self._eatSingleLineDef(directiveName=directiveName, methodName=methodName, argsList=argsList, startPos=startPos, endPos=endOfFirstLinePos) if directiveName == 'def' and not isNestedDef: #@@TR: must come before _eatRestOfDirectiveTag ... 
for some reason self._compiler.closeDef() elif directiveName == 'block': includeBlockMarkers() self._compiler.closeBlock() elif directiveName == 'closure' or isNestedDef: self._compiler.dedent() self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) else: if self.peek()==':': self.getc() self.pushToOpenDirectivesStack(directiveName) rawSignature = self[startPos:self.pos()] self._eatMultiLineDef(directiveName=directiveName, methodName=methodName, argsList=argsList, startPos=startPos, isLineClearToStartToken=isLineClearToStartToken) if directiveName == 'block': includeBlockMarkers() return methodName, rawSignature def _eatMultiLineDef(self, directiveName, methodName, argsList, startPos, isLineClearToStartToken=False): # filtered in calling method self.getExpression() # slurp up any garbage left at the end signature = self[startPos:self.pos()] endOfFirstLinePos = self.findEOL() self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) signature = ' '.join([line.strip() for line in signature.splitlines()]) parserComment = ('## CHEETAH: generated from ' + signature + ' at line %s, col %s' % self.getRowCol(startPos) + '.') isNestedDef = (self.setting('allowNestedDefScopes') and len([name for name in self._openDirectivesStack if name=='def'])>1) if directiveName=='block' or (directiveName=='def' and not isNestedDef): self._compiler.startMethodDef(methodName, argsList, parserComment) else: #closure self._useSearchList_orig = self.setting('useSearchList') self.setSetting('useSearchList', False) self._compiler.addClosure(methodName, argsList, parserComment) return methodName def _eatSingleLineDef(self, directiveName, methodName, argsList, startPos, endPos): # filtered in calling method fullSignature = self[startPos:endPos] parserComment = ('## Generated from ' + fullSignature + ' at line %s, col %s' % self.getRowCol(startPos) + '.') isNestedDef = (self.setting('allowNestedDefScopes') and [name for name in self._openDirectivesStack if 
name=='def']) if directiveName=='block' or (directiveName=='def' and not isNestedDef): self._compiler.startMethodDef(methodName, argsList, parserComment) else: #closure # @@TR: temporary hack of useSearchList useSearchList_orig = self.setting('useSearchList') self.setSetting('useSearchList', False) self._compiler.addClosure(methodName, argsList, parserComment) self.getWhiteSpace(max=1) self.parse(breakPoint=endPos) if directiveName=='closure' or isNestedDef: # @@TR: temporary hack of useSearchList self.setSetting('useSearchList', useSearchList_orig) def eatExtends(self): # filtered isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLine = self.findEOL() self.getDirectiveStartToken() self.advance(len('extends')) self.getWhiteSpace() startPos = self.pos() if self.setting('allowExpressionsInExtendsDirective'): baseName = self.getExpression() else: baseName = self.getCommaSeparatedSymbols() baseName = ', '.join(baseName) baseName = self._applyExpressionFilters(baseName, 'extends', startPos=startPos) self._compiler.setBaseClass(baseName) # in compiler self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine) def eatImplements(self): # filtered isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLine = self.findEOL() self.getDirectiveStartToken() self.advance(len('implements')) self.getWhiteSpace() startPos = self.pos() methodName = self.getIdentifier() if not self.atEnd() and self.peek() == '(': argsList = self.getDefArgList() self.advance() # past the closing ')' if argsList and argsList[0][0] == 'self': del argsList[0] else: argsList=[] # @@TR: need to split up filtering of the methodname and the args #methodName = self._applyExpressionFilters(methodName, 'implements', startPos=startPos) self._applyExpressionFilters(self[startPos:self.pos()], 'implements', startPos=startPos) self._compiler.setMainMethodName(methodName) self._compiler.setMainMethodArgs(argsList) self.getExpression() # throw away and unwanted crap that got added 
in self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine) def eatSuper(self): # filtered isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLine = self.findEOL() self.getDirectiveStartToken() self.advance(len('super')) self.getWhiteSpace() startPos = self.pos() if not self.atEnd() and self.peek() == '(': argsList = self.getDefArgList() self.advance() # past the closing ')' if argsList and argsList[0][0] == 'self': del argsList[0] else: argsList=[] self._applyExpressionFilters(self[startPos:self.pos()], 'super', startPos=startPos) #parserComment = ('## CHEETAH: generated from ' + signature + # ' at line %s, col %s' % self.getRowCol(startPos) # + '.') self.getExpression() # throw away and unwanted crap that got added in self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine) self._compiler.addSuper(argsList) def eatSet(self): # filtered isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLine = self.findEOL() self.getDirectiveStartToken() self.advance(3) self.getWhiteSpace() style = SET_LOCAL if self.startswith('local'): self.getIdentifier() self.getWhiteSpace() elif self.startswith('global'): self.getIdentifier() self.getWhiteSpace() style = SET_GLOBAL elif self.startswith('module'): self.getIdentifier() self.getWhiteSpace() style = SET_MODULE startsWithDollar = self.matchCheetahVarStart() startPos = self.pos() LVALUE = self.getExpression(pyTokensToBreakAt=assignmentOps, useNameMapper=False).strip() OP = self.getAssignmentOperator() RVALUE = self.getExpression() expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip() expr = self._applyExpressionFilters(expr, 'set', startPos=startPos) self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine) class Components: pass # used for 'set global' exprComponents = Components() exprComponents.LVALUE = LVALUE exprComponents.OP = OP exprComponents.RVALUE = RVALUE self._compiler.addSet(expr, exprComponents, style) def eatSlurp(self): if self.isLineClearToStartToken(): 
self._compiler.handleWSBeforeDirective() self._compiler.commitStrConst() self.readToEOL(gobble=True) def eatEOLSlurpToken(self): if self.isLineClearToStartToken(): self._compiler.handleWSBeforeDirective() self._compiler.commitStrConst() self.readToEOL(gobble=True) def eatRaw(self): isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() self.getDirectiveStartToken() self.advance(len('raw')) self.getWhiteSpace() if self.matchColonForSingleLineShortFormDirective(): self.advance() # skip over : self.getWhiteSpace(max=1) rawBlock = self.readToEOL(gobble=False) else: if self.peek()==':': self.advance() self.getWhiteSpace() self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) rawBlock = self._eatToThisEndDirective('raw') self._compiler.addRawText(rawBlock) def eatInclude(self): # filtered isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() self.getDirectiveStartToken() self.advance(len('include')) self.getWhiteSpace() includeFrom = 'file' isRaw = False if self.startswith('raw'): self.advance(3) isRaw=True self.getWhiteSpace() if self.startswith('source'): self.advance(len('source')) includeFrom = 'str' self.getWhiteSpace() if not self.peek() == '=': raise ParseError(self) self.advance() startPos = self.pos() sourceExpr = self.getExpression() sourceExpr = self._applyExpressionFilters(sourceExpr, 'include', startPos=startPos) self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) self._compiler.addInclude(sourceExpr, includeFrom, isRaw) def eatDefMacro(self): # @@TR: not filtered yet isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() self.getDirectiveStartToken() self.advance(len('defmacro')) self.getWhiteSpace() if self.matchCheetahVarStart(): self.getCheetahVarStartToken() macroName = self.getIdentifier() self.getWhiteSpace() if self.peek() == '(': argsList = self.getDefArgList(useNameMapper=False) self.advance() # past 
the closing ')' if argsList and argsList[0][0] == 'self': del argsList[0] else: argsList=[] assert macroName not in self._directiveNamesAndParsers argsList.insert(0, ('src', None)) argsList.append(('parser', 'None')) argsList.append(('macros', 'None')) argsList.append(('compilerSettings', 'None')) argsList.append(('isShortForm', 'None')) argsList.append(('EOLCharsInShortForm', 'None')) argsList.append(('startPos', 'None')) argsList.append(('endPos', 'None')) if self.matchColonForSingleLineShortFormDirective(): self.advance() # skip over : self.getWhiteSpace(max=1) macroSrc = self.readToEOL(gobble=False) self.readToEOL(gobble=True) else: if self.peek()==':': self.advance() self.getWhiteSpace() self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) macroSrc = self._eatToThisEndDirective('defmacro') #print argsList normalizedMacroSrc = ''.join( ['%def callMacro('+','.join([defv and '%s=%s'%(n, defv) or n for n, defv in argsList]) +')\n', macroSrc, '%end def']) from Cheetah.Template import Template templateAPIClass = self.setting('templateAPIClassForDefMacro', default=Template) compilerSettings = self.setting('compilerSettingsForDefMacro', default={}) searchListForMacros = self.setting('searchListForDefMacro', default=[]) searchListForMacros = list(searchListForMacros) # copy to avoid mutation bugs searchListForMacros.append({'macros': self._macros, 'parser': self, 'compilerSettings': self.settings(), }) templateAPIClass._updateSettingsWithPreprocessTokens( compilerSettings, placeholderToken='@', directiveToken='%') macroTemplateClass = templateAPIClass.compile(source=normalizedMacroSrc, compilerSettings=compilerSettings) #print normalizedMacroSrc #t = macroTemplateClass() #print t.callMacro('src') #print t.generatedClassCode() class MacroDetails: pass macroDetails = MacroDetails() macroDetails.macroSrc = macroSrc macroDetails.argsList = argsList macroDetails.template = macroTemplateClass(searchList=searchListForMacros) self._macroDetails[macroName] = 
macroDetails self._macros[macroName] = macroDetails.template.callMacro self._directiveNamesAndParsers[macroName] = self.eatMacroCall def eatMacroCall(self): isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() startPos = self.pos() self.getDirectiveStartToken() macroName = self.getIdentifier() macro = self._macros[macroName] if hasattr(macro, 'parse'): return macro.parse(parser=self, startPos=startPos) if hasattr(macro, 'parseArgs'): args = macro.parseArgs(parser=self, startPos=startPos) else: self.getWhiteSpace() args = self.getExpression(useNameMapper=False, pyTokensToBreakAt=[':']).strip() if self.matchColonForSingleLineShortFormDirective(): isShortForm = True self.advance() # skip over : self.getWhiteSpace(max=1) srcBlock = self.readToEOL(gobble=False) EOLCharsInShortForm = self.readToEOL(gobble=True) #self.readToEOL(gobble=False) else: isShortForm = False if self.peek()==':': self.advance() self.getWhiteSpace() self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) srcBlock = self._eatToThisEndDirective(macroName) if hasattr(macro, 'convertArgStrToDict'): kwArgs = macro.convertArgStrToDict(args, parser=self, startPos=startPos) else: def getArgs(*pargs, **kws): return pargs, kws exec('positionalArgs, kwArgs = getArgs(%(args)s)'%locals()) assert 'src' not in kwArgs kwArgs['src'] = srcBlock if isinstance(macro, new.instancemethod): co = macro.im_func.func_code elif (hasattr(macro, '__call__') and hasattr(macro.__call__, 'im_func')): co = macro.__call__.im_func.func_code else: co = macro.func_code availableKwArgs = inspect.getargs(co)[0] if 'parser' in availableKwArgs: kwArgs['parser'] = self if 'macros' in availableKwArgs: kwArgs['macros'] = self._macros if 'compilerSettings' in availableKwArgs: kwArgs['compilerSettings'] = self.settings() if 'isShortForm' in availableKwArgs: kwArgs['isShortForm'] = isShortForm if isShortForm and 'EOLCharsInShortForm' in availableKwArgs: kwArgs['EOLCharsInShortForm'] = 
EOLCharsInShortForm if 'startPos' in availableKwArgs: kwArgs['startPos'] = startPos if 'endPos' in availableKwArgs: kwArgs['endPos'] = self.pos() srcFromMacroOutput = macro(**kwArgs) origParseSrc = self._src origBreakPoint = self.breakPoint() origPos = self.pos() # add a comment to the output about the macro src that is being parsed # or add a comment prefix to all the comments added by the compiler self._src = srcFromMacroOutput self.setPos(0) self.setBreakPoint(len(srcFromMacroOutput)) self.parse(assertEmptyStack=False) self._src = origParseSrc self.setBreakPoint(origBreakPoint) self.setPos(origPos) #self._compiler.addRawText('end') def eatCache(self): isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() lineCol = self.getRowCol() self.getDirectiveStartToken() self.advance(len('cache')) startPos = self.pos() argList = self.getDefArgList(useNameMapper=True) argList = self._applyExpressionFilters(argList, 'cache', startPos=startPos) def startCache(): cacheInfo = self._compiler.genCacheInfoFromArgList(argList) self._compiler.startCacheRegion(cacheInfo, lineCol) if self.matchColonForSingleLineShortFormDirective(): self.advance() # skip over : self.getWhiteSpace(max=1) startCache() self.parse(breakPoint=self.findEOL(gobble=True)) self._compiler.endCacheRegion() else: if self.peek()==':': self.advance() self.getWhiteSpace() self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) self.pushToOpenDirectivesStack('cache') startCache() def eatCall(self): # @@TR: need to enable single line version of this isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() lineCol = self.getRowCol() self.getDirectiveStartToken() self.advance(len('call')) startPos = self.pos() useAutocallingOrig = self.setting('useAutocalling') self.setSetting('useAutocalling', False) self.getWhiteSpace() if self.matchCheetahVarStart(): functionName = self.getCheetahVar() else: functionName = 
self.getCheetahVar(plain=True, skipStartToken=True) self.setSetting('useAutocalling', useAutocallingOrig) # @@TR: fix up filtering self._applyExpressionFilters(self[startPos:self.pos()], 'call', startPos=startPos) self.getWhiteSpace() args = self.getExpression(pyTokensToBreakAt=[':']).strip() if self.matchColonForSingleLineShortFormDirective(): self.advance() # skip over : self._compiler.startCallRegion(functionName, args, lineCol) self.getWhiteSpace(max=1) self.parse(breakPoint=self.findEOL(gobble=False)) self._compiler.endCallRegion() else: if self.peek()==':': self.advance() self.getWhiteSpace() self.pushToOpenDirectivesStack("call") self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) self._compiler.startCallRegion(functionName, args, lineCol) def eatCallArg(self): isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() lineCol = self.getRowCol() self.getDirectiveStartToken() self.advance(len('arg')) startPos = self.pos() self.getWhiteSpace() argName = self.getIdentifier() self.getWhiteSpace() argName = self._applyExpressionFilters(argName, 'arg', startPos=startPos) self._compiler.setCallArg(argName, lineCol) if self.peek() == ':': self.getc() else: self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) def eatFilter(self): isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() self.getDirectiveStartToken() self.advance(len('filter')) self.getWhiteSpace() startPos = self.pos() if self.matchCheetahVarStart(): isKlass = True theFilter = self.getExpression(pyTokensToBreakAt=[':']) else: isKlass = False theFilter = self.getIdentifier() self.getWhiteSpace() theFilter = self._applyExpressionFilters(theFilter, 'filter', startPos=startPos) if self.matchColonForSingleLineShortFormDirective(): self.advance() # skip over : self.getWhiteSpace(max=1) self._compiler.setFilter(theFilter, isKlass) self.parse(breakPoint=self.findEOL(gobble=False)) 
self._compiler.closeFilterBlock() else: if self.peek()==':': self.advance() self.getWhiteSpace() self.pushToOpenDirectivesStack("filter") self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) self._compiler.setFilter(theFilter, isKlass) def eatTransform(self): isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() self.getDirectiveStartToken() self.advance(len('transform')) self.getWhiteSpace() startPos = self.pos() if self.matchCheetahVarStart(): isKlass = True transformer = self.getExpression(pyTokensToBreakAt=[':']) else: isKlass = False transformer = self.getIdentifier() self.getWhiteSpace() transformer = self._applyExpressionFilters(transformer, 'transform', startPos=startPos) if self.peek()==':': self.advance() self.getWhiteSpace() self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) self._compiler.setTransform(transformer, isKlass) def eatErrorCatcher(self): isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() self.getDirectiveStartToken() self.advance(len('errorCatcher')) self.getWhiteSpace() startPos = self.pos() errorCatcherName = self.getIdentifier() errorCatcherName = self._applyExpressionFilters( errorCatcherName, 'errorcatcher', startPos=startPos) self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) self._compiler.setErrorCatcher(errorCatcherName) def eatCapture(self): # @@TR: this could be refactored to use the code in eatSimpleIndentingDirective # filtered isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLinePos = self.findEOL() lineCol = self.getRowCol() self.getDirectiveStartToken() self.advance(len('capture')) startPos = self.pos() self.getWhiteSpace() expr = self.getExpression(pyTokensToBreakAt=[':']) expr = self._applyExpressionFilters(expr, 'capture', startPos=startPos) if self.matchColonForSingleLineShortFormDirective(): self.advance() # skip over : 
self._compiler.startCaptureRegion(assignTo=expr, lineCol=lineCol) self.getWhiteSpace(max=1) self.parse(breakPoint=self.findEOL(gobble=False)) self._compiler.endCaptureRegion() else: if self.peek()==':': self.advance() self.getWhiteSpace() self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLinePos) self.pushToOpenDirectivesStack("capture") self._compiler.startCaptureRegion(assignTo=expr, lineCol=lineCol) def eatIf(self): # filtered isLineClearToStartToken = self.isLineClearToStartToken() endOfFirstLine = self.findEOL() lineCol = self.getRowCol() self.getDirectiveStartToken() startPos = self.pos() expressionParts = self.getExpressionParts(pyTokensToBreakAt=[':']) expr = ''.join(expressionParts).strip() expr = self._applyExpressionFilters(expr, 'if', startPos=startPos) isTernaryExpr = ('then' in expressionParts and 'else' in expressionParts) if isTernaryExpr: conditionExpr = [] trueExpr = [] falseExpr = [] currentExpr = conditionExpr for part in expressionParts: if part.strip()=='then': currentExpr = trueExpr elif part.strip()=='else': currentExpr = falseExpr else: currentExpr.append(part) conditionExpr = ''.join(conditionExpr) trueExpr = ''.join(trueExpr) falseExpr = ''.join(falseExpr) self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine) self._compiler.addTernaryExpr(conditionExpr, trueExpr, falseExpr, lineCol=lineCol) elif self.matchColonForSingleLineShortFormDirective(): self.advance() # skip over : self._compiler.addIf(expr, lineCol=lineCol) self.getWhiteSpace(max=1) self.parse(breakPoint=self.findEOL(gobble=True)) self._compiler.commitStrConst() self._compiler.dedent() else: if self.peek()==':': self.advance() self.getWhiteSpace() self._eatRestOfDirectiveTag(isLineClearToStartToken, endOfFirstLine) self.pushToOpenDirectivesStack('if') self._compiler.addIf(expr, lineCol=lineCol) ## end directive handlers def handleEndDef(self): isNestedDef = (self.setting('allowNestedDefScopes') and [name for name in self._openDirectivesStack if 
name=='def']) if not isNestedDef: self._compiler.closeDef() else: # @@TR: temporary hack of useSearchList self.setSetting('useSearchList', self._useSearchList_orig) self._compiler.commitStrConst() self._compiler.dedent() ### def pushToOpenDirectivesStack(self, directiveName): assert directiveName in self._closeableDirectives self._openDirectivesStack.append(directiveName) def popFromOpenDirectivesStack(self, directiveName): if not self._openDirectivesStack: raise ParseError(self, msg="#end found, but nothing to end") if self._openDirectivesStack[-1] == directiveName: del self._openDirectivesStack[-1] else: raise ParseError(self, msg="#end %s found, expected #end %s" %( directiveName, self._openDirectivesStack[-1])) def assertEmptyOpenDirectivesStack(self): if self._openDirectivesStack: errorMsg = ( "Some #directives are missing their corresponding #end ___ tag: %s" %( ', '.join(self._openDirectivesStack))) raise ParseError(self, msg=errorMsg) ################################################## ## Make an alias to export Parser = _HighLevelParser
[ [ 8, 0, 0.0019, 0.0034, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.0041, 0.0004, 0, 0.66, 0.0132, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0045, 0.0004, 0, 0.66...
[ "\"\"\"\nParser classes for Cheetah's Compiler\n\nClasses:\n ParseError( Exception )\n _LowLevelParser( Cheetah.SourceReader.SourceReader ), basically a lexer\n _HighLevelParser( _LowLevelParser )\n Parser === _HighLevelParser (an alias)", "import os", "import sys", "import re", "from re import DOTALL, ...
#!/usr/bin/env python # -*- coding: utf-8 -*- __author__ = 'Michael Liao (askxuefeng@gmail.com)' import datetime from xml.parsers.expat import ParserCreate codes = { 0 : u'龙卷风', # tornado 1 : u'热带风暴', # tropical storm 2 : u'飓风', # hurricane 3 : u'风暴', # severe thunderstorms 4 : u'雷雨', # thunderstorms 5 : u'雨夹雪', # mixed rain and snow 6 : u'雨夹冰雹', # mixed rain and sleet 7 : u'雪夹冰雹', # mixed snow and sleet 8 : u'冰毛毛雨', # freezing drizzle 9 : u'毛毛雨', # drizzle 10 : u'冰雨', # freezing rain 11 : u'阵雨', # showers 12 : u'阵雨', # showers 13 : u'小雪', # snow flurries 14 : u'小雨雪', # light snow showers 15 : u'风雪', # blowing snow 16 : u'下雪', # snow 17 : u'冰雹', # hail 18 : u'雨夹雪', # sleet 19 : u'尘土', # dust 20 : u'雾', # foggy 21 : u'霾', # haze 22 : u'烟雾', # smoky 23 : u'狂风', # blustery 24 : u'大风', # windy 25 : u'寒冷', # cold 26 : u'多云', # cloudy 27 : u'多云', # mostly cloudy (night) 28 : u'多云', # mostly cloudy (day) 29 : u'局部多云', # partly cloudy (night) 30 : u'局部多云', # partly cloudy (day) 31 : u'晴朗', # clear (night) 32 : u'晴', # sunny 33 : u'晴朗', # fair (night) 34 : u'晴朗', # fair (day) 35 : u'雨夹冰雹', # mixed rain and hail 36 : u'炎热', # hot 37 : u'局部雷雨', # isolated thunderstorms 38 : u'零星雷雨', # scattered thunderstorms 39 : u'零星雷雨', # scattered thunderstorms 40 : u'零星阵雨', # scattered showers 41 : u'大雪', # heavy snow 42 : u'零星雨夹雪', # scattered snow showers 43 : u'大雪', # heavy snow 44 : u'局部多云', # partly cloudy 45 : u'雷阵雨', # thundershowers 46 : u'小雪', # snow showers 47 : u'局部雷雨', # isolated thundershowers 3200 : u'暂无数据' # not available } class Wind(object): def __init__(self, chill, direction, speed): self.chill = chill self.direction = direction self.speed = speed def __str__(self): return r'{"chill" : %s, "direction" : %s, "speed" : %s}' % (\ self.chill or "null", self.direction or "null", self.speed or "null" ) __repr__ = __str__ class Atmosphere(object): def __init__(self, humidity, visibility, pressure, rising): self.humidity = humidity self.visibility = visibility self.pressure = 
pressure self.rising = rising def __str__(self): return r'{"humidity" : %s, "visibility" : %s, "pressure" : %s, "rising": %s}' % (\ self.humidity or "null", self.visibility or "null", self.pressure or "null", self.rising or "null" ) __repr__ = __str__ class Astronomy(object): def __init__(self, sunrise, sunset): self.sunrise = sunrise self.sunset = sunset def __str__(self): return r'{"sunrise" : "%s", "sunset": "%s"}' % (self.sunrise, self.sunset) __repr__ = __str__ class Forecast(object): '<yweather:forecast day="Wed" date="30 Jun 2010" low="24" high="30" text="Mostly Cloudy" code="28" />' def __init__(self, day, date, low, high, code): self.day = day self.date = date self.low = low self.high = high self.code = code def __str__(self): return '{"date" : "%s", "day" : %s, "code" : %s, "text" : "%s", "low" : %d, "high" : %d, "image_large" : "%s", "image_small" : "%s"}' % ( self.date, self.day, self.code, codes[self.code].encode('utf-8'), self.low, self.high, "http://weather.china.xappengine.com/static/w/img/d%s.png" % self.code, "http://weather.china.xappengine.com/static/w/img/s%s.png" % self.code, ) __repr__ = __str__ def index_of(list, data): for i, item in enumerate(list): if data==item: return i return None def get_day(day): return index_of(('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'), day) def get_date(date): '30 Jun 2010' ss = date.split(' ') month = index_of(('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), ss[1]) return datetime.date(int(ss[2]), month, int(ss[0])) def f2c(temp): f = float(temp) c = (f - 32) * 5 / 9 + 0.5 return int(c) def to_24hour(time): ' convert "4:39 pm" to "16:39" ' if time.endswith(' am'): return time[:-3] if time.endswith(' pm'): time = time[:-3] n = time.find(':') to_24h = int(time[:n]) + 12 return "%d:%s" % (to_24h, time[n+1:]) return time class Weather(object): def char_data(self, text): if self.__isLastBuildDate: n = text.find(', ') text = text[n+2:] n1 = text.find(' ') n2 = text.find(' 
', n1+1) m = text[n1+1:n2] month = index_of(('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), m) text = text.replace(m, str(month)) if not text.endswith(' CST'): return text = text[:-4] is_pm = text.endswith(' pm') text = text[:-3] time = datetime.datetime.strptime(text, '%d %m %Y %I:%M') h = time.hour if is_pm: h = h + 12 self.pub = '%d-%#02d-%#02d %#02d:%#02d' % (time.year, time.month, time.day, h, time.minute) def end_element(self, name): if name=='lastBuildDate': self.__isLastBuildDate = False def start_element(self, name, attrs): if name=='lastBuildDate': self.__isLastBuildDate = True return if name=='yweather:forecast': self.forecasts.append(Forecast( get_day(attrs['day']), get_date(attrs['date']), f2c(attrs['low']), f2c(attrs['high']), int(attrs['code']) )) if name=='yweather:astronomy': self.astronomy.sunrise = to_24hour(attrs['sunrise']) self.astronomy.sunset = to_24hour(attrs['sunset']) if name=='yweather:atmosphere': self.atmosphere.humidity = attrs['humidity'] self.atmosphere.visibility = attrs['visibility'] self.atmosphere.pressure = attrs['pressure'] self.atmosphere.rising = attrs['rising'] if name=='yweather:wind': self.wind.chill = attrs['chill'] self.wind.direction = attrs['direction'] self.wind.speed = attrs['speed'] def __init__(self, name, data): self.__isLastBuildDate = False if isinstance(name, unicode): name = name.encode('utf-8') self.name = name self.pub = None self.wind = Wind(None, None, None) self.atmosphere = Atmosphere(None, None, None, None) self.astronomy = Astronomy(None, None) self.forecasts = [] parser = ParserCreate() parser.returns_unicode = False parser.StartElementHandler = self.start_element parser.EndElementHandler = self.end_element parser.CharacterDataHandler = self.char_data parser.Parse(data) def __str__(self): pub = 'null' if self.pub: pub = r'"%s"' % self.pub return '{"pub" : %s, "name" : "%s", "wind" : %s, "astronomy" : %s, "atmosphere" : %s, "forecasts" : %s}' \ % (pub, 
self.name, self.wind, self.astronomy, self.atmosphere, self.forecasts) __repr__ = __str__ if __name__=='__main__': import urllib url = 'http://weather.yahooapis.com/forecastrss?u=c&w=2143712' result = urllib.urlopen(url).read() print Weather(result)
[ [ 14, 0, 0.0172, 0.0043, 0, 0.66, 0, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.0258, 0.0043, 0, 0.66, 0.0714, 426, 0, 1, 0, 0, 426, 0, 0 ], [ 1, 0, 0.03, 0.0043, 0, 0.6...
[ "__author__ = 'Michael Liao (askxuefeng@gmail.com)'", "import datetime", "from xml.parsers.expat import ParserCreate", "codes = {\n 0 : u'龙卷风', # tornado\n 1 : u'热带风暴', # tropical storm\n 2 : u'飓风', # hurricane\n 3 : u'风暴', # severe thunderstorms\n 4 : u'雷雨', # thunderstorms...
#!/usr/bin/env python # -*- coding: utf-8 -*- __author__ = 'Michael Liao (askxuefeng@gmail.com)' from os import path from Cheetah.Template import Template def main(): file = path.join(path.split(__file__)[0], 'home.html') print 'Compile template %s...' % file cc = Template.compile(source=None, file=file, returnAClass=False, moduleName='autogen', className='CompiledTemplate') target = path.join(path.split(__file__)[0], 'autogen', '__init__.py') print 'Writing file %s...' % target f = open(target, 'w') f.write(cc) f.close() from autogen import CompiledTemplate CompiledTemplate(searchList=[]) print 'Compiled ok.' if __name__ == '__main__': main()
[ [ 14, 0, 0.1739, 0.0435, 0, 0.66, 0, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.2609, 0.0435, 0, 0.66, 0.25, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.3043, 0.0435, 0, 0.6...
[ "__author__ = 'Michael Liao (askxuefeng@gmail.com)'", "from os import path", "from Cheetah.Template import Template", "def main():\n file = path.join(path.split(__file__)[0], 'home.html')\n print('Compile template %s...' % file)\n cc = Template.compile(source=None, file=file, returnAClass=False, modu...
#!/usr/bin/env python # -*- coding: utf-8 -*- __author__ = 'Michael Liao (askxuefeng@gmail.com)' from google.appengine.ext import db class City(db.Model): name = db.StringProperty(required=True) aliases = db.StringListProperty(required=True) code = db.IntegerProperty(required=True) def first_alias(self): return self.aliases[0] def aliases_str(self): return ', '.join(self.aliases) def get_city(key=None): city = None if key: city = City.get(key) if city is None: city = find_city('beijing') return city def get_cities(): return City.all().order('aliases').fetch(1000) def find_city(name, return_default=True): ''' Find city by name. Return City or None if not found. ''' city = City.all().filter('aliases =', name).get() if city is None: city = City.all().filter('name =', name).get() if city is None and return_default: city = City.all().filter('aliases =', 'beijing').get() return city def create_city(name, aliases, code): c = City(name=name, aliases=aliases, code=code) c.put() return c def delete_city(key): City.get(key).delete() import urllib import datetime from xml.parsers.expat import ParserCreate codes = { 0 : u'龙卷风', # tornado 1 : u'热带风暴', # tropical storm 2 : u'飓风', # hurricane 3 : u'风暴', # severe thunderstorms 4 : u'雷雨', # thunderstorms 5 : u'雨夹雪', # mixed rain and snow 6 : u'雨夹冰雹', # mixed rain and sleet 7 : u'雪夹冰雹', # mixed snow and sleet 8 : u'冰毛毛雨', # freezing drizzle 9 : u'毛毛雨', # drizzle 10 : u'冰雨', # freezing rain 11 : u'阵雨', # showers 12 : u'阵雨', # showers 13 : u'小雪', # snow flurries 14 : u'小雨雪', # light snow showers 15 : u'风雪', # blowing snow 16 : u'下雪', # snow 17 : u'冰雹', # hail 18 : u'雨夹雪', # sleet 19 : u'尘土', # dust 20 : u'雾', # foggy 21 : u'霾', # haze 22 : u'烟雾', # smoky 23 : u'狂风', # blustery 24 : u'大风', # windy 25 : u'寒冷', # cold 26 : u'多云', # cloudy 27 : u'多云', # mostly cloudy (night) 28 : u'多云', # mostly cloudy (day) 29 : u'局部多云', # partly cloudy (night) 30 : u'局部多云', # partly cloudy (day) 31 : u'晴朗', # clear (night) 32 : u'晴', # sunny 33 : u'晴朗', 
# fair (night) 34 : u'晴朗', # fair (day) 35 : u'雨夹冰雹', # mixed rain and hail 36 : u'炎热', # hot 37 : u'局部雷雨', # isolated thunderstorms 38 : u'零星雷雨', # scattered thunderstorms 39 : u'零星雷雨', # scattered thunderstorms 40 : u'零星阵雨', # scattered showers 41 : u'大雪', # heavy snow 42 : u'零星雨夹雪', # scattered snow showers 43 : u'大雪', # heavy snow 44 : u'局部多云', # partly cloudy 45 : u'雷阵雨', # thundershowers 46 : u'小雪', # snow showers 47 : u'局部雷雨', # isolated thundershowers 3200 : u'暂无数据' # not available } def load_rss(url): f = urllib.urlopen(url) data = f.read() f.close() return data class Wind(object): def __init__(self, chill, direction, speed): self.chill = chill self.direction = direction self.speed = speed def __str__(self): return r'{"chill" : %s, "direction" : %s, "speed" : %s}' % (self.chill, self.direction, self.speed) __repr__ = __str__ class Atmosphere(object): def __init__(self, humidity, visibility, pressure, rising): self.humidity = humidity self.visibility = visibility self.pressure = pressure self.rising = rising def __str__(self): return r'{"humidity" : %s, "visibility" : %s, "pressure" : %s, "rising": %s}' % (self.humidity, self.visibility, self.pressure, self.rising) __repr__ = __str__ class Astronomy(object): def __init__(self, sunrise, sunset): self.sunrise = sunrise self.sunset = sunset def __str__(self): return r'{"sunrise" : "%s", "sunset": "%s"}' % (self.sunrise, self.sunset) __repr__ = __str__ class Forecast(object): '<yweather:forecast day="Wed" date="30 Jun 2010" low="24" high="30" text="Mostly Cloudy" code="28" />' def __init__(self, day, date, low, high, code): self.day = day self.date = date self.low = low self.high = high self.code = code def __str__(self): return u'{"date" : "%s", "day" : %s, "code" : %s, "text" : "%s", "low" : %d, "high" : %d, "image_large" : "%s", "image_small" : "%s"}' % ( self.date, self.day, self.code, codes[self.code], self.low, self.high, "http://l.yimg.com/a/i/us/nws/weather/gr/%sd.png" % self.code, 
"http://l.yimg.com/a/i/us/nws/weather/gr/%ss.png" % self.code, ) __repr__ = __str__ def index_of(list, data): for i, item in enumerate(list): if data==item: return i return None def get_day(day): return index_of(('Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'), day) def get_date(date): '30 Jun 2010' ss = date.split(' ') month = index_of(('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), ss[1]) return datetime.date(int(ss[2]), month, int(ss[0])) def to_24hour(time): ' convert "4:39 pm" to "16:39" ' if time.endswith(' am'): return time[:-3] if time.endswith(' pm'): time = time[:-3] n = time.find(':') to_24h = int(time[:n]) + 12 return "%d:%s" % (to_24h, time[n+1:]) return time class Weather(object): def char_data(self, text): if self.__isLastBuildDate: n = text.find(', ') text = text[n+2:] n1 = text.find(' ') n2 = text.find(' ', n1+1) m = text[n1+1:n2] month = index_of(('', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'), m) text = text.replace(m, str(month)) if not text.endswith(' CST'): return text = text[:-4] is_pm = text.endswith(' pm') text = text[:-3] time = datetime.datetime.strptime(text, '%d %m %Y %I:%M') h = time.hour if is_pm: h = h + 12 self.pub = '%d-%#02d-%#02d %#02d:%#02d' % (time.year, time.month, time.day, h, time.minute) def end_element(self, name): if name=='lastBuildDate': self.__isLastBuildDate = False def start_element(self, name, attrs): if name=='lastBuildDate': self.__isLastBuildDate = True return if name=='yweather:forecast': self.forecasts.append(Forecast( get_day(attrs['day']), get_date(attrs['date']), int(attrs['low']), int(attrs['high']), int(attrs['code']) )) if name=='yweather:astronomy': self.astronomy.sunrise = to_24hour(attrs['sunrise']) self.astronomy.sunset = to_24hour(attrs['sunset']) if name=='yweather:atmosphere': self.atmosphere.humidity = attrs['humidity'] self.atmosphere.visibility = attrs['visibility'] self.atmosphere.pressure = attrs['pressure'] 
self.atmosphere.rising = attrs['rising'] if name=='yweather:wind': self.wind.chill = attrs['chill'] self.wind.direction = attrs['direction'] self.wind.speed = attrs['speed'] def __init__(self, data): self.__isLastBuildDate = False self.pub = None self.wind = Wind(None, None, None) self.atmosphere = Atmosphere(None, None, None, None) self.astronomy = Astronomy(None, None) self.forecasts = [] parser = ParserCreate() parser.returns_unicode = False parser.StartElementHandler = self.start_element parser.EndElementHandler = self.end_element parser.CharacterDataHandler = self.char_data parser.Parse(data) def __str__(self): pub = 'null' if self.pub: pub = r'"%s"' % self.pub return u'{"pub" : %s, "wind" : %s, "astronomy" : %s, "atmosphere" : %s, "forecasts" : %s}' \ % (pub, self.wind, self.astronomy, self.atmosphere, self.forecasts) __repr__ = __str__ class Subscriber(db.Model): mobile = db.StringProperty(required=True) city = db.StringProperty(required=True) time = db.IntegerProperty(required=True)
[ [ 14, 0, 0.0151, 0.0038, 0, 0.66, 0, 777, 1, 0, 0, 0, 0, 3, 0 ], [ 1, 0, 0.0226, 0.0038, 0, 0.66, 0.0455, 167, 0, 1, 0, 0, 167, 0, 0 ], [ 3, 0, 0.0472, 0.0377, 0, 0...
[ "__author__ = 'Michael Liao (askxuefeng@gmail.com)'", "from google.appengine.ext import db", "class City(db.Model):\n name = db.StringProperty(required=True)\n aliases = db.StringListProperty(required=True)\n code = db.IntegerProperty(required=True)\n\n def first_alias(self):\n return self.al...
#!/usr/bin/env python # -*- coding: UTF-8 -*- ################################################## ## DEPENDENCIES import sys import os import os.path import __builtin__ from os.path import getmtime, exists import time import types from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple from Cheetah.Template import Template from Cheetah.DummyTransaction import * from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList from Cheetah.CacheRegion import CacheRegion import Cheetah.Filters as Filters import Cheetah.ErrorCatchers as ErrorCatchers ################################################## ## MODULE CONSTANTS VFFSL=valueFromFrameOrSearchList VFSL=valueFromSearchList VFN=valueForName currentTime=time.time __CHEETAH_version__ = '2.4.1' __CHEETAH_versionTuple__ = (2, 4, 1, 'final', 0) __CHEETAH_genTime__ = 1284450634.7130001 __CHEETAH_genTimestamp__ = 'Tue Sep 14 15:50:34 2010' __CHEETAH_src__ = 'D:\\workspace\\python\\weather-china\\src\\home.html' __CHEETAH_srcLastModified__ = 'Wed Jul 28 10:35:46 2010' __CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine' if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple: raise AssertionError( 'This template was compiled with Cheetah version' ' %s. 
Templates compiled before version %s must be recompiled.'%( __CHEETAH_version__, RequiredCheetahVersion)) ################################################## ## CLASSES class CompiledTemplate(Template): ################################################## ## CHEETAH GENERATED METHODS def __init__(self, *args, **KWs): super(CompiledTemplate, self).__init__(*args, **KWs) if not self._CHEETAH__instanceInitialized: cheetahKWArgs = {} allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split() for k,v in KWs.items(): if k in allowedKWs: cheetahKWArgs[k] = v self._initCheetahInstance(**cheetahKWArgs) def respond(self, trans=None): ## CHEETAH: main method generated for this template if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)): trans = self.transaction # is None unless self.awake() was called if not trans: trans = DummyTransaction() _dummyTrans = True else: _dummyTrans = False write = trans.response().write SL = self._CHEETAH__searchList _filter = self._CHEETAH__currentFilter ######################################## ## START - generated method body write(u'''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>\u5929\u6c14\u9884\u62a5</title> <script type="text/javascript" src="/static/js/jquery.js"></script> <script type="text/javascript"> var days=["\u661f\u671f\u65e5", "\u661f\u671f\u4e00", "\u661f\u671f\u4e8c", "\u661f\u671f\u4e09", "\u661f\u671f\u56db", "\u661f\u671f\u4e94", "\u661f\u671f\u516d"] jQuery(document).ready(function() { jQuery.getJSON("/api?city=''') _v = VFSL([locals()]+SL+[globals(), __builtin__],"city.first_alias",True) # u'${city.first_alias}' on line 11, col 29 if _v is not None: write(_filter(_v, rawExpr=u'${city.first_alias}')) # from line 11, col 29. 
write(u'''", function(data) { var today = data.forecasts[0]; \tvar tomorrow = data.forecasts[1]; jQuery("#x-today-date").html(today.date); jQuery("#x-tomorrow-date").html(tomorrow.date); jQuery("#x-today-day").html(days[today.day]); jQuery("#x-tomorrow-day").html(days[tomorrow.day]); jQuery("#x-today-text").html(today.text); jQuery("#x-tomorrow-text").html(tomorrow.text); jQuery("#x-today-temp").html(today.low + " ~ " + today.high + "\xb0"); \tjQuery("#x-tomorrow-temp").html(tomorrow.low + " ~ " + tomorrow.high + "\xb0"); jQuery("#x-today-icon").css("background-image", "url(" + today.image_large + ")"); \tjQuery("#x-tomorrow-icon").css("background-image", "url(" + tomorrow.image_large + ")"); \tjQuery("#x-today-icon-small").css("background-image", "url(" + today.image_small + ")"); jQuery("#x-pub").html(data.pub); \tif (data.wind.chill!=null) \t jQuery("#x-wind-chill").html(data.wind.chill); \tif (data.wind.direction!=null) \t jQuery("#x-wind-direction").html(data.wind.direction); \tif (data.wind.speed!=null) \t jQuery("#x-wind-speed").html(data.wind.speed); if (data.atmosphere.humidity!=null) \t jQuery("#x-atmosphere-humidity").html(data.atmosphere.humidity); if (data.atmosphere.visibility!=null) \t jQuery("#x-atmosphere-visibility").html(data.atmosphere.visibility); if (data.atmosphere.pressure!=null) \t jQuery("#x-atmosphere-pressure").html(data.atmosphere.pressure); if (data.astronomy.sunrise!=null) \t jQuery("#x-astronomy-sunrise").html(data.astronomy.sunrise); if (data.astronomy.sunset!=null) \t jQuery("#x-astronomy-sunset").html(data.astronomy.sunset); }); }); function change_city(key){ if (key=="-") return; location.assign("/?city=" + key); } </script> <link rel="stylesheet" href="/static/css/screen.css" type="text/css" media="screen, projection"> <link rel="stylesheet" href="/static/css/print.css" type="text/css" media="print"> <!--[if lt IE 8]> \t<link rel="stylesheet" href="/static/css/ie.css" type="text/css" media="screen, projection"> <![endif]--> 
<style type="text/css"> div.w-report span.h { \tmargin:3px 0px; \tfont-weight:bold; font-size:24px; \tdisplay:inline; } div.w-report span.date { \tmargin:3px 0px 3px 12px; \tfont-weight:bold; \tfont-size:16px; } div.weather-report { \tbackground-image:url(static/img/w-bg.png); \tbackground-repeat:no-repeat; \tbackground-position:56px 70px; \tmargin:0px; \tpadding:0px; \twidth:300px; \theight:160px; } div.weather-icon { \tbackground-image:url(static/w/img/d44.png); \tbackground-repeat:no-repeat; \tmargin:0px; \tpadding:0px; \twidth:300px; \theight:160px; } div.weather-text { \ttext-align:right; \tmargin:0px; \tpadding-top:76px; \tpadding-right:20px; } div.weather-text p { \tmargin:0px; \tcolor:#FFF; \tfont-size: 20px; \tfont-weight: bold; \ttext-shadow: #315895 0px -1px 1px; \tline-height:28px; } </style> <script type="text/javascript"> var _gaq = _gaq || []; _gaq.push([\'_setAccount\', \'UA-251595-22\']); _gaq.push([\'_trackPageview\']); (function() { var ga = document.createElement(\'script\'); ga.type = \'text/javascript\'; ga.async = true; ga.src = (\'https:\' == document.location.protocol ? \'https://ssl\' : \'http://www\') + \'.google-analytics.com/ga.js\'; var s = document.getElementsByTagName(\'script\')[0]; s.parentNode.insertBefore(ga, s); })(); </script> </head> <body style="font-size:13px"> <div class="container" style="background-color:#FFF"> <div class="span-24 last"> </div> <div class="span-24 last"> <div id="x-today-icon-small" style="background-repeat:no-repeat; height:34; padding:10px 0px 10px 60px; background-image:url(static/w/img/s44.png)"><strong>''') _v = VFSL([locals()]+SL+[globals(), __builtin__],"city.name",True) # u'${city.name}' on line 125, col 163 if _v is not None: write(_filter(_v, rawExpr=u'${city.name}')) # from line 125, col 163. 
write(u'''</strong> <select name="change_city" id="change_city" onchange="change_city(this.value)"> <option value="-">\u66f4\u6539\u57ce\u5e02</option> ''') for c in VFSL([locals()]+SL+[globals(), __builtin__],"cities",True): # generated from line 128, col 1 write(u''' <option value="''') _v = VFN(VFSL([locals()]+SL+[globals(), __builtin__],"c",True),"first_alias",False)() # u'${c.first_alias()}' on line 129, col 26 if _v is not None: write(_filter(_v, rawExpr=u'${c.first_alias()}')) # from line 129, col 26. write(u'''">''') _v = VFSL([locals()]+SL+[globals(), __builtin__],"c.name",True) # u'${c.name}' on line 129, col 46 if _v is not None: write(_filter(_v, rawExpr=u'${c.name}')) # from line 129, col 46. write(u'''</option> ''') write(u''' </select> </div> </div> \t<div class="span-16"> <div class="span-16 last"> <div id="weather-today" class="w-report span-8"> <div><span class="h">\u4eca\u65e5\u5929\u6c14</span><span class="date"><span id="x-today-date"></span> <span id="x-today-day"></span></span></div> <div class="weather-report"> <div id="x-today-icon" class="weather-icon"> <div class="weather-text"> <p id="x-today-text">Loading...</p> <p id="x-today-temp"></p> </div> </div> </div> <div><span class="h">\u5176\u4ed6\u4fe1\u606f\uff1a</span></div> <div style="padding:6px"> <div>\u98ce\u529b\uff1a<span id="x-wind-chill">N/A</span> \u98ce\u5411\uff1a<span id="x-wind-direction">N/A</span> \u98ce\u901f\uff1a<span id="x-wind-speed">N/A</span></div> <div>\u80fd\u89c1\u5ea6\uff1a<span id="x-atmosphere-visibility">N/A</span> \u6e7f\u5ea6\uff1a<span id="x-atmosphere-humidity">N/A</span> \u6c14\u538b\uff1a<span id="x-atmosphere-pressure">N/A</span></div> <div>\u65e5\u51fa\uff1a<span id="x-astronomy-sunrise">N/A</span> \u65e5\u843d\uff1a<span id="x-astronomy-sunset">N/A</span></div> <div>\u53d1\u5e03\u4e8e\uff1a<span id="x-pub">N/A</span></div> </div> </div> <div id="weather-tomorrow" class="w-report span-8 last"> <div><span class="h">\u660e\u65e5\u5929\u6c14</span><span 
class="date"><span id="x-tomorrow-date"></span> <span id="x-tomorrow-day"></span></span></div> <div class="weather-report"> <div id="x-tomorrow-icon" class="weather-icon"> <div class="weather-text"> <p id="x-tomorrow-text">Loading...</p> <p id="x-tomorrow-temp"></p> </div> </div> </div> </div> </div> <div class="w-report span-16 last" style="margin-top:6px"> <div><span class="h">\u5b89\u88c5Chrome\u63d2\u4ef6</span></div> <div style="padding:6px"> <div>\u5982\u679c\u60a8\u4f7f\u7528\u7684\u662f\u652f\u6301HTML 5\u7684Google Chrome\u6d4f\u89c8\u5668\uff0c\u53ef\u4ee5<a href="https://chrome.google.com/extensions/detail/gbmkicglakjoppnghhiceacmbbaihoeh" target="_blank">\u5b89\u88c5\u6700\u65b0\u63d2\u4ef6</a>\u4ee5\u4fbf\u968f\u65f6\u83b7\u53d6\u5929\u6c14\u9884\u62a5\uff1a</div> <div><a href="https://chrome.google.com/extensions/detail/gbmkicglakjoppnghhiceacmbbaihoeh" target="_blank"><img src="static/img/snapshot-chrome-extension.png" width="291" height="99" style="margin:12px"/></a></div> </div> </div> <div class="w-report span-16 last" style="margin-top:6px"> <div><span class="h">GTalk\u673a\u5668\u4eba</span></div> <div style="padding:6px"> <div>\u5982\u679c\u60a8\u4f7f\u7528Google Talk\uff0c\u53ef\u4ee5\u6dfb\u52a0\u673a\u5668\u4eba<strong>weather-china@appspot.com</strong>\u4e3a\u597d\u53cb\uff0c\u968f\u65f6\u5411\u4ed6\u8be2\u95ee\u5929\u6c14\u9884\u62a5\uff1a</div> <div><img src="static/img/snapshot-xmpp.png" width="300" height="254" style="margin:12px"/></div> </div> </div> </div> <div class="span-8 last"> <script type="text/javascript"><!-- google_ad_client = "pub-6727358730461554"; /* 300x250 */ google_ad_slot = "8201905603"; google_ad_width = 300; google_ad_height = 250; //--> </script> <script type="text/javascript" src="http://pagead2.googlesyndication.com/pagead/show_ads.js"></script> <script type="text/javascript"><!-- google_ad_client = "pub-6727358730461554"; /* 300x250 */ google_ad_slot = "8201905603"; google_ad_width = 300; google_ad_height = 250; 
//--> </script> <script type="text/javascript" src="http://pagead2.googlesyndication.com/pagead/show_ads.js"></script> <script type="text/javascript"><!-- google_ad_client = "pub-6727358730461554"; /* 300x250 */ google_ad_slot = "8201905603"; google_ad_width = 300; google_ad_height = 250; //--> </script> <script type="text/javascript" src="http://pagead2.googlesyndication.com/pagead/show_ads.js"></script> </div> <div class="span-24 last"></div> <div class="span-24 last"><div style="text-align:center;padding:6px"><a href="http://code.google.com/p/weather-china/wiki/API" target="_blank">API\u670d\u52a1</a> | <a href="http://code.google.com/p/weather-china/issues/list" target="_blank">\u610f\u89c1\u53cd\u9988</a> | <a id="x-contact" href="#">\u8054\u7cfb\u6211\u4eec</a> | Copyright&copy;2010</div></div> </div> <script type="text/javascript"> jQuery("#x-contact").attr("href", "mail" + "to:ask" + "xuefeng@" + "gm" + "ail.com"); </script> </body> </html> ''') ######################################## ## END - generated method body return _dummyTrans and trans.response().getvalue() or "" ################################################## ## CHEETAH GENERATED ATTRIBUTES _CHEETAH__instanceInitialized = False _CHEETAH_version = __CHEETAH_version__ _CHEETAH_versionTuple = __CHEETAH_versionTuple__ _CHEETAH_genTime = __CHEETAH_genTime__ _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__ _CHEETAH_src = __CHEETAH_src__ _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__ _mainCheetahMethod_for_CompiledTemplate= 'respond' ## END CLASS DEFINITION if not hasattr(CompiledTemplate, '_initCheetahAttributes'): templateAPIClass = getattr(CompiledTemplate, '_CHEETAH_templateClass', Template) templateAPIClass._addCheetahPlumbingCodeToClass(CompiledTemplate) # CHEETAH was developed by Tavis Rudd and Mike Orr # with code, advice and input from many other volunteers. 
# For more information visit http://www.CheetahTemplate.org/

##################################################
## if run from command line:

if __name__ == '__main__':
    # Running this generated module directly serves the template through
    # Cheetah's standard command-line interface.
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    cmdline_iface = CmdLineIface(templateObj=CompiledTemplate())
    cmdline_iface.run()
[ [ 1, 0, 0.0253, 0.0028, 0, 0.66, 0, 509, 0, 1, 0, 0, 509, 0, 0 ], [ 1, 0, 0.0281, 0.0028, 0, 0.66, 0.0345, 688, 0, 1, 0, 0, 688, 0, 0 ], [ 1, 0, 0.0309, 0.0028, 0, ...
[ "import sys", "import os", "import os.path", "import __builtin__", "from os.path import getmtime, exists", "import time", "import types", "from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion", "from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTupl...
''' Cookie-handlin' mix-in helper; inspired by WebOb. This module offers a cookie-handling mixin class meant to be used with Google App Engine; this class can in fact be mixed into any class that shares the following features with webapp.RequestHandler subclasses: - a self.request.cookies object with a get(key, defaultvalue) method - a self.response.headers object offering: - methods add_header(header, value) and getall(header) - the ability to 'del self.response.headers[header]' The mixin class supplies methods to get_, set_, delete_ and unset_ a cookie (each method's name ends with _cookie;-). ''' # Copyright (C) 2008 aleaxit@gmail.com # licensed under CC-by license, http://creativecommons.org/licenses/by/3.0/ import Cookie import datetime import time from Cookie import BaseCookie def _serialize_cookie_date(dt): dt = dt.timetuple() return time.strftime('"%a, %d-%b-%Y %H:%M:%S GMT"', dt.timetuple()) class CookieMixin(object): def get_cookie(self, key, default_value=None): """ Gets a cookie from the request object: Args: key: string that's the cookie's name (mandatory) default_value: default value if name's absent (default: None) Returns: a string (the cookie's value) or the default value if the cookie's absent """ return self.request.cookies.get(key, default_value) def set_cookie(self, key, value='', max_age=None, path='/', domain=None, secure=None, httponly=False, version=None, comment=None, expires=None): """ Set (add) a cookie to the response object. 
Args: key: string that is the cookie's name (mandatory) value: string (or Unicode) that is the cookie's value (default '') and many optional ones to set the cookie's properties (pass BY NAME only!): max_age (or datetime.timedelta or a number of seconds) expires (string, datetime.timedelta, or datetime.datetime) [if you pass max_age and not expires, expires is computed from max_age] path, domain, secure, httponly, version, comment (typically strings) Side effects: adds to self.response.headers an appropriate Set-Cookie header. """ if isinstance(value, unicode): value = '"%s"' % value.encode('utf8') cookies = Cookie.BaseCookie() cookies[key] = value if isinstance(max_age, datetime.timedelta): max_age = datetime.timedelta.seconds + datetime.timedelta.days*24*60*60 if max_age is not None and expires is None: expires = (datetime.datetime.utcnow() + datetime.timedelta(seconds=max_age)) if isinstance(expires, datetime.timedelta): expires = datetime.datetime.utcnow() + expires if isinstance(expires, datetime.datetime): expires = '"'+_serialize_cookie_date(expires)+'"' for var_name, var_value in [ ('max_age', max_age), ('path', path), ('domain', domain), ('secure', secure), ('HttpOnly', httponly), ('version', version), ('comment', comment), ('expires', expires), ]: if var_value is not None and var_value is not False: cookies[key][var_name.replace('_', '-')] = str(var_value) header_value = cookies[key].output(header='').lstrip() self.response.headers.add_header('Set-Cookie', header_value) def delete_cookie(self, key, path='/', domain=None): """ Delete a cookie from the client. Path and domain must match how the cookie was originally set. 
This method sets the cookie to the empty string, and max_age=0 so that it should expire immediately (a negative expires should also help with that) Args: key: string that is the cookie's name (mandatory) path, domain: optional strings, must match the original settings Side effects: adds to self.response.headers an appropriate Set-Cookie header. """ self.set_cookie(key, '', path=path, domain=domain, max_age=0, expires=datetime.timedelta(days=-5)) def unset_cookie(self, key): """ Unset a cookie with the given name (remove from the response). If there are multiple cookies (e.g., two cookies with the same name and different paths or domains), all such cookies will be deleted. Args: key: string that is the cookie's name (mandatory) Side effects: delete from self.response.headers all cookies with that name Raises: KeyError if the response had no such cookies (or, none at all) """ existing = self.response.headers.getall('Set-Cookie') if not existing: raise KeyError("No cookies at all had been set") # remove all set-cookie headers, then put back those (if any) that # should not be removed del self.response.headers['Set-Cookie'] found = False for header in existing: cookies = BaseCookie() cookies.load(header) if key in cookies: found = True del cookies[key] header = cookies.output(header='').lstrip() if header: self.response.headers.add_header('Set-Cookie', header) if not found: raise KeyError("No cookie had been set with name %r" % key)
[ [ 8, 0, 0.0538, 0.1, 0, 0.66, 0, 0, 1, 0, 0, 0, 0, 0, 0 ], [ 1, 0, 0.1308, 0.0077, 0, 0.66, 0.1667, 32, 0, 1, 0, 0, 32, 0, 0 ], [ 1, 0, 0.1385, 0.0077, 0, 0.66, ...
[ "''' Cookie-handlin' mix-in helper; inspired by WebOb.\n\nThis module offers a cookie-handling mixin class meant to be used with Google\nApp Engine; this class can in fact be mixed into any class that shares the\nfollowing features with webapp.RequestHandler subclasses:\n - a self.request.cookies object with a get...